# The following comment should be removed at some point in the future.
# mypy: disallow-untyped-defs=False
from __future__ import absolute_import
import logging
import os
import re
from pip._internal.utils.logging import indent_log
from pip._internal.utils.misc import (
display_path,
is_console_interactive,
rmtree,
split_auth_from_netloc,
)
from pip._internal.utils.subprocess import make_command
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
from pip._internal.vcs.versioncontrol import VersionControl, vcs
_svn_xml_url_re = re.compile('url="([^"]+)"')
_svn_rev_re = re.compile(r'committed-rev="(\d+)"')
_svn_info_xml_rev_re = re.compile(r'\s*revision="(\d+)"')
_svn_info_xml_url_re = re.compile(r'<url>(.*)</url>')
if MYPY_CHECK_RUNNING:
from typing import Optional, Tuple
from pip._internal.utils.misc import HiddenText
from pip._internal.utils.subprocess import CommandArgs
from pip._internal.vcs.versioncontrol import AuthInfo, RevOptions
logger = logging.getLogger(__name__)
class Subversion(VersionControl):
name = 'svn'
dirname = '.svn'
repo_name = 'checkout'
schemes = ('svn', 'svn+ssh', 'svn+http', 'svn+https', 'svn+svn')
@classmethod
def should_add_vcs_url_prefix(cls, remote_url):
return True
@staticmethod
def get_base_rev_args(rev):
return ['-r', rev]
@classmethod
def get_revision(cls, location):
"""
        Return the maximum revision for all files under a given location.
"""
# Note: taken from setuptools.command.egg_info
revision = 0
for base, dirs, _ in os.walk(location):
if cls.dirname not in dirs:
dirs[:] = []
continue # no sense walking uncontrolled subdirs
dirs.remove(cls.dirname)
entries_fn = os.path.join(base, cls.dirname, 'entries')
if not os.path.exists(entries_fn):
# FIXME: should we warn?
continue
dirurl, localrev = cls._get_svn_url_rev(base)
if base == location:
base = dirurl + '/' # save the root url
elif not dirurl or not dirurl.startswith(base):
dirs[:] = []
continue # not part of the same svn tree, skip it
revision = max(revision, localrev)
return revision
@classmethod
def get_netloc_and_auth(cls, netloc, scheme):
"""
This override allows the auth information to be passed to svn via the
--username and --password options instead of via the URL.
"""
if scheme == 'ssh':
# The --username and --password options can't be used for
# svn+ssh URLs, so keep the auth information in the URL.
return super(Subversion, cls).get_netloc_and_auth(netloc, scheme)
return split_auth_from_netloc(netloc)
@classmethod
def get_url_rev_and_auth(cls, url):
# type: (str) -> Tuple[str, Optional[str], AuthInfo]
        # Hotfix the URL scheme: the parent method strips 'svn+' from
        # 'svn+ssh://', so re-add the prefix below.
url, rev, user_pass = super(Subversion, cls).get_url_rev_and_auth(url)
if url.startswith('ssh://'):
url = 'svn+' + url
return url, rev, user_pass
@staticmethod
def make_rev_args(username, password):
# type: (Optional[str], Optional[HiddenText]) -> CommandArgs
extra_args = [] # type: CommandArgs
if username:
extra_args += ['--username', username]
if password:
extra_args += ['--password', password]
return extra_args
@classmethod
def get_remote_url(cls, location):
        # In cases where the source is in a subdirectory, not alongside
        # setup.py, we have to look upwards from the location until we
        # find a real setup.py.
orig_location = location
while not os.path.exists(os.path.join(location, 'setup.py')):
last_location = location
location = os.path.dirname(location)
if location == last_location:
# We've traversed up to the root of the filesystem without
# finding setup.py
logger.warning(
"Could not find setup.py for directory %s (tried all "
"parent directories)",
orig_location,
)
return None
return cls._get_svn_url_rev(location)[0]
@classmethod
def _get_svn_url_rev(cls, location):
from pip._internal.exceptions import InstallationError
entries_path = os.path.join(location, cls.dirname, 'entries')
if os.path.exists(entries_path):
with open(entries_path) as f:
data = f.read()
else: # subversion >= 1.7 does not have the 'entries' file
data = ''
if (data.startswith('8') or
data.startswith('9') or
data.startswith('10')):
data = list(map(str.splitlines, data.split('\n\x0c\n')))
del data[0][0] # get rid of the '8'
url = data[0][3]
revs = [int(d[9]) for d in data if len(d) > 9 and d[9]] + [0]
elif data.startswith('<?xml'):
match = _svn_xml_url_re.search(data)
if not match:
raise ValueError(
                    'Badly formatted data: {!r}'.format(data))
url = match.group(1) # get repository URL
revs = [int(m.group(1)) for m in _svn_rev_re.finditer(data)] + [0]
else:
try:
# subversion >= 1.7
# Note that using get_remote_call_options is not necessary here
# because `svn info` is being run against a local directory.
# We don't need to worry about making sure interactive mode
# is being used to prompt for passwords, because passwords
# are only potentially needed for remote server requests.
xml = cls.run_command(
['info', '--xml', location],
show_stdout=False,
stdout_only=True,
)
url = _svn_info_xml_url_re.search(xml).group(1)
revs = [
int(m.group(1)) for m in _svn_info_xml_rev_re.finditer(xml)
]
except InstallationError:
url, revs = None, []
if revs:
rev = max(revs)
else:
rev = 0
return url, rev
@classmethod
def is_commit_id_equal(cls, dest, name):
"""Always assume the versions don't match"""
return False
def __init__(self, use_interactive=None):
# type: (bool) -> None
if use_interactive is None:
use_interactive = is_console_interactive()
self.use_interactive = use_interactive
# This member is used to cache the fetched version of the current
# ``svn`` client.
# Special value definitions:
# None: Not evaluated yet.
# Empty tuple: Could not parse version.
self._vcs_version = None # type: Optional[Tuple[int, ...]]
super(Subversion, self).__init__()
def call_vcs_version(self):
# type: () -> Tuple[int, ...]
"""Query the version of the currently installed Subversion client.
:return: A tuple containing the parts of the version information or
``()`` if the version returned from ``svn`` could not be parsed.
:raises: BadCommand: If ``svn`` is not installed.
"""
# Example versions:
# svn, version 1.10.3 (r1842928)
# compiled Feb 25 2019, 14:20:39 on x86_64-apple-darwin17.0.0
# svn, version 1.7.14 (r1542130)
# compiled Mar 28 2018, 08:49:13 on x86_64-pc-linux-gnu
# svn, version 1.12.0-SlikSvn (SlikSvn/1.12.0)
# compiled May 28 2019, 13:44:56 on x86_64-microsoft-windows6.2
version_prefix = 'svn, version '
version = self.run_command(
['--version'], show_stdout=False, stdout_only=True
)
if not version.startswith(version_prefix):
return ()
version = version[len(version_prefix):].split()[0]
version_list = version.partition('-')[0].split('.')
try:
parsed_version = tuple(map(int, version_list))
except ValueError:
return ()
return parsed_version
def get_vcs_version(self):
# type: () -> Tuple[int, ...]
"""Return the version of the currently installed Subversion client.
If the version of the Subversion client has already been queried,
a cached value will be used.
:return: A tuple containing the parts of the version information or
``()`` if the version returned from ``svn`` could not be parsed.
:raises: BadCommand: If ``svn`` is not installed.
"""
if self._vcs_version is not None:
# Use cached version, if available.
# If parsing the version failed previously (empty tuple),
# do not attempt to parse it again.
return self._vcs_version
vcs_version = self.call_vcs_version()
self._vcs_version = vcs_version
return vcs_version
def get_remote_call_options(self):
# type: () -> CommandArgs
"""Return options to be used on calls to Subversion that contact the server.
These options are applicable for the following ``svn`` subcommands used
in this class.
- checkout
- export
- switch
- update
:return: A list of command line arguments to pass to ``svn``.
"""
if not self.use_interactive:
# --non-interactive switch is available since Subversion 0.14.4.
# Subversion < 1.8 runs in interactive mode by default.
return ['--non-interactive']
svn_version = self.get_vcs_version()
# By default, Subversion >= 1.8 runs in non-interactive mode if
# stdin is not a TTY. Since that is how pip invokes SVN, in
# call_subprocess(), pip must pass --force-interactive to ensure
# the user can be prompted for a password, if required.
# SVN added the --force-interactive option in SVN 1.8. Since
# e.g. RHEL/CentOS 7, which is supported until 2024, ships with
# SVN 1.7, pip should continue to support SVN 1.7. Therefore, pip
# can't safely add the option if the SVN version is < 1.8 (or unknown).
if svn_version >= (1, 8):
return ['--force-interactive']
return []
def export(self, location, url):
# type: (str, HiddenText) -> None
"""Export the svn repository at the url to the destination location"""
url, rev_options = self.get_url_rev_options(url)
logger.info('Exporting svn repository %s to %s', url, location)
with indent_log():
if os.path.exists(location):
            # Subversion doesn't like to check out over an existing
            # directory; --force fixes this, but it was only added in
            # svn 1.5.
rmtree(location)
cmd_args = make_command(
'export', self.get_remote_call_options(),
rev_options.to_args(), url, location,
)
self.run_command(cmd_args, show_stdout=False)
def fetch_new(self, dest, url, rev_options):
# type: (str, HiddenText, RevOptions) -> None
rev_display = rev_options.to_display()
logger.info(
'Checking out %s%s to %s',
url,
rev_display,
display_path(dest),
)
cmd_args = make_command(
'checkout', '-q', self.get_remote_call_options(),
rev_options.to_args(), url, dest,
)
self.run_command(cmd_args)
def switch(self, dest, url, rev_options):
# type: (str, HiddenText, RevOptions) -> None
cmd_args = make_command(
'switch', self.get_remote_call_options(), rev_options.to_args(),
url, dest,
)
self.run_command(cmd_args)
def update(self, dest, url, rev_options):
# type: (str, HiddenText, RevOptions) -> None
cmd_args = make_command(
'update', self.get_remote_call_options(), rev_options.to_args(),
dest,
)
self.run_command(cmd_args)
vcs.register(Subversion)
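
# ---------------------------------------------------------------------------
# A minimal usage sketch of the classmethods above. Illustrative only:
# pip._internal is not a stable public API, and the URL and credentials
# below are hypothetical. The expected values assume the parent
# VersionControl.get_url_rev_and_auth() splits off the '+<scheme>' prefix,
# the trailing '@<rev>' and the auth pair as described in the overrides
# above.
if __name__ == '__main__':
    url, rev, (user, password) = Subversion.get_url_rev_and_auth(
        'svn+https://user:secret@svn.example.com/repo/trunk@1234')
    # 'svn+' is stripped for https URLs (only svn+ssh keeps its prefix),
    # the revision is split off after '@', and the credentials are removed
    # from the netloc so they can be passed via --username/--password.
    assert url == 'https://svn.example.com/repo/trunk'
    assert rev == '1234'
    assert (user, password) == ('user', 'secret')
    assert Subversion.make_rev_args(user, password) == [
        '--username', 'user', '--password', 'secret']
    # With interactivity disabled, every remote call gets --non-interactive.
    assert Subversion(use_interactive=False).get_remote_call_options() == [
        '--non-interactive']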
#!/usr/bin/env python
import unittest
from graph import Graph
from graph_exceptions import DigraphError, NotDigraphError
class TestBasicOperations(unittest.TestCase):
def test_construct_with_params(self):
graph = Graph({
"a": set(["b", "d"]),
"b": set(["a"]),
"c": set([]),
"d": set(["a"]),
"e": set([])
}, digraph=True)
self.assertEqual(graph.order(), 5)
self.assertEqual(graph.out_degree("a"), 2)
self.assertTrue(graph._digraph)
def test_add(self):
graph = Graph(digraph=True)
graph.add("a")
self.assertEqual(graph.order(), 1)
self.assertEqual(graph.in_degree("a"), 0)
self.assertEqual(graph.out_degree("a"), 0)
def test_add_already_added(self):
graph = Graph(digraph=True)
graph.add("a")
self.assertEqual(graph.order(), 1)
self.assertEqual(graph.in_degree("a"), 0)
self.assertEqual(graph.out_degree("a"), 0)
graph.add("a")
self.assertEqual(graph.order(), 1)
self.assertEqual(graph.in_degree("a"), 0)
self.assertEqual(graph.out_degree("a"), 0)
def test_remove_without_vertices(self):
graph = Graph(digraph=True)
graph.remove("a")
self.assertEqual(graph.order(), 0)
self.assertEqual(graph.in_degree("a"), 0)
self.assertEqual(graph.out_degree("a"), 0)
def test_remove(self):
graph = Graph(digraph=True)
graph.add("a")
graph.remove("a")
self.assertEqual(graph.order(), 0)
self.assertEqual(graph.in_degree("a"), 0)
self.assertEqual(graph.out_degree("a"), 0)
def test_remove_already_removed(self):
graph = Graph(digraph=True)
graph.add("a")
graph.remove("a")
self.assertEqual(graph.order(), 0)
self.assertEqual(graph.in_degree("a"), 0)
self.assertEqual(graph.out_degree("a"), 0)
graph.remove("a")
self.assertEqual(graph.order(), 0)
self.assertEqual(graph.in_degree("a"), 0)
self.assertEqual(graph.out_degree("a"), 0)
def test_random(self):
graph = Graph({
"a": set(["b", "d"]),
"b": set(["a"]),
"c": set([]),
"d": set(["a"]),
"e": set([])
}, digraph=True)
random_vertex = graph.random()
self.assertTrue(random_vertex in graph._vertices)
    def test_order(self):
graph = Graph({
"a": set(["b", "d"]),
"b": set(["a"]),
"c": set([]),
"d": set(["a"]),
"e": set([])
}, digraph=True)
self.assertEqual(graph.order(), 5)
graph.remove("e")
self.assertEqual(graph.order(), 4)
graph.remove("d")
self.assertEqual(graph.order(), 3)
graph.remove("c")
self.assertEqual(graph.order(), 2)
graph.remove("b")
self.assertEqual(graph.order(), 1)
graph.remove("a")
self.assertEqual(graph.order(), 0)
def test_adjacents_to(self):
graph = Graph({
"a": set(["b", "d"]),
"b": set(["a"]),
"c": set([]),
"d": set(["a"]),
"e": set([])
}, digraph=True)
        # adjacents_to is only defined for undirected graphs, so calling it
        # on a digraph must raise DigraphError.
        with self.assertRaises(DigraphError):
            graph.adjacents_to("a")
def test_degree(self):
graph = Graph({
"a": set(["b", "d"]),
"b": set(["a"]),
"c": set([]),
"d": set(["a"]),
"e": set([])
}, digraph=True)
with self.assertRaises(DigraphError):
graph.degree("a")
def test_connect(self):
graph = Graph({
"a": set(["b", "d"]),
"b": set(["a"]),
"c": set([]),
"d": set([]),
"e": set([])
}, digraph=True)
graph.connect("a", "c")
self.assertTrue(graph.predecessors("a") == set(["b"]))
self.assertTrue(graph.sucessors("a") == set(["b", "d", "c"]))
self.assertEqual(graph.in_degree("a"), 1)
self.assertEqual(graph.out_degree("a"), 3)
def test_connect_without_vertices(self):
graph = Graph(digraph=True)
graph.connect("a", "c")
self.assertFalse(graph.sucessors("a") == set(["c"]))
self.assertEqual(graph.in_degree("a"), 0)
self.assertEqual(graph.out_degree("a"), 0)
def test_connect_with_inexistent_vertex(self):
graph = Graph({
"a": set(["b", "d"]),
"b": set(["a"]),
"c": set([]),
"d": set([]),
"e": set([])
}, digraph=True)
graph.connect("a", "z")
graph.connect("a", "y")
graph.connect("a", "h")
graph.connect("h", "a")
self.assertFalse(graph.sucessors("a") == set(["b", "d", "z", "y", "h"]))
self.assertNotEqual(graph.out_degree("a"), 5)
self.assertTrue(graph.predecessors("a") == set(["b"]))
self.assertEqual(graph.in_degree("a"), 1)
def test_disconnect(self):
graph = Graph({
"a": set(["b", "d"]),
"b": set(["a"]),
"c": set([]),
"d": set([]),
"e": set([])
}, digraph=True)
graph.disconnect("a", "b")
self.assertTrue(graph.sucessors("a") == set(["d"]))
self.assertEqual(graph.in_degree("a"), 1)
self.assertEqual(graph.out_degree("a"), 1)
def test_disconnect_without_vertices(self):
graph = Graph(digraph=True)
self.assertFalse(graph.sucessors("a") == set(["c"]))
self.assertFalse(graph.predecessors("a") == set(["c"]))
self.assertEqual(graph.in_degree("a"), 0)
self.assertEqual(graph.out_degree("a"), 0)
graph.disconnect("a", "c")
self.assertFalse(graph.sucessors("a") == set(["c"]))
self.assertFalse(graph.predecessors("a") == set(["c"]))
self.assertEqual(graph.in_degree("a"), 0)
self.assertEqual(graph.out_degree("a"), 0)
def test_disconnect_with_inexistent_vertex(self):
graph = Graph({
"a": set(["b", "d"]),
"b": set(["a"]),
"c": set([]),
"d": set([]),
"e": set([])
}, digraph=True)
graph.disconnect("a", "z")
graph.disconnect("a", "y")
graph.disconnect("a", "h")
self.assertFalse(graph.sucessors("a") == set([]))
self.assertFalse(graph.predecessors("a") == set([]))
self.assertNotEqual(graph.in_degree("a"), 0)
self.assertNotEqual(graph.out_degree("a"), 0)
self.assertTrue(graph.sucessors("a") == set(["b", "d"]))
self.assertTrue(graph.predecessors("a") == set(["b"]))
self.assertEqual(graph.in_degree("a"), 1)
self.assertEqual(graph.out_degree("a"), 2)
def test_in_degree(self):
graph = Graph({
"a": set(["b", "d"]),
"b": set(["a"]),
"c": set([]),
"d": set([]),
"e": set([])
}, digraph=True)
self.assertEqual(graph.in_degree("a"), 1)
def test_out_degree(self):
graph = Graph({
"a": set(["b", "d"]),
"b": set(["a"]),
"c": set([]),
"d": set([]),
"e": set([])
}, digraph=True)
self.assertEqual(graph.out_degree("a"), 2)
def test_predecessors(self):
graph = Graph({
"a": set(["b", "d"]),
"b": set(["a"]),
"c": set([]),
"d": set([]),
"e": set([])
}, digraph=True)
self.assertTrue(graph.predecessors("a") == set(["b"]))
self.assertEqual(len(graph.predecessors("a")), 1)
def test_sucessors(self):
graph = Graph({
"a": set(["b", "d"]),
"b": set(["a"]),
"c": set([]),
"d": set([]),
"e": set([])
}, digraph=True)
self.assertTrue(graph.sucessors("a") == set(["b", "d"]))
self.assertEqual(len(graph.sucessors("a")), 2)
    def test_random_undirected(self):
graph = Graph({
"a": set(["b", "d"]),
"b": set(["a", "c"]),
"c": set(["b", "e"]),
"d": set(["a", "e"]),
"e": set(["c", "d"])
})
vertex = graph.random()
self.assertTrue(vertex in graph.vertices())
class DerivedOperations(unittest.TestCase):
def test_is_regular(self):
graph = Graph({
"a": set(["b", "d"]),
"b": set(["a", "c"]),
"c": set(["b", "e"]),
"d": set(["a", "e"]),
"e": set(["c", "d"])
}, digraph=True)
with self.assertRaises(NotImplementedError):
graph.is_regular()
def test_is_regular_negative(self):
graph = Graph({
"a": set(["b", "d"]),
"b": set(["a", "c"]),
"c": set(["b", "e"]),
"d": set(["a"]),
"e": set(["c"])
}, digraph=True)
with self.assertRaises(NotImplementedError):
graph.is_regular()
def test_is_complete(self):
graph = Graph({
"a": set(["b", "c", "d", "e"]),
"b": set(["a", "c", "d", "e"]),
"c": set(["b", "d", "e", "a"]),
"d": set(["a", "b", "c", "e"]),
"e": set(["c", "a", "b", "d"])
}, digraph=True)
with self.assertRaises(NotImplementedError):
graph.is_complete()
def test_is_complete_negative(self):
graph = Graph({
"a": set(["b"]),
"b": set(["a"]),
"c": set(["e"]),
"d": set(["e"]),
"e": set(["c", "d"])
}, digraph=True)
with self.assertRaises(NotImplementedError):
graph.is_complete()
def test_is_connected(self):
graph = Graph({
"a": set(["b"]),
"b": set(["c"]),
"c": set(["d"]),
"d": set(["e"]),
"e": set(["a"])
}, digraph=True)
with self.assertRaises(NotImplementedError):
graph.is_connected()
def test_is_connected_negative(self):
graph = Graph({
"a": set(["b"]),
"b": set(["a"]),
"c": set(["e"]),
"d": set(["e"]),
"e": set(["c", "d"])
}, digraph=True)
with self.assertRaises(NotImplementedError):
graph.is_connected()
def test_is_tree(self):
graph = Graph({
"a": set(["b", "a"]),
"b": set(["a", "d", "e"]),
"c": set(["a"]),
"d": set(["b"]),
"e": set(["b"])
}, digraph=True)
with self.assertRaises(NotImplementedError):
graph.is_tree()
def test_is_tree_negative(self):
graph = Graph({
"a": set(["b", "a", "d", "e"]),
"b": set(["a", "d", "e"]),
"c": set(["a"]),
"d": set(["b", "a"]),
"e": set(["b", "a"])
}, digraph=True)
with self.assertRaises(NotImplementedError):
graph.is_tree()
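
# ---------------------------------------------------------------------------
# The graph module under test is not shown in this file. The class below is
# a minimal sketch of the digraph interface these tests assume (method
# names, including the original 'sucessors' spelling, are taken verbatim
# from the tests); it is an illustrative assumption, not the real
# graph.Graph.
import random


class SketchDigraph(object):
    def __init__(self, vertices=None, digraph=False):
        self._digraph = digraph
        self._vertices = {v: set(adj) for v, adj in (vertices or {}).items()}

    def order(self):
        return len(self._vertices)

    def vertices(self):
        return set(self._vertices)

    def add(self, vertex):
        self._vertices.setdefault(vertex, set())

    def remove(self, vertex):
        # Removing a missing vertex is a no-op
        # (see test_remove_already_removed).
        self._vertices.pop(vertex, None)
        for adjacents in self._vertices.values():
            adjacents.discard(vertex)

    def random(self):
        return random.choice(list(self._vertices))

    def connect(self, tail, head):
        # A no-op when either endpoint is missing
        # (see test_connect_with_inexistent_vertex).
        if tail in self._vertices and head in self._vertices:
            self._vertices[tail].add(head)

    def disconnect(self, tail, head):
        if tail in self._vertices and head in self._vertices:
            self._vertices[tail].discard(head)

    def sucessors(self, vertex):
        return set(self._vertices.get(vertex, set()))

    def predecessors(self, vertex):
        return set(v for v, adj in self._vertices.items() if vertex in adj)

    def out_degree(self, vertex):
        return len(self.sucessors(vertex))

    def in_degree(self, vertex):
        return len(self.predecessors(vertex))

    def adjacents_to(self, vertex):
        # Only meaningful for undirected graphs (see test_adjacents_to).
        if self._digraph:
            raise DigraphError('adjacents_to is undefined for digraphs')
        return self.sucessors(vertex)

    def degree(self, vertex):
        if self._digraph:
            raise DigraphError('degree is undefined for digraphs')
        return len(self.sucessors(vertex))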
if __name__ == "__main__":
unittest.main()
# Copyright 2015 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import re
from dateutil import parser as dateutil_parser
from oslo_utils import timeutils
from sqlalchemy.dialects import sqlite
from sqlalchemy import func
from sqlalchemy import MetaData
from sqlalchemy import select
from nova import context
from nova.db import api as db
from nova.db.sqlalchemy import api as sqlalchemy_api
from nova.tests.functional import test_servers
class TestDatabaseArchive(test_servers.ServersTestBase):
"""Tests DB API for archiving (soft) deleted records"""
def setUp(self):
super(TestDatabaseArchive, self).setUp()
# TODO(mriedem): pull this out so we can re-use it in
# test_archive_deleted_rows_fk_constraint
# SQLite doesn't enforce foreign key constraints without a pragma.
engine = sqlalchemy_api.get_engine()
dialect = engine.url.get_dialect()
if dialect == sqlite.dialect:
# We're seeing issues with foreign key support in SQLite 3.6.20
# SQLAlchemy doesn't support it at all with < SQLite 3.6.19
# It works fine in SQLite 3.7.
# So return early to skip this test if running SQLite < 3.7
import sqlite3
tup = sqlite3.sqlite_version_info
if tup[0] < 3 or (tup[0] == 3 and tup[1] < 7):
self.skipTest(
'sqlite version too old for reliable SQLA foreign_keys')
engine.connect().execute("PRAGMA foreign_keys = ON")
def _create_server(self):
"""Creates a minimal test server via the compute API
Ensures the server is created and can be retrieved from the compute API
and waits for it to be ACTIVE.
:returns: created server (dict)
"""
# Create a server
server = self._build_minimal_create_server_request()
created_server = self.api.post_server({'server': server})
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
# Check it's there
found_server = self.api.get_server(created_server_id)
self.assertEqual(created_server_id, found_server['id'])
found_server = self._wait_for_state_change(found_server, 'BUILD')
# It should be available...
self.assertEqual('ACTIVE', found_server['status'])
return found_server
def test_archive_deleted_rows(self):
# Boots a server, deletes it, and then tries to archive it.
server = self._create_server()
server_id = server['id']
# Assert that there are instance_actions. instance_actions are
# interesting since we don't soft delete them but they have a foreign
# key back to the instances table.
actions = self.api.get_instance_actions(server_id)
self.assertTrue(len(actions),
'No instance actions for server: %s' % server_id)
self._delete_server(server_id)
# Verify we have the soft deleted instance in the database.
admin_context = context.get_admin_context(read_deleted='yes')
# This will raise InstanceNotFound if it's not found.
instance = db.instance_get_by_uuid(admin_context, server_id)
# Make sure it's soft deleted.
self.assertNotEqual(0, instance.deleted)
# Verify we have some system_metadata since we'll check that later.
self.assertTrue(len(instance.system_metadata),
'No system_metadata for instance: %s' % server_id)
# Now try and archive the soft deleted records.
results, deleted_instance_uuids = db.archive_deleted_rows(max_rows=100)
# verify system_metadata was dropped
self.assertIn('instance_system_metadata', results)
self.assertEqual(len(instance.system_metadata),
results['instance_system_metadata'])
# Verify that instances rows are dropped
self.assertIn('instances', results)
        # Verify that instance_actions and instance_actions_events are
        # dropped by the archive.
self.assertIn('instance_actions', results)
self.assertIn('instance_actions_events', results)
def test_archive_deleted_rows_with_undeleted_residue(self):
# Boots a server, deletes it, and then tries to archive it.
server = self._create_server()
server_id = server['id']
# Assert that there are instance_actions. instance_actions are
# interesting since we don't soft delete them but they have a foreign
# key back to the instances table.
actions = self.api.get_instance_actions(server_id)
self.assertTrue(len(actions),
'No instance actions for server: %s' % server_id)
self._delete_server(server_id)
# Verify we have the soft deleted instance in the database.
admin_context = context.get_admin_context(read_deleted='yes')
# This will raise InstanceNotFound if it's not found.
instance = db.instance_get_by_uuid(admin_context, server_id)
# Make sure it's soft deleted.
self.assertNotEqual(0, instance.deleted)
# Undelete the instance_extra record to make sure we delete it anyway
extra = db.instance_extra_get_by_instance_uuid(admin_context,
instance.uuid)
self.assertNotEqual(0, extra.deleted)
db.instance_extra_update_by_uuid(admin_context, instance.uuid,
{'deleted': 0})
extra = db.instance_extra_get_by_instance_uuid(admin_context,
instance.uuid)
self.assertEqual(0, extra.deleted)
# Verify we have some system_metadata since we'll check that later.
self.assertTrue(len(instance.system_metadata),
'No system_metadata for instance: %s' % server_id)
# Now try and archive the soft deleted records.
results, deleted_instance_uuids = db.archive_deleted_rows(max_rows=100)
# verify system_metadata was dropped
self.assertIn('instance_system_metadata', results)
self.assertEqual(len(instance.system_metadata),
results['instance_system_metadata'])
# Verify that instances rows are dropped
self.assertIn('instances', results)
        # Verify that instance_actions and instance_actions_events are
        # dropped by the archive.
self.assertIn('instance_actions', results)
self.assertIn('instance_actions_events', results)
def _get_table_counts(self):
engine = sqlalchemy_api.get_engine()
conn = engine.connect()
meta = MetaData(engine)
meta.reflect()
shadow_tables = sqlalchemy_api._purgeable_tables(meta)
results = {}
for table in shadow_tables:
r = conn.execute(
select([func.count()]).select_from(table)).fetchone()
results[table.name] = r[0]
return results
def test_archive_then_purge_all(self):
server = self._create_server()
server_id = server['id']
self._delete_server(server_id)
results, deleted_ids = db.archive_deleted_rows(max_rows=1000)
self.assertEqual([server_id], deleted_ids)
lines = []
def status(msg):
lines.append(msg)
admin_context = context.get_admin_context()
deleted = sqlalchemy_api.purge_shadow_tables(admin_context,
None, status_fn=status)
self.assertNotEqual(0, deleted)
self.assertNotEqual(0, len(lines))
for line in lines:
self.assertIsNotNone(re.match(r'Deleted [1-9][0-9]* rows from .*',
line))
results = self._get_table_counts()
# No table should have any rows
self.assertFalse(any(results.values()))
def test_archive_then_purge_by_date(self):
server = self._create_server()
server_id = server['id']
self._delete_server(server_id)
results, deleted_ids = db.archive_deleted_rows(max_rows=1000)
self.assertEqual([server_id], deleted_ids)
pre_purge_results = self._get_table_counts()
past = timeutils.utcnow() - datetime.timedelta(hours=1)
admin_context = context.get_admin_context()
deleted = sqlalchemy_api.purge_shadow_tables(admin_context,
past)
# Make sure we didn't delete anything if the marker is before
# we started
self.assertEqual(0, deleted)
results = self._get_table_counts()
# Nothing should be changed if we didn't purge anything
self.assertEqual(pre_purge_results, results)
future = timeutils.utcnow() + datetime.timedelta(hours=1)
deleted = sqlalchemy_api.purge_shadow_tables(admin_context, future)
# Make sure we deleted things when the marker is after
# we started
self.assertNotEqual(0, deleted)
results = self._get_table_counts()
# There should be no rows in any table if we purged everything
self.assertFalse(any(results.values()))
def test_purge_with_real_date(self):
"""Make sure the result of dateutil's parser works with the
query we're making to sqlalchemy.
"""
server = self._create_server()
server_id = server['id']
self._delete_server(server_id)
results, deleted_ids = db.archive_deleted_rows(max_rows=1000)
self.assertEqual([server_id], deleted_ids)
date = dateutil_parser.parse('oct 21 2015', fuzzy=True)
admin_context = context.get_admin_context()
deleted = sqlalchemy_api.purge_shadow_tables(admin_context, date)
self.assertEqual(0, deleted)
from os.path import (join, dirname, normpath, isfile,
isdir, exists, realpath, basename)
from os import chdir
import importlib
import glob
from shutil import rmtree
from six import PY2, with_metaclass
import hashlib
from re import match
import sh
import shutil
import fnmatch
from os import listdir, unlink, environ, mkdir, curdir, walk
from sys import stdout
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
from pythonforandroid.logger import (logger, info, warning, error, debug, shprint, info_main)
from pythonforandroid.util import (urlretrieve, current_directory, ensure_dir)
# this import is necessary to keep imp.load_source from complaining :)
import pythonforandroid.recipes
if PY2:
import imp
import_recipe = imp.load_source
else:
import importlib.util
if hasattr(importlib.util, 'module_from_spec'):
def import_recipe(module, filename):
spec = importlib.util.spec_from_file_location(module, filename)
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
return mod
else:
from importlib.machinery import SourceFileLoader
def import_recipe(module, filename):
return SourceFileLoader(module, filename).load_module()
class RecipeMeta(type):
def __new__(cls, name, bases, dct):
if name != 'Recipe':
if 'url' in dct:
dct['_url'] = dct.pop('url')
if 'version' in dct:
dct['_version'] = dct.pop('version')
return super(RecipeMeta, cls).__new__(cls, name, bases, dct)
class Recipe(with_metaclass(RecipeMeta)):
_url = None
'''The address from which the recipe may be downloaded. This is not
essential, it may be omitted if the source is available some other
way, such as via the :class:`IncludedFilesBehaviour` mixin.
If the url includes the version, you may (and probably should)
replace this with ``{version}``, which will automatically be
replaced by the :attr:`version` string during download.
.. note:: Methods marked (internal) are used internally and you
probably don't need to call them, but they are available
if you want.
'''
_version = None
'''A string giving the version of the software the recipe describes,
e.g. ``2.0.3`` or ``master``.'''
md5sum = None
'''The md5sum of the source from the :attr:`url`. Non-essential, but
you should try to include this, it is used to check that the download
finished correctly.
'''
depends = []
'''A list containing the names of any recipes that this recipe depends on.
'''
conflicts = []
'''A list containing the names of any recipes that are known to be
incompatible with this one.'''
opt_depends = []
'''A list of optional dependencies, that must be built before this
recipe if they are built at all, but whose presence is not essential.'''
patches = []
'''A list of patches to apply to the source. Values can be either a string
referring to the patch file relative to the recipe dir, or a tuple of the
string patch file and a callable, which will receive the kwargs `arch` and
`recipe`, which should return True if the patch should be applied.'''
python_depends = []
'''A list of pure-Python packages that this package requires. These
packages will NOT be available at build time, but will be added to the
list of pure-Python packages to install via pip. If you need these packages
at build time, you must create a recipe.'''
archs = ['armeabi'] # Not currently implemented properly
@property
def version(self):
key = 'VERSION_' + self.name
return environ.get(key, self._version)
@property
def url(self):
key = 'URL_' + self.name
return environ.get(key, self._url)
@property
def versioned_url(self):
        '''A property returning the url of the recipe with ``{version}``
        replaced by the :attr:`version`. If accessing the url, you should
        use this property, *not* access the url directly.'''
if self.url is None:
return None
return self.url.format(version=self.version)
def download_file(self, url, target, cwd=None):
"""
(internal) Download an ``url`` to a ``target``.
"""
if not url:
return
info('Downloading {} from {}'.format(self.name, url))
if cwd:
target = join(cwd, target)
parsed_url = urlparse(url)
if parsed_url.scheme in ('http', 'https'):
def report_hook(index, blksize, size):
if size <= 0:
progression = '{0} bytes'.format(index * blksize)
else:
progression = '{0:.2f}%'.format(
index * blksize * 100. / float(size))
stdout.write('- Download {}\r'.format(progression))
stdout.flush()
if exists(target):
unlink(target)
urlretrieve(url, target, report_hook)
return target
elif parsed_url.scheme in ('git', 'git+file', 'git+ssh', 'git+http', 'git+https'):
if isdir(target):
with current_directory(target):
shprint(sh.git, 'fetch', '--tags')
if self.version:
shprint(sh.git, 'checkout', self.version)
shprint(sh.git, 'pull')
shprint(sh.git, 'pull', '--recurse-submodules')
shprint(sh.git, 'submodule', 'update', '--recursive')
else:
if url.startswith('git+'):
url = url[4:]
shprint(sh.git, 'clone', '--recursive', url, target)
if self.version:
with current_directory(target):
shprint(sh.git, 'checkout', self.version)
shprint(sh.git, 'submodule', 'update', '--recursive')
return target
# def get_archive_rootdir(self, filename):
# if filename.endswith(".tgz") or filename.endswith(".tar.gz") or \
# filename.endswith(".tbz2") or filename.endswith(".tar.bz2"):
# archive = tarfile.open(filename)
# root = archive.next().path.split("/")
# return root[0]
# elif filename.endswith(".zip"):
# with zipfile.ZipFile(filename) as zf:
# return dirname(zf.namelist()[0])
# else:
# print("Error: cannot detect root directory")
# print("Unrecognized extension for {}".format(filename))
# raise Exception()
def apply_patch(self, filename, arch):
"""
Apply a patch from the current recipe directory into the current
build directory.
"""
info("Applying patch {}".format(filename))
filename = join(self.get_recipe_dir(), filename)
shprint(sh.patch, "-t", "-d", self.get_build_dir(arch), "-p1",
"-i", filename, _tail=10)
def copy_file(self, filename, dest):
info("Copy {} to {}".format(filename, dest))
filename = join(self.get_recipe_dir(), filename)
dest = join(self.build_dir, dest)
shutil.copy(filename, dest)
def append_file(self, filename, dest):
info("Append {} to {}".format(filename, dest))
filename = join(self.get_recipe_dir(), filename)
dest = join(self.build_dir, dest)
with open(filename, "rb") as fd:
data = fd.read()
with open(dest, "ab") as fd:
fd.write(data)
# def has_marker(self, marker):
# """
# Return True if the current build directory has the marker set
# """
# return exists(join(self.build_dir, ".{}".format(marker)))
# def set_marker(self, marker):
# """
# Set a marker info the current build directory
# """
# with open(join(self.build_dir, ".{}".format(marker)), "w") as fd:
# fd.write("ok")
# def delete_marker(self, marker):
# """
# Delete a specific marker
# """
# try:
# unlink(join(self.build_dir, ".{}".format(marker)))
# except:
# pass
@property
def name(self):
'''The name of the recipe, the same as the folder containing it.'''
modname = self.__class__.__module__
return modname.split(".", 2)[-1]
# @property
# def archive_fn(self):
# bfn = basename(self.url.format(version=self.version))
# fn = "{}/{}-{}".format(
# self.ctx.cache_dir,
# self.name, bfn)
# return fn
@property
def filtered_archs(self):
'''Return archs of self.ctx that are valid build archs
for the Recipe.'''
result = []
for arch in self.ctx.archs:
if not self.archs or (arch.arch in self.archs):
result.append(arch)
return result
def check_recipe_choices(self):
'''Checks what recipes are being built to see which of the alternative
and optional dependencies are being used,
and returns a list of these.'''
recipes = []
built_recipes = self.ctx.recipe_build_order
for recipe in self.depends:
if isinstance(recipe, (tuple, list)):
for alternative in recipe:
if alternative in built_recipes:
recipes.append(alternative)
break
for recipe in self.opt_depends:
if recipe in built_recipes:
recipes.append(recipe)
return sorted(recipes)
def get_build_container_dir(self, arch):
'''Given the arch name, returns the directory where it will be
built.
This returns a different directory depending on what
alternative or optional dependencies are being built.
'''
dir_name = self.get_dir_name()
return join(self.ctx.build_dir, 'other_builds', dir_name, arch)
def get_dir_name(self):
choices = self.check_recipe_choices()
dir_name = '-'.join([self.name] + choices)
return dir_name
def get_build_dir(self, arch):
'''Given the arch name, returns the directory where the
downloaded/copied package will be built.'''
return join(self.get_build_container_dir(arch), self.name)
def get_recipe_dir(self):
return join(self.ctx.root_dir, 'recipes', self.name)
# Public Recipe API to be subclassed if needed
def download_if_necessary(self):
info_main('Downloading {}'.format(self.name))
user_dir = environ.get('P4A_{}_DIR'.format(self.name.lower()))
if user_dir is not None:
            info('P4A_{}_DIR is set, skipping download for {}'.format(
                self.name.lower(), self.name))
return
self.download()
def download(self):
if self.url is None:
info('Skipping {} download as no URL is set'.format(self.name))
return
url = self.versioned_url
ma = match(u'^(.+)#md5=([0-9a-f]{32})$', url)
if ma: # fragmented URL?
if self.md5sum:
raise ValueError(
('Received md5sum from both the {} recipe '
'and its url').format(self.name))
url = ma.group(1)
expected_md5 = ma.group(2)
else:
expected_md5 = self.md5sum
shprint(sh.mkdir, '-p', join(self.ctx.packages_path, self.name))
with current_directory(join(self.ctx.packages_path, self.name)):
filename = shprint(sh.basename, url).stdout[:-1].decode('utf-8')
do_download = True
marker_filename = '.mark-{}'.format(filename)
if exists(filename) and isfile(filename):
if not exists(marker_filename):
shprint(sh.rm, filename)
elif expected_md5:
current_md5 = md5sum(filename)
if current_md5 != expected_md5:
debug('* Generated md5sum: {}'.format(current_md5))
debug('* Expected md5sum: {}'.format(expected_md5))
raise ValueError(
('Generated md5sum does not match expected md5sum '
'for {} recipe').format(self.name))
do_download = False
else:
do_download = False
# If we got this far, we will download
if do_download:
debug('Downloading {} from {}'.format(self.name, url))
shprint(sh.rm, '-f', marker_filename)
self.download_file(self.versioned_url, filename)
shprint(sh.touch, marker_filename)
if exists(filename) and isfile(filename) and expected_md5:
current_md5 = md5sum(filename)
if expected_md5 is not None:
if current_md5 != expected_md5:
debug('* Generated md5sum: {}'.format(current_md5))
debug('* Expected md5sum: {}'.format(expected_md5))
raise ValueError(
('Generated md5sum does not match expected md5sum '
'for {} recipe').format(self.name))
else:
info('{} download already cached, skipping'.format(self.name))
def unpack(self, arch):
info_main('Unpacking {} for {}'.format(self.name, arch))
build_dir = self.get_build_container_dir(arch)
user_dir = environ.get('P4A_{}_DIR'.format(self.name.lower()))
if user_dir is not None:
            info('P4A_{}_DIR exists, copying it instead of unpacking'.format(
                self.name.lower()))
if exists(self.get_build_dir(arch)):
return
shprint(sh.rm, '-rf', build_dir)
shprint(sh.mkdir, '-p', build_dir)
shprint(sh.rmdir, build_dir)
ensure_dir(build_dir)
shprint(sh.cp, '-a', user_dir, self.get_build_dir(arch))
return
if self.url is None:
info('Skipping {} unpack as no URL is set'.format(self.name))
return
filename = shprint(
sh.basename, self.versioned_url).stdout[:-1].decode('utf-8')
ma = match(u'^(.+)#md5=([0-9a-f]{32})$', filename)
if ma: # fragmented URL?
filename = ma.group(1)
with current_directory(build_dir):
directory_name = self.get_build_dir(arch)
if not exists(directory_name) or not isdir(directory_name):
extraction_filename = join(
self.ctx.packages_path, self.name, filename)
if isfile(extraction_filename):
if extraction_filename.endswith('.zip'):
try:
sh.unzip(extraction_filename)
except (sh.ErrorReturnCode_1, sh.ErrorReturnCode_2):
pass # return code 1 means unzipping had
# warnings but did complete,
# apparently happens sometimes with
# github zips
import zipfile
fileh = zipfile.ZipFile(extraction_filename, 'r')
root_directory = fileh.filelist[0].filename.split('/')[0]
if root_directory != basename(directory_name):
shprint(sh.mv, root_directory, directory_name)
elif (extraction_filename.endswith('.tar.gz') or
extraction_filename.endswith('.tgz') or
extraction_filename.endswith('.tar.bz2') or
extraction_filename.endswith('.tbz2') or
extraction_filename.endswith('.tar.xz') or
extraction_filename.endswith('.txz')):
sh.tar('xf', extraction_filename)
root_directory = shprint(
sh.tar, 'tf', extraction_filename).stdout.decode(
'utf-8').split('\n')[0].split('/')[0]
if root_directory != directory_name:
shprint(sh.mv, root_directory, directory_name)
else:
raise Exception(
'Could not extract {} download, it must be .zip, '
'.tar.gz or .tar.bz2 or .tar.xz'.format(extraction_filename))
elif isdir(extraction_filename):
mkdir(directory_name)
for entry in listdir(extraction_filename):
if entry not in ('.git',):
shprint(sh.cp, '-Rv',
join(extraction_filename, entry),
directory_name)
else:
raise Exception(
'Given path is neither a file nor a directory: {}'
.format(extraction_filename))
else:
info('{} is already unpacked, skipping'.format(self.name))
def get_recipe_env(self, arch=None, with_flags_in_cc=True):
"""Return the env specialized for the recipe
"""
if arch is None:
arch = self.filtered_archs[0]
return arch.get_env(with_flags_in_cc=with_flags_in_cc)
def prebuild_arch(self, arch):
'''Run any pre-build tasks for the Recipe. By default, this checks if
any prebuild_archname methods exist for the archname of the current
architecture, and runs them if so.'''
prebuild = "prebuild_{}".format(arch.arch.replace('-', '_'))
if hasattr(self, prebuild):
getattr(self, prebuild)()
else:
info('{} has no {}, skipping'.format(self.name, prebuild))
def is_patched(self, arch):
build_dir = self.get_build_dir(arch.arch)
return exists(join(build_dir, '.patched'))
def apply_patches(self, arch):
'''Apply any patches for the Recipe.'''
if self.patches:
info_main('Applying patches for {}[{}]'
.format(self.name, arch.arch))
if self.is_patched(arch):
info_main('{} already patched, skipping'.format(self.name))
return
for patch in self.patches:
if isinstance(patch, (tuple, list)):
patch, patch_check = patch
if not patch_check(arch=arch, recipe=self):
continue
self.apply_patch(
patch.format(version=self.version, arch=arch.arch),
arch.arch)
shprint(sh.touch, join(self.get_build_dir(arch.arch), '.patched'))
def should_build(self, arch):
'''Should perform any necessary test and return True only if it needs
building again.
'''
return True
def build_arch(self, arch):
'''Run any build tasks for the Recipe. By default, this checks if
any build_archname methods exist for the archname of the current
architecture, and runs them if so.'''
build = "build_{}".format(arch.arch)
if hasattr(self, build):
getattr(self, build)()
def postbuild_arch(self, arch):
'''Run any post-build tasks for the Recipe. By default, this checks if
any postbuild_archname methods exist for the archname of the
current architecture, and runs them if so.
'''
postbuild = "postbuild_{}".format(arch.arch)
if hasattr(self, postbuild):
getattr(self, postbuild)()
def prepare_build_dir(self, arch):
'''Copies the recipe data into a build dir for the given arch. By
default, this unpacks a downloaded recipe. You should override
it (or use a Recipe subclass with different behaviour) if you
want to do something else.
'''
self.unpack(arch)
def clean_build(self, arch=None):
'''Deletes all the build information of the recipe.
If arch is not None, only this arch dir is deleted. Otherwise
(the default) all builds for all archs are deleted.
By default, this just deletes the main build dir. If the
recipe has e.g. object files biglinked, or .so files stored
elsewhere, you should override this method.
This method is intended for testing purposes, it may have
strange results. Rebuild everything if this seems to happen.
'''
if arch is None:
base_dir = join(self.ctx.build_dir, 'other_builds', self.name)
else:
base_dir = self.get_build_container_dir(arch)
dirs = glob.glob(base_dir + '-*')
if exists(base_dir):
dirs.append(base_dir)
if not dirs:
warning(('Attempted to clean build for {} but found no existing '
'build dirs').format(self.name))
for directory in dirs:
if exists(directory):
info('Deleting {}'.format(directory))
shutil.rmtree(directory)
# Delete any Python distributions to ensure the recipe build
# doesn't persist in site-packages
shutil.rmtree(self.ctx.python_installs_dir)
def install_libs(self, arch, *libs):
libs_dir = self.ctx.get_libs_dir(arch.arch)
if not libs:
warning('install_libs called with no libraries to install!')
return
args = libs + (libs_dir,)
shprint(sh.cp, *args)
def has_libs(self, arch, *libs):
return all(map(lambda l: self.ctx.has_lib(arch.arch, l), libs))
@classmethod
def recipe_dirs(cls, ctx):
recipe_dirs = []
if ctx.local_recipes is not None:
recipe_dirs.append(realpath(ctx.local_recipes))
if ctx.storage_dir:
recipe_dirs.append(join(ctx.storage_dir, 'recipes'))
recipe_dirs.append(join(ctx.root_dir, "recipes"))
return recipe_dirs
@classmethod
def list_recipes(cls, ctx):
forbidden_dirs = ('__pycache__', )
for recipes_dir in cls.recipe_dirs(ctx):
if recipes_dir and exists(recipes_dir):
for name in listdir(recipes_dir):
if name in forbidden_dirs:
continue
fn = join(recipes_dir, name)
if isdir(fn):
yield name
@classmethod
def get_recipe(cls, name, ctx):
'''Returns the Recipe with the given name, if it exists.'''
if not hasattr(cls, "recipes"):
cls.recipes = {}
if name in cls.recipes:
return cls.recipes[name]
recipe_file = None
for recipes_dir in cls.recipe_dirs(ctx):
recipe_file = join(recipes_dir, name, '__init__.py')
if exists(recipe_file):
break
recipe_file = None
if not recipe_file:
raise IOError('Recipe does not exist: {}'.format(name))
mod = import_recipe('pythonforandroid.recipes.{}'.format(name), recipe_file)
if len(logger.handlers) > 1:
logger.removeHandler(logger.handlers[1])
recipe = mod.recipe
recipe.ctx = ctx
cls.recipes[name] = recipe
return recipe
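
# ---------------------------------------------------------------------------
# A minimal illustrative recipe showing how the class attributes documented
# above are typically filled in. The package name, URL and patch files are
# hypothetical; a real recipe lives in recipes/<name>/__init__.py and
# exposes a module-level `recipe` instance, which get_recipe() above loads.
class FooRecipe(Recipe):
    version = '1.2.3'  # for a recipe named 'foo', VERSION_foo overrides this
    url = 'https://example.com/foo/foo-{version}.tar.gz'
    md5sum = None  # set to the tarball's md5 to have downloads verified
    depends = [('python2', 'python3crystax'), 'setuptools']  # tuple = alternatives
    patches = [
        'fix-build.patch',  # plain string: path relative to the recipe dir
        # (file, callable) pair: apply_patches() calls the callable with
        # arch and recipe kwargs and skips the patch if it returns False.
        ('armeabi-only.patch', lambda arch, recipe: arch.arch == 'armeabi'),
    ]
    # With these depends, get_dir_name() would yield e.g. 'foo-python2',
    # keeping builds with different dependency choices separate.


recipe = FooRecipe()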
class IncludedFilesBehaviour(object):
'''Recipe mixin class that will automatically unpack files included in
the recipe directory.'''
src_filename = None
def prepare_build_dir(self, arch):
if self.src_filename is None:
print('IncludedFilesBehaviour failed: no src_filename specified')
exit(1)
shprint(sh.rm, '-rf', self.get_build_dir(arch))
shprint(sh.cp, '-a', join(self.get_recipe_dir(), self.src_filename),
self.get_build_dir(arch))
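
# Illustrative only: the mixin must come first in the bases so that its
# prepare_build_dir() above overrides Recipe's download-based one; 'src'
# is a hypothetical directory inside the recipe dir.
class LocalSourceRecipe(IncludedFilesBehaviour, Recipe):
    version = 'dev'
    src_filename = 'src'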
class BootstrapNDKRecipe(Recipe):
'''A recipe class for recipes built in an Android project jni dir with
    an Android.mk. These are not cached separately, but built in the
bootstrap's own building directory.
To build an NDK project which is not part of the bootstrap, see
:class:`~pythonforandroid.recipe.NDKRecipe`.
'''
dir_name = None # The name of the recipe build folder in the jni dir
def get_build_container_dir(self, arch):
return self.get_jni_dir()
def get_build_dir(self, arch):
if self.dir_name is None:
raise ValueError('{} recipe doesn\'t define a dir_name, but '
'this is necessary'.format(self.name))
return join(self.get_build_container_dir(arch), self.dir_name)
def get_jni_dir(self):
return join(self.ctx.bootstrap.build_dir, 'jni')
class NDKRecipe(Recipe):
'''A recipe class for any NDK project not included in the bootstrap.'''
generated_libraries = []
def should_build(self, arch):
lib_dir = self.get_lib_dir(arch)
for lib in self.generated_libraries:
if not exists(join(lib_dir, lib)):
return True
return False
def get_lib_dir(self, arch):
return join(self.get_build_dir(arch.arch), 'obj', 'local', arch.arch)
def get_jni_dir(self, arch):
return join(self.get_build_dir(arch.arch), 'jni')
def build_arch(self, arch, *extra_args):
super(NDKRecipe, self).build_arch(arch)
env = self.get_recipe_env(arch)
with current_directory(self.get_build_dir(arch.arch)):
shprint(sh.ndk_build, 'V=1', 'APP_ABI=' + arch.arch, *extra_args, _env=env)
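
# Illustrative NDK recipe sketch (hypothetical library): should_build()
# above keeps returning True until every name in generated_libraries
# exists under obj/local/<arch> in the build dir.
class LibFooNDKRecipe(NDKRecipe):
    version = '1.0'
    url = 'https://example.com/libfoo/libfoo-{version}.tar.gz'
    generated_libraries = ['libfoo.so']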
class PythonRecipe(Recipe):
site_packages_name = None
'''The name of the module's folder when installed in the Python
site-packages (e.g. for pyjnius it is 'jnius')'''
call_hostpython_via_targetpython = True
'''If True, tries to install the module using the hostpython binary
copied to the target (normally arm) python build dir. However, this
will fail if the module tries to import e.g. _io.so. Set this to False
to call hostpython from its own build dir, installing the module in
the right place via arguments to setup.py. However, this may not set
the environment correctly and so False is not the default.'''
install_in_hostpython = False
'''If True, additionally installs the module in the hostpython build
dir. This will make it available to other recipes if
call_hostpython_via_targetpython is False.
'''
install_in_targetpython = True
'''If True, installs the module in the targetpython installation dir.
This is almost always what you want to do.'''
setup_extra_args = []
    '''List of extra arguments to pass to setup.py.'''
def clean_build(self, arch=None):
super(PythonRecipe, self).clean_build(arch=arch)
name = self.folder_name
python_install_dirs = glob.glob(join(self.ctx.python_installs_dir, '*'))
for python_install in python_install_dirs:
site_packages_dir = glob.glob(join(python_install, 'lib', 'python*',
'site-packages'))
if site_packages_dir:
build_dir = join(site_packages_dir[0], name)
if exists(build_dir):
                    info('Deleting {}'.format(build_dir))
rmtree(build_dir)
@property
def real_hostpython_location(self):
if 'hostpython2' in self.ctx.recipe_build_order:
return join(
Recipe.get_recipe('hostpython2', self.ctx).get_build_dir(),
'hostpython')
else:
python_recipe = self.ctx.python_recipe
return 'python{}'.format(python_recipe.version)
@property
def hostpython_location(self):
if not self.call_hostpython_via_targetpython:
return self.real_hostpython_location
return self.ctx.hostpython
@property
def folder_name(self):
'''The name of the build folders containing this recipe.'''
name = self.site_packages_name
if name is None:
name = self.name
return name
def get_recipe_env(self, arch=None, with_flags_in_cc=True):
env = super(PythonRecipe, self).get_recipe_env(arch, with_flags_in_cc)
env['PYTHONNOUSERSITE'] = '1'
if not self.call_hostpython_via_targetpython:
hppath = []
hppath.append(join(dirname(self.hostpython_location), 'Lib'))
hppath.append(join(hppath[0], 'site-packages'))
builddir = join(dirname(self.hostpython_location), 'build')
hppath += [join(builddir, d) for d in listdir(builddir)
if isdir(join(builddir, d))]
if 'PYTHONPATH' in env:
env['PYTHONPATH'] = ':'.join(hppath + [env['PYTHONPATH']])
else:
env['PYTHONPATH'] = ':'.join(hppath)
return env
def should_build(self, arch):
name = self.folder_name
if self.ctx.has_package(name):
info('Python package already exists in site-packages')
return False
info('{} apparently isn\'t already in site-packages'.format(name))
return True
def build_arch(self, arch):
'''Install the Python module by calling setup.py install with
the target Python dir.'''
super(PythonRecipe, self).build_arch(arch)
self.install_python_package(arch)
def install_python_package(self, arch, name=None, env=None, is_dir=True):
'''Automate the installation of a Python package (or a cython
package where the cython components are pre-built).'''
# arch = self.filtered_archs[0] # old kivy-ios way
if name is None:
name = self.name
if env is None:
env = self.get_recipe_env(arch)
info('Installing {} into site-packages'.format(self.name))
with current_directory(self.get_build_dir(arch.arch)):
hostpython = sh.Command(self.hostpython_location)
if self.ctx.python_recipe.from_crystax:
hpenv = env.copy()
shprint(hostpython, 'setup.py', 'install', '-O2',
'--root={}'.format(self.ctx.get_python_install_dir()),
'--install-lib=.',
_env=hpenv, *self.setup_extra_args)
elif self.call_hostpython_via_targetpython:
shprint(hostpython, 'setup.py', 'install', '-O2', _env=env,
*self.setup_extra_args)
else:
hppath = join(dirname(self.hostpython_location), 'Lib',
'site-packages')
hpenv = env.copy()
if 'PYTHONPATH' in hpenv:
hpenv['PYTHONPATH'] = ':'.join([hppath] +
hpenv['PYTHONPATH'].split(':'))
else:
hpenv['PYTHONPATH'] = hppath
shprint(hostpython, 'setup.py', 'install', '-O2',
'--root={}'.format(self.ctx.get_python_install_dir()),
'--install-lib=lib/python2.7/site-packages',
_env=hpenv, *self.setup_extra_args)
# If asked, also install in the hostpython build dir
if self.install_in_hostpython:
self.install_hostpython_package(arch)
def get_hostrecipe_env(self, arch):
env = {}
env['PATH'] = '/usr/local/bin:/usr/bin:/bin'
env['PYTHONPATH'] = join(dirname(self.real_hostpython_location),
'Lib', 'site-packages')
return env
def install_hostpython_package(self, arch):
info('install_hostpython_package: {}'.format(self.name))
env = self.get_hostrecipe_env(arch)
real_hostpython = sh.Command(self.real_hostpython_location)
shprint(real_hostpython, 'setup.py', 'install', '-O2',
'--root={}'.format(dirname(self.real_hostpython_location)),
'--install-lib=Lib/site-packages',
_env=env, *self.setup_extra_args)
def pip_install_hostpython_package(self, arch):
""" calling recipe build_arch() must have 'host_pip' in depends[] """
info('pip_install_hostpython_package: {}'.format(self.name))
package = (self.name + '==' + self.version if self.version
else self.name)
env = self.get_hostrecipe_env(arch)
real_hostpython = sh.Command(self.real_hostpython_location)
build_dir = normpath(join(dirname(self.real_hostpython_location),
'..', self.name))
target_dir = env['PYTHONPATH']
ensure_dir(build_dir)
with current_directory(build_dir):
shprint(real_hostpython, '-mpip', 'install', '--upgrade', package,
"--build={}".format(build_dir),
"--target={}".format(target_dir),
_env=env, *self.setup_extra_args)
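
# Illustrative pure-Python recipe (hypothetical package) using the
# PythonRecipe flags documented above: hostpython is invoked from its own
# build dir rather than via the target python, and the module is also
# installed into hostpython so later recipes can import it at build time.
class BazRecipe(PythonRecipe):
    version = '0.9'
    url = 'https://example.com/baz/baz-{version}.tar.gz'
    site_packages_name = 'baz'  # folder name inside site-packages
    call_hostpython_via_targetpython = False
    install_in_hostpython = True
    depends = ['setuptools']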
class CompiledComponentsPythonRecipe(PythonRecipe):
pre_build_ext = False
build_cmd = 'build_ext'
def get_recipe_env(self, arch):
env = super(CompiledComponentsPythonRecipe, self).get_recipe_env(arch)
env['LDFLAGS'] += ' -lpython2.7'
env['LD_LIBRARY_PATH'] = "/data/data/{}/lib".format(self.ctx.appId)
env['LDSHARED'] = env['CC'] + ' -pthread -shared -Wl,-O1 -Wl,-Bsymbolic-functions'
return env
def build_arch(self, arch):
'''Build any cython components, then install the Python module by
calling setup.py install with the target Python dir.
'''
Recipe.build_arch(self, arch)
self.build_compiled_components(arch)
self.install_python_package(arch)
def build_compiled_components(self, arch):
info('Building compiled components in {}'.format(self.name))
env = self.get_recipe_env(arch)
with current_directory(self.get_build_dir(arch.arch)):
hostpython = sh.Command(self.hostpython_location)
if self.install_in_hostpython:
shprint(hostpython, 'setup.py', 'clean', '--all', _env=env)
shprint(hostpython, 'setup.py', self.build_cmd, '-v',
_env=env, *self.setup_extra_args)
build_dir = glob.glob('build/lib.*')[0]
shprint(sh.find, build_dir, '-name', '"*.so"', '-exec',
env['STRIP'], '{}', ';', _env=env)
def install_hostpython_package(self, arch):
env = self.get_hostrecipe_env(arch)
self.rebuild_compiled_components(arch, env)
super(CompiledComponentsPythonRecipe, self).install_hostpython_package(arch)
def rebuild_compiled_components(self, arch, env):
info('Rebuilding compiled components in {}'.format(self.name))
hostpython = sh.Command(self.real_hostpython_location)
shprint(hostpython, 'setup.py', 'clean', '--all', _env=env)
shprint(hostpython, 'setup.py', self.build_cmd, '-v', _env=env,
*self.setup_extra_args)
class CppCompiledComponentsPythonRecipe(CompiledComponentsPythonRecipe):
""" Extensions that require the cxx-stl """
call_hostpython_via_targetpython = False
def get_recipe_env(self, arch):
env = super(CppCompiledComponentsPythonRecipe, self).get_recipe_env(arch)
keys = dict(
ctx=self.ctx,
arch=arch,
arch_noeabi=arch.arch.replace('eabi', ''),
pyroot=self.ctx.get_python_install_dir()
)
env['LDSHARED'] = env['CC'] + ' -pthread -shared -Wl,-O1 -Wl,-Bsymbolic-functions'
env['CFLAGS'] += " -I{pyroot}/include/python2.7 " \
" -I{ctx.ndk_dir}/platforms/android-{ctx.android_api}/arch-{arch_noeabi}/usr/include" \
" -I{ctx.ndk_dir}/sources/cxx-stl/gnu-libstdc++/{ctx.toolchain_version}/include" \
" -I{ctx.ndk_dir}/sources/cxx-stl/gnu-libstdc++/{ctx.toolchain_version}/libs/{arch.arch}/include".format(**keys)
env['CXXFLAGS'] = env['CFLAGS'] + ' -frtti -fexceptions'
env['LDFLAGS'] += " -L{ctx.ndk_dir}/sources/cxx-stl/gnu-libstdc++/{ctx.toolchain_version}/libs/{arch.arch}" \
" -lpython2.7" \
" -lgnustl_shared".format(**keys)
return env
    def build_compiled_components(self, arch):
super(CppCompiledComponentsPythonRecipe, self).build_compiled_components(arch)
# Copy libgnustl_shared.so
with current_directory(self.get_build_dir(arch.arch)):
sh.cp(
"{ctx.ndk_dir}/sources/cxx-stl/gnu-libstdc++/{ctx.toolchain_version}/libs/{arch.arch}/libgnustl_shared.so".format(ctx=self.ctx,arch=arch),
self.ctx.get_libs_dir(arch.arch)
)
class CythonRecipe(PythonRecipe):
pre_build_ext = False
cythonize = True
cython_args = []
def __init__(self, *args, **kwargs):
super(CythonRecipe, self).__init__(*args, **kwargs)
depends = self.depends
depends.append(('python2', 'python3crystax'))
depends = list(set(depends))
self.depends = depends
def build_arch(self, arch):
'''Build any cython components, then install the Python module by
calling setup.py install with the target Python dir.
'''
Recipe.build_arch(self, arch)
self.build_cython_components(arch)
self.install_python_package(arch)
def build_cython_components(self, arch):
info('Cythonizing anything necessary in {}'.format(self.name))
env = self.get_recipe_env(arch)
if self.ctx.python_recipe.from_crystax:
command = sh.Command('python{}'.format(self.ctx.python_recipe.version))
site_packages_dirs = command(
'-c', 'import site; print("\\n".join(site.getsitepackages()))')
site_packages_dirs = site_packages_dirs.stdout.decode('utf-8').split('\n')
if 'PYTHONPATH' in env:
env['PYTHONPATH'] = env['PYTHONPATH'] + ':{}'.format(':'.join(site_packages_dirs))
else:
env['PYTHONPATH'] = ':'.join(site_packages_dirs)
with current_directory(self.get_build_dir(arch.arch)):
hostpython = sh.Command(self.ctx.hostpython)
shprint(hostpython, '-c', 'import sys; print(sys.path)', _env=env)
print('cwd is', realpath(curdir))
info('Trying first build of {} to get cython files: this is '
'expected to fail'.format(self.name))
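            # Two-pass build: the first build_ext run is expected to fail on
            # the not-yet-generated .c files; if it does, the .pyx sources are
            # cythonized manually (cythonize_build) and build_ext is run a
            # second time.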
manually_cythonise = False
try:
shprint(hostpython, 'setup.py', 'build_ext', '-v', _env=env,
*self.setup_extra_args)
except sh.ErrorReturnCode_1:
print()
info('{} first build failed (as expected)'.format(self.name))
manually_cythonise = True
if manually_cythonise:
self.cythonize_build(env=env)
shprint(hostpython, 'setup.py', 'build_ext', '-v', _env=env,
_tail=20, _critical=True, *self.setup_extra_args)
else:
                info('First build appeared to complete correctly, skipping '
'cythonising.')
if 'python2' in self.ctx.recipe_build_order:
info('Stripping object files')
build_lib = glob.glob('./build/lib*')
shprint(sh.find, build_lib[0], '-name', '*.o', '-exec',
env['STRIP'], '{}', ';', _env=env)
if 'python3crystax' in self.ctx.recipe_build_order:
info('Stripping object files')
shprint(sh.find, '.', '-iname', '*.so', '-exec',
'/usr/bin/echo', '{}', ';', _env=env)
shprint(sh.find, '.', '-iname', '*.so', '-exec',
env['STRIP'].split(' ')[0], '--strip-unneeded',
# '/usr/bin/strip', '--strip-unneeded',
'{}', ';', _env=env)
def cythonize_file(self, env, build_dir, filename):
short_filename = filename
if filename.startswith(build_dir):
short_filename = filename[len(build_dir) + 1:]
info(u"Cythonize {}".format(short_filename))
cyenv = env.copy()
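        # The copied env swaps CYTHONPATH in for PYTHONPATH when provided,
        # and otherwise drops PYTHONPATH (and PYTHONNOUSERSITE) so the cython
        # invocation below sees the host environment rather than the target
        # Python's paths.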
if 'CYTHONPATH' in cyenv:
cyenv['PYTHONPATH'] = cyenv['CYTHONPATH']
elif 'PYTHONPATH' in cyenv:
del cyenv['PYTHONPATH']
if 'PYTHONNOUSERSITE' in cyenv:
cyenv.pop('PYTHONNOUSERSITE')
cython = 'cython' if self.ctx.python_recipe.from_crystax else self.ctx.cython
cython_command = sh.Command(cython)
shprint(cython_command, filename, *self.cython_args, _env=cyenv)
def cythonize_build(self, env, build_dir="."):
if not self.cythonize:
info('Running cython cancelled per recipe setting')
return
info('Running cython where appropriate')
for root, dirnames, filenames in walk("."):
for filename in fnmatch.filter(filenames, "*.pyx"):
self.cythonize_file(env, build_dir, join(root, filename))
def get_recipe_env(self, arch, with_flags_in_cc=True):
env = super(CythonRecipe, self).get_recipe_env(arch, with_flags_in_cc)
        env['LDFLAGS'] = env['LDFLAGS'] + ' -L{} -L{} -L{}'.format(
            self.ctx.get_libs_dir(arch.arch),
            self.ctx.libs_dir,
            join(self.ctx.bootstrap.build_dir, 'obj', 'local', arch.arch))
if self.ctx.python_recipe.from_crystax:
env['LDFLAGS'] = (env['LDFLAGS'] +
' -L{}'.format(join(self.ctx.bootstrap.build_dir, 'libs', arch.arch)))
# ' -L/home/asandy/.local/share/python-for-android/build/bootstrap_builds/sdl2/libs/armeabi '
if self.ctx.python_recipe.from_crystax:
env['LDSHARED'] = env['CC'] + ' -shared'
else:
env['LDSHARED'] = join(self.ctx.root_dir, 'tools', 'liblink.sh')
# shprint(sh.whereis, env['LDSHARED'], _env=env)
env['LIBLINK'] = 'NOTNONE'
env['NDKPLATFORM'] = self.ctx.ndk_platform
if self.ctx.copy_libs:
env['COPYLIBS'] = '1'
# Every recipe uses its own liblink path, object files are
# collected and biglinked later
liblink_path = join(self.get_build_container_dir(arch.arch),
'objects_{}'.format(self.name))
env['LIBLINK_PATH'] = liblink_path
ensure_dir(liblink_path)
if self.ctx.python_recipe.from_crystax:
env['CFLAGS'] = '-I{} '.format(
join(self.ctx.ndk_dir, 'sources', 'python',
self.ctx.python_recipe.version, 'include',
'python')) + env['CFLAGS']
# Temporarily hardcode the -lpython3.x as this does not
# get applied automatically in some environments. This
# will need generalising, along with the other hardcoded
# py3.5 references, to support other python3 or crystax
# python versions.
python3_version = self.ctx.python_recipe.version
python3_version = '.'.join(python3_version.split('.')[:2])
env['LDFLAGS'] = env['LDFLAGS'] + ' -lpython{}m'.format(python3_version)
return env
class TargetPythonRecipe(Recipe):
'''Class for target python recipes. Sets ctx.python_recipe to point to
itself, so as to know later what kind of Python was built or used.'''
from_crystax = False
'''True if the python is used from CrystaX, False otherwise (i.e. if
it is built by p4a).'''
def __init__(self, *args, **kwargs):
self._ctx = None
super(TargetPythonRecipe, self).__init__(*args, **kwargs)
def prebuild_arch(self, arch):
super(TargetPythonRecipe, self).prebuild_arch(arch)
if self.from_crystax and self.ctx.ndk != 'crystax':
error('The {} recipe can only be built when '
'using the CrystaX NDK. Exiting.'.format(self.name))
exit(1)
self.ctx.python_recipe = self
# @property
# def ctx(self):
# return self._ctx
# @ctx.setter
# def ctx(self, ctx):
# self._ctx = ctx
# ctx.python_recipe = self
def md5sum(filen):
'''Calculate the md5sum of a file.
'''
with open(filen, 'rb') as fileh:
md5 = hashlib.md5(fileh.read())
return md5.hexdigest()
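# A minimal alternative sketch (not used by the build above; the name and
# chunk size are assumptions for illustration): hashing in fixed-size chunks
# avoids reading a large archive fully into memory.
def md5sum_chunked(filen, chunk_size=2 ** 20):
    '''Calculate the md5sum of a file without loading it all at once.
    '''
    import hashlib  # local import keeps this sketch self-contained
    md5 = hashlib.md5()
    with open(filen, 'rb') as fileh:
        # iter() with a sentinel yields successive chunks until EOF
        for chunk in iter(lambda: fileh.read(chunk_size), b''):
            md5.update(chunk)
    return md5.hexdigest()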
|
|
"""
Created on Jun 15, 2013
:author: andrei
"""
import pickle
from bioflow.utils.log_behavior import get_logger
from bioflow.configs.main_configs import Dumps, reactome_biopax_path
from bioflow.configs.main_configs import reactome_forbidden_nodes
from bioflow.neo4j_db.GraphDeclarator import DatabaseGraph
from bioflow.bio_db_parsers.reactomeParser import ReactomeParser
log = get_logger(__name__)
memoization_dict = {} # accelerated access pointer to the objects
ForbiddenIDs = []
def insert_cell_locations(cell_locations_dict):
"""
Creates nodes corresponding to cell locations
:param cell_locations_dict:
"""
for Loc, displayName in cell_locations_dict.items():
memoization_dict[Loc] = DatabaseGraph.create('Location',
{'legacyID': Loc,
'displayName': displayName,
'parse_type': 'annotation',
'source': 'Reactome'})
def insert_minimal_annotations(annotated_node, annot_type_2_annot_list, source):
"""
    Inserts a minimal annotation for the provided annotated_node. Because this
    requires the direct, local DB ID, the annotation needs to be inserted at
    the same time as the annotated object.
    :param annotated_node:
    :param annot_type_2_annot_list:
    :param source:
"""
DatabaseGraph.attach_all_node_annotations(annotated_node.id,
annot_type_2_annot_list,
source=source)
def insert_reactome_class(neo4j_graph_class, reactome_obj_id_2_property_dict, parse_type):
"""
    Inserts a meta-object (i.e. any physical entity or collection thereof) as
    a node of the given neo4j graph class, pulling its annotation payload from
    the property dict.
    :param neo4j_graph_class:
    :param reactome_obj_id_2_property_dict:
    :param parse_type:
"""
size = len(list(reactome_obj_id_2_property_dict.keys()))
log.info('Starting inserting %s with %s elements', neo4j_graph_class, size)
breakpoints = 300
for i, (reactome_id, property_dict) in enumerate(reactome_obj_id_2_property_dict.items()):
if i % breakpoints == 0:
# TODO: [progress bar]
log.info('\t %.2f %%' % (float(i) / float(size) * 100.0))
reactome_obj_properties = {'legacyID': reactome_id,
'displayName': property_dict['displayName'],
'localization':
memoization_dict[property_dict['cellularLocation']]['displayName'],
'source': 'Reactome',
'parse_type': parse_type,
'main_connex': False}
primary = DatabaseGraph.create(neo4j_graph_class, reactome_obj_properties)
log.debug(primary)
log.debug(dir(primary))
log.debug("%s\n%s\n%s\n%s\n%s" %
(primary._properties,
primary.values(),
primary.labels,
primary.keys(),
primary.items()))
if reactome_id in reactome_forbidden_nodes:
ForbiddenIDs.append(primary.id)
memoization_dict[reactome_id] = primary
insert_minimal_annotations(primary,
property_dict['references'],
source='Reactome')
if 'cellularLocation' in list(property_dict.keys()):
secondary = memoization_dict[property_dict['cellularLocation']]
DatabaseGraph.link(primary.id,
secondary.id,
'is_localized',
{'source': 'Reactome',
'parse_type': 'annotates'})
if 'modification' in list(property_dict.keys()):
for modification in property_dict['modification']:
if 'location' in list(modification.keys()) and 'modification' in list(modification.keys()):
located_modification = DatabaseGraph.create('ModificationFeature',
{'legacyID': modification['ID'],
'type': 'post-translational_Mod',
'location': modification['location'],
'displayName': modification['modification'],
'source': 'Reactome',
'parse_type': 'physical_entity'})
DatabaseGraph.link(primary.id,
located_modification.id,
'is_able_to_modify',
{'source_note': 'Reactome_modification',
'source': 'Reactome',
'parse_type': 'refines'}
)
def insert_collections(collections_2_members):
"""
Links a collection object reference to the members of the collection.
:param collections_2_members:
"""
breakpoints = 300
size = len(list(collections_2_members.keys()))
for i, (collection, collection_property_dict) in enumerate(collections_2_members.items()):
if i % breakpoints == 0:
log.info('\t %.2f %%' % (float(i) / float(size) * 100))
for member in collection_property_dict['collectionMembers']:
collection_node = memoization_dict[collection]
member_node = memoization_dict[member]
DatabaseGraph.link(collection_node.id,
member_node.id,
'is_part_of_collection',
{'source': 'Reactome',
'parse_type': 'refines',
'source_note': 'Reactome_collection'
})
def insert_complex_parts(complex_property_dict):
"""
Links part of a complex to the complex
:param complex_property_dict:
"""
breakpoint = 300
size = len(list(complex_property_dict.keys()))
for i, key in enumerate(complex_property_dict.keys()):
if i % breakpoint == 0:
log.info('\t %.2f %%' % (float(i) / float(size) * 100.0))
for part in complex_property_dict[key]['parts']:
if 'Stoichiometry' not in part:
complex_node = memoization_dict[key]
part_node = memoization_dict[part]
DatabaseGraph.link(complex_node.id, part_node.id, 'is_part_of_complex',
{'source': 'Reactome',
'parse_type': 'physical_entity_molecular_interaction',
'source_note': 'Reactome_complex'
})
def insert_reactions(neo4j_graph_class, property_source_dict):
"""
    Inserts a reaction object (i.e. any reaction or type of reaction) as a
    node of the given neo4j graph class, pulling its annotation payload from
    the property dict.
:param neo4j_graph_class:
:param property_source_dict:
"""
for reaction, reaction_properties in property_source_dict.items():
memoization_dict[reaction] = DatabaseGraph.create(neo4j_graph_class,
{'legacyID': reaction,
'displayName': reaction_properties['displayName'],
'source': 'Reactome',
'parse_type': 'physical_entity'})
insert_minimal_annotations(
memoization_dict[reaction],
reaction_properties['references'],
source='Reactome')
for property_name, property_value_list in reaction_properties.items():
if property_name in ['left', 'right']:
for elt in property_value_list:
reaction_node = memoization_dict[reaction]
elt_node = memoization_dict[elt]
DatabaseGraph.link(reaction_node.id,
elt_node.id,
'is_reaction_participant',
{'side': property_name,
'source_note': 'Reactome_reaction',
'source': 'Reactome',
'parse_type': 'physical_entity_molecular_interaction'})
# TODO: catalyses need to be inserted as nodes and then cross-linked, for
#  better compatibility with the pathway search
def insert_catalysis(catalysises_dict):
"""
    Inserts all the catalysis links from one meta-element to another
:param catalysises_dict:
"""
for catalysis, catalysis_properties in catalysises_dict.items():
if 'controller' in list(catalysis_properties.keys()) \
and 'controlled' in list(catalysis_properties.keys()):
if catalysis_properties['controlled'] in list(memoization_dict.keys()) \
and catalysis_properties['controller'] in list(memoization_dict.keys()):
if 'ControlType' not in list(catalysises_dict[catalysis].keys()):
catalysis_properties['ControlType'] = 'UNKNOWN'
controller = memoization_dict[catalysis_properties['controller']] #
controlled = memoization_dict[catalysis_properties['controlled']] #
memoization_dict[catalysis] = DatabaseGraph.link(
controller.id,
controlled.id,
'is_catalysant',
{'legacyID': catalysis,
'controlType': catalysis_properties['ControlType'],
'source': 'Reactome',
'source_note': 'Reactome_catalysis',
'parse_type': 'physical_entity_molecular_interaction'
})
else:
log.debug("Catalysis targets not memoized: %s : %s, %s, %s", catalysis,
catalysises_dict[catalysis],
catalysises_dict[catalysis]['controlled'] in list(memoization_dict.keys()),
catalysises_dict[catalysis]['controller'] in list(memoization_dict.keys()))
else:
log.debug("Catalysis without control/controlled %s : %s, %s, %s,",
catalysis, catalysises_dict[catalysis],
'controller' in list(catalysises_dict[catalysis].keys()),
'controlled' in list(catalysises_dict[catalysis].keys()))
def insert_modulation(modulations_dict):
"""
    Inserts all the Modulation links from one meta-element to another
:param modulations_dict:
"""
for modulation, modulation_property_dict in modulations_dict.items():
controller = memoization_dict[modulation_property_dict['controller']] #
controlled = memoization_dict[modulation_property_dict['controlled']] #
memoization_dict[modulation] = DatabaseGraph.link(
controller.id,
controlled.id,
'is_regulant',
{'legacyID': modulation,
'controlType': modulation_property_dict['controlType'],
'source': 'Reactome',
'source_note': 'Reactome_modulation',
'parse_type': 'physical_entity_molecular_interaction'
}
)
def insert_pathways(pathway_steps, pathways):
"""
Inserts all the Pathways, linking and chaining subpathways
    Note: they have to be imported at the same time as the reactions.
:param pathway_steps:
:param pathways:
"""
breakpoints = 300
ps_len = len(list(pathway_steps.keys()))
p_len = len(list(pathways.keys()))
    log.info('Inserting Pathway steps with %s elements', ps_len)
for i, pathway_step in enumerate(pathway_steps.keys()):
if i % breakpoints == 0:
log.info('\t %.2f %%' % (float(i) / float(ps_len) * 100))
memoization_dict[pathway_step] = DatabaseGraph.create(
'PathwayStep',
{'legacyID': pathway_step,
'displayName': pathway_steps[pathway_step].get('displayName', pathway_step),
'source': 'Reactome',
'parse_type': 'annotation'})
    log.info('Inserting Pathways with %s elements', p_len)
# TODO: [reactome pathways sanity] links are inverted
for i, pathway in enumerate(list(pathways.keys())):
if i % breakpoints == 0:
log.info('\t %.2f %%' % (float(i) / float(p_len) * 100))
# print(pathways[pathway])
memoization_dict[pathway] = DatabaseGraph.create(
'Pathway',
{'legacyID': pathway,
'displayName': pathways[pathway].get('displayName', pathway),
'source': 'Reactome',
'parse_type': 'annotation'})
for i, pathway_step in enumerate(pathway_steps.keys()):
for component in pathway_steps[pathway_step]['components']:
parse_type = 'annotation_relationship'
if memoization_dict[component]['parse_type'] == 'physical_entity':
parse_type = 'annotates'
DatabaseGraph.link(memoization_dict[component].id,
memoization_dict[pathway_step].id,
'is_part_of_pathway',
{'source_note': 'Reactome_pathway',
'source': 'Reactome',
'parse_type': parse_type
})
for next_step in pathway_steps[pathway_step]['nextStep']:
# only links pathway steps
DatabaseGraph.link(memoization_dict[pathway_step].id,
memoization_dict[next_step].id,
'is_next_in_pathway',
{'source_note': 'Reactome_pathway',
'source': 'Reactome',
'parse_type': 'annotation_relationship'
})
for pathway in list(pathways.keys()):
for second_pathway in pathways[pathway]['PathwayStep']:
# only links to pathway steps
DatabaseGraph.link(memoization_dict[second_pathway].id,
memoization_dict[pathway].id,
'is_part_of_pathway',
{'source_note': 'Reactome_pathway',
'source': 'Reactome',
'parse_type': 'annotation_relationship'
})
for sub_pathway in pathways[pathway]['components']:
parse_type = 'annotation_relationship'
if memoization_dict[sub_pathway]['parse_type'] == 'physical_entity':
parse_type = 'annotates'
DatabaseGraph.link(memoization_dict[sub_pathway].id,
memoization_dict[pathway].id,
'is_part_of_pathway',
{'source_note': 'Reactome_pathway',
'source': 'Reactome',
'parse_type': parse_type
})
def re_memoize_reactome_nodes():
"""
In case the Reactome meta objects were already inserted, reloads them all to the local
dictionary for further annotation insertion
"""
reactome_nodes = DatabaseGraph.find(filter_dict={'source': 'Reactome'})
memoization_dict.update({node['legacyID']: node for node in reactome_nodes})
def insert_reactome(skip_import='N'):
"""
Performs the massive import of the Reactome database into the local neo4j database.
:param skip_import: * N => will skip nothing and implement the import once and for all.
* M => skips meta import, recovers the metas and resumes from the Reactions
import.
"""
reactome_parser = ReactomeParser(reactome_biopax_path)
reactome_parser.parse_all()
if skip_import == 'N':
insert_cell_locations(reactome_parser.CellularLocations)
insert_reactome_class('DNA',
reactome_parser.Dnas, 'physical_entity')
insert_reactome_class("DNA_Collection",
reactome_parser.Dna_Collections, 'physical_entity')
insert_reactome_class("RNA",
reactome_parser.Rnas, 'physical_entity')
insert_reactome_class("RNA_Collection",
reactome_parser.Rna_Collections, 'physical_entity')
insert_reactome_class("SmallMolecule",
reactome_parser.SmallMolecules, 'physical_entity')
insert_reactome_class("SmallMolecule_Collection",
reactome_parser.SmallMolecule_Collections, 'physical_entity')
insert_reactome_class("Protein",
reactome_parser.Proteins, 'physical_entity')
insert_reactome_class("Protein_Collection",
reactome_parser.Protein_Collections, 'physical_entity')
insert_reactome_class("Complex",
reactome_parser.Complexes, 'physical_entity')
insert_reactome_class("Complex_Collection",
reactome_parser.Complex_Collections, 'physical_entity')
insert_reactome_class("PhysicalEntity",
reactome_parser.PhysicalEntities, 'physical_entity')
insert_reactome_class("PhysicalEntity_Collection",
reactome_parser.PhysicalEntity_Collections, 'physical_entity')
log.info('Inserting DNA Collections with %s elements'
% len(reactome_parser.Dna_Collections))
insert_collections(reactome_parser.Dna_Collections)
log.info('Inserting RNA Collections with %s elements'
% len(reactome_parser.Rna_Collections))
insert_collections(reactome_parser.Rna_Collections)
log.info('Inserting Small Molecule Collections with %s elements'
% len(reactome_parser.SmallMolecule_Collections))
insert_collections(reactome_parser.SmallMolecule_Collections)
log.info('Inserting Protein Collections with %s elements'
% len(reactome_parser.Protein_Collections))
insert_collections(reactome_parser.Protein_Collections)
log.info('Inserting Complex Collections with %s elements'
% len(reactome_parser.Complex_Collections))
insert_collections(reactome_parser.Complex_Collections)
log.info('Inserting Physical Entity Collections with %s elements'
% len(reactome_parser.PhysicalEntity_Collections))
insert_collections(reactome_parser.PhysicalEntity_Collections)
log.info('Inserting Complexes with %s elements'
% (len(reactome_parser.Complexes)))
insert_complex_parts(reactome_parser.Complexes)
if skip_import == 'M':
re_memoize_reactome_nodes()
# print memoization_dict.keys()
# Meta insert/retrieval finished
log.info('Inserting Template Reactions with %s elements'
% len(reactome_parser.TemplateReactions))
insert_reactions("Template_Reaction",
reactome_parser.TemplateReactions)
log.info('Inserting Degradations with %s elements'
% len(reactome_parser.Degradations))
insert_reactions("Degradation",
reactome_parser.Degradations)
log.info('Inserting Biochemical Reactions with %s elements'
% len(reactome_parser.BiochemicalReactions))
insert_reactions("BiochemicalReaction",
reactome_parser.BiochemicalReactions)
# Reaction insert finished
log.info('Inserting Catalyses with %s elements'
% len(reactome_parser.Catalysises))
insert_catalysis(reactome_parser.Catalysises)
# log.info('Inserting Modulations with %s elements' # ceased to exist
# % len(reactome_parser.Modulations))
# insert_modulation(reactome_parser.Modulations)
insert_pathways(reactome_parser.PathwaySteps,
reactome_parser.Pathways)
# Q: There is no linking from the actual entities to pathways in Reactome?
# There are, but through pathway steps.
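# A minimal usage sketch (illustrative only, nothing here runs on import):
# a full first-time import is
#
#     insert_reactome(skip_import='N')
#
# whereas resuming from the reactions import once the meta objects are
# already in the database is
#
#     insert_reactome(skip_import='M')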
|
|
from pprint import pprint
from django.contrib.auth.models import AnonymousUser
from django.core.cache import cache
from django.template import Context, Template, TemplateSyntaxError, VariableDoesNotExist
from django.test import RequestFactory
from template_analyzer import get_node_instances
from fluent_contents import appsettings
from fluent_contents.analyzer import get_template_placeholder_data
from fluent_contents.models import Placeholder
from fluent_contents.templatetags.fluent_contents_tags import PagePlaceholderNode
from fluent_contents.tests.testapp.models import (
PlaceholderFieldTestPage,
RawHtmlTestItem,
TestPage,
)
from fluent_contents.tests.utils import AppTestCase
class TemplateTagTests(AppTestCase):
"""
Test cases for template tags
"""
dummy_request = RequestFactory().get("/")
dummy_request.user = AnonymousUser()
install_apps = ("fluent_contents.tests.testapp",)
def test_page_placeholder_metadata(self):
"""
The ``page_placeholder`` tag should expose metadata, which ``fluent_contents.analyzer`` can read.
"""
template = Template(
"""{% load fluent_contents_tags %}{% page_placeholder page "slot1" title="SlotTest1" role="s" %}"""
)
# Test raw Placeholder extraction
raw_placeholders = get_node_instances(template, PagePlaceholderNode)
self.assertEqual(len(raw_placeholders), 1)
self.assertEqual(raw_placeholders[0].get_slot(), "slot1")
self.assertEqual(raw_placeholders[0].get_title(), "SlotTest1")
self.assertEqual(raw_placeholders[0].get_role(), "s")
# Now test the public API, that returns PlaceholderData objects.
data = get_template_placeholder_data(template)
self.assertEqual(len(data), 1)
self.assertEqual(data[0].slot, "slot1")
self.assertEqual(data[0].title, "SlotTest1")
self.assertEqual(data[0].role, "s")
# Test2: fallback code
template = Template(
"""{% load fluent_contents_tags %}{% page_placeholder page "slot_test2" %}"""
)
# Test raw Placeholder extraction
raw_placeholders = get_node_instances(template, PagePlaceholderNode)
self.assertEqual(len(raw_placeholders), 1)
self.assertEqual(raw_placeholders[0].get_slot(), "slot_test2")
self.assertEqual(raw_placeholders[0].get_title(), "Slot Test2")
self.assertEqual(raw_placeholders[0].get_role(), None)
# Test the public API
data = get_template_placeholder_data(template)
self.assertEqual(len(data), 1)
self.assertEqual(data[0].slot, "slot_test2")
self.assertEqual(data[0].title, "Slot Test2")
self.assertEqual(data[0].role, "m") # Defaults to "main"
def test_page_placeholder(self):
"""
The ``page_placeholder`` tag should render the content associated with it.
"""
# Attach contents to the parent object.
page1 = TestPage.objects.create(contents="TEST!")
placeholder1 = Placeholder.objects.create_for_object(page1, "slot1")
item1 = RawHtmlTestItem.objects.create_for_placeholder(
placeholder1, html="<b>Item1!</b>", sort_order=1
)
item2 = RawHtmlTestItem.objects.create_for_placeholder(
placeholder1, html="<b>Item2!</b>", sort_order=2
)
# Test standard output
html = self._render(
"""{% load fluent_contents_tags %}{% page_placeholder page1 "slot1" %}""",
{"page1": page1},
)
self.assertEqual(html, "<b>Item1!</b><b>Item2!</b>")
# Test standard output + template variable
html = self._render(
"""{% load fluent_contents_tags %}{% page_placeholder page1 "slot1" template="testapp/placeholder_splitter.html" %}""",
{"page1": page1},
)
self.assertEqual(
html.replace("\n", ""),
'<b>Item1!</b><div class="splitter"></div><b>Item2!</b>',
)
# Test if the "page" variable is used as default argument
html = self._render(
"""{% load fluent_contents_tags %}{% page_placeholder "slot1" %}""",
{"page": page1},
)
self.assertEqual(html, "<b>Item1!</b><b>Item2!</b>")
        # Test that invalid slots fail silently, giving the user the chance to enter the data in the CMS.
html = self._render(
"""{% load fluent_contents_tags %}{% page_placeholder page1 "invalid_slot1" %}""",
{"page1": page1},
)
self.assertEqual(html, "<!-- placeholder 'invalid_slot1' does not yet exist -->")
# Test if a missing "page" variable fails.
self.assertRaises(
VariableDoesNotExist,
lambda: self._render(
"""{% load fluent_contents_tags %}{% page_placeholder "slot1" %}""", {}
),
)
        # Test if missing arguments are reported
self.assertRaises(
TemplateSyntaxError,
lambda: Template("""{% load fluent_contents_tags %}{% page_placeholder %}"""),
)
self.assertRaises(
TemplateSyntaxError,
lambda: Template(
"""{% load fluent_contents_tags %}{% page_placeholder arg1 arg2 arg3 %}"""
),
)
def test_render_placeholder(self):
"""
The ``render_placeholder`` tag should render objects by reference.
"""
# Attach contents to the parent object.
page2 = PlaceholderFieldTestPage.objects.create()
placeholder1 = Placeholder.objects.create_for_object(page2, "field_slot1")
item1 = RawHtmlTestItem.objects.create_for_placeholder(
placeholder1, html="<b>Item1!</b>", sort_order=1
)
item2 = RawHtmlTestItem.objects.create_for_placeholder(
placeholder1, html="<b>Item2!</b>", sort_order=2
)
# Make sure the unittests won't succeed because of cached output.
appsettings.FLUENT_CONTENTS_CACHE_OUTPUT = False
cache.clear()
# Test standard behavior, with an object reference
# - fetch ContentItem
# - fetch RawHtmlTestItem
with self.assertNumQueries(2) as ctx:
html = self._render(
"""{% load fluent_contents_tags %}{% render_placeholder placeholder1 %}""",
{"placeholder1": placeholder1},
)
self.assertEqual(html, "<b>Item1!</b><b>Item2!</b>")
# Test passing Placeholder via PlaceholderField (actually tests the PlaceholderFieldDescriptor)
# - fetch Placeholder
# - fetch ContentItem
# - fetch RawHtmlTestItem
with self.assertNumQueries(3) as ctx:
html = self._render(
"""{% load fluent_contents_tags %}{% render_placeholder page2.contents %}""",
{"page2": page2},
)
self.assertEqual(html, "<b>Item1!</b><b>Item2!</b>")
# Test passing a related object manager.
# - fetch Placeholder
# - parent is taken from RelatedManager
# - fetch ContentItem
# - fetch RawHtmlTestItem
with self.assertNumQueries(3) as ctx:
html = self._render(
"""{% load fluent_contents_tags %}{% render_placeholder page2.placeholder_set %}""",
{"page2": page2},
)
self.assertEqual(html, "<b>Item1!</b><b>Item2!</b>")
# Test if None values fail silently
with self.assertNumQueries(0) as ctx:
html = self._render(
"""{% load fluent_contents_tags %}{% render_placeholder none_object %}""",
{"none_object": None},
)
self.assertEqual(html, "<!-- placeholder object is None -->")
# Test if invalid objects are reported.
self.assertRaises(
ValueError,
lambda: self._render(
"""{% load fluent_contents_tags %}{% render_placeholder 123 %}""", {}
),
)
self.assertRaises(
ValueError,
lambda: self._render(
"""{% load fluent_contents_tags %}{% render_placeholder int_object %}""",
{"int_object": 456},
),
)
        # Test if missing arguments are reported
self.assertRaises(
TemplateSyntaxError,
lambda: Template("""{% load fluent_contents_tags %}{% render_placeholder %}"""),
)
self.assertRaises(
TemplateSyntaxError,
lambda: Template(
"""{% load fluent_contents_tags %}{% render_placeholder arg1 arg2 %}"""
),
)
def test_num_item_queries(self):
page3 = PlaceholderFieldTestPage.objects.create()
placeholder1 = Placeholder.objects.create_for_object(page3, "field_slot1")
item1 = RawHtmlTestItem.objects.create_for_placeholder(
placeholder1, html="<b>Item1!</b>", sort_order=1
)
item2 = RawHtmlTestItem.objects.create_for_placeholder(
placeholder1, html="<b>Item2!</b>", sort_order=2
)
appsettings.FLUENT_CONTENTS_CACHE_OUTPUT = True
appsettings.FLUENT_CONTENTS_CACHE_PLACEHOLDER_OUTPUT = False # No full caching
cache.clear()
# First time:
# - fetch ContentItem
# - fetch RawHtmlTestItem
with self.assertNumQueries(2) as ctx:
self._render(
"""{% load fluent_contents_tags %}{% render_placeholder placeholder1 %}""",
{"placeholder1": placeholder1},
)
# pprint(ctx.captured_queries)
# Second time
# - fetch ContentItem
with self.assertNumQueries(1) as ctx:
self._render(
"""{% load fluent_contents_tags %}{% render_placeholder placeholder1 %}""",
{"placeholder1": placeholder1},
)
# pprint(ctx.captured_queries)
# Using page_placeholder
# - fetch Placeholder
# - fetch ContentItem
with self.assertNumQueries(2) as ctx:
self._render(
"""{% load fluent_contents_tags %}{% page_placeholder 'field_slot1' %}""",
{"page": page3},
)
# pprint(ctx.captured_queries)
# Using page_placeholder, use fallback
# - fetch Placeholder
# - fetch ContentItem
# - fetch ContentItem fallback
with self.assertNumQueries(2) as ctx:
self._render(
"""{% load fluent_contents_tags %}{% page_placeholder 'field_slot1' fallback=True %}""",
{"page": page3},
)
# pprint(ctx.captured_queries)
def test_num_placeholder_queries(self):
page3 = PlaceholderFieldTestPage.objects.create()
placeholder1 = Placeholder.objects.create_for_object(page3, "field_slot1")
item1 = RawHtmlTestItem.objects.create_for_placeholder(
placeholder1, html="<b>Item1!</b>", sort_order=1
)
item2 = RawHtmlTestItem.objects.create_for_placeholder(
placeholder1, html="<b>Item2!</b>", sort_order=2
)
appsettings.FLUENT_CONTENTS_CACHE_OUTPUT = True
appsettings.FLUENT_CONTENTS_CACHE_PLACEHOLDER_OUTPUT = True
cache.clear()
# First time:
# - fetch ContentItem
# - fetch RawHtmlTestItem
with self.assertNumQueries(2) as ctx:
self._render(
"""{% load fluent_contents_tags %}{% render_placeholder placeholder1 %}""",
{"placeholder1": placeholder1},
)
# pprint(ctx.captured_queries)
# Second time
with self.assertNumQueries(0) as ctx:
self._render(
"""{% load fluent_contents_tags %}{% render_placeholder placeholder1 %}""",
{"placeholder1": placeholder1},
)
# pprint(ctx.captured_queries)
# Using page_placeholder
with self.assertNumQueries(0) as ctx:
self._render(
"""{% load fluent_contents_tags %}{% page_placeholder 'field_slot1' %}""",
{"page": page3},
)
# pprint(ctx.captured_queries)
# Using page_placeholder, use fallback
with self.assertNumQueries(0) as ctx:
self._render(
"""{% load fluent_contents_tags %}{% page_placeholder 'field_slot1' fallback=True %}""",
{"page": page3},
)
# pprint(ctx.captured_queries)
def _render(self, template_code, context_data):
"""
Render a template
"""
template = Template(template_code)
context = Context(context_data)
context["request"] = self.dummy_request
return template.render(context)
|
|
# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Backup manager manages volume backups.
Volume Backups are full copies of persistent volumes stored in a backup
store e.g. an object store or any other backup store if and when support is
added. They are usable without the original object being available. A
volume backup can be restored to the original volume it was created from or
any other available volume with a minimum size of the original volume.
Volume backups can be created, restored, deleted and listed.
**Related Flags**
:backup_topic: What :mod:`rpc` topic to listen to (default:
`cinder-backup`).
:backup_manager: The module name of a class derived from
:class:`manager.Manager` (default:
:class:`cinder.backup.manager.Manager`).
"""
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_utils import excutils
from oslo_utils import importutils
import six
from cinder.backup import driver
from cinder.backup import rpcapi as backup_rpcapi
from cinder import context
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder import manager
from cinder import objects
from cinder import quota
from cinder import rpc
from cinder import utils
from cinder.volume import utils as volume_utils
LOG = logging.getLogger(__name__)
backup_manager_opts = [
cfg.StrOpt('backup_driver',
default='cinder.backup.drivers.swift',
help='Driver to use for backups.',)
]
# This map doesn't need to be extended in the future since it's only
# for old backup services
mapper = {'cinder.backup.services.swift': 'cinder.backup.drivers.swift',
'cinder.backup.services.ceph': 'cinder.backup.drivers.ceph'}
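# Illustrative behaviour of this mapping (see _map_service_to_driver below):
# 'cinder.backup.services.swift' resolves to 'cinder.backup.drivers.swift',
# while any value not present in the map is returned unchanged.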
CONF = cfg.CONF
CONF.register_opts(backup_manager_opts)
QUOTAS = quota.QUOTAS
class BackupManager(manager.SchedulerDependentManager):
"""Manages backup of block storage devices."""
RPC_API_VERSION = '1.2'
target = messaging.Target(version=RPC_API_VERSION)
def __init__(self, service_name=None, *args, **kwargs):
self.service = importutils.import_module(self.driver_name)
self.az = CONF.storage_availability_zone
self.volume_managers = {}
self._setup_volume_drivers()
self.backup_rpcapi = backup_rpcapi.BackupAPI()
super(BackupManager, self).__init__(service_name='backup',
*args, **kwargs)
@property
def driver_name(self):
"""This function maps old backup services to backup drivers."""
return self._map_service_to_driver(CONF.backup_driver)
def _map_service_to_driver(self, service):
"""Maps services to drivers."""
if service in mapper:
return mapper[service]
return service
@property
def driver(self):
return self._get_driver()
def _get_volume_backend(self, host=None, allow_null_host=False):
if host is None:
if not allow_null_host:
msg = _("NULL host not allowed for volume backend lookup.")
raise exception.BackupFailedToGetVolumeBackend(msg)
else:
LOG.debug("Checking hostname '%s' for backend info.", host)
part = host.partition('@')
if (part[1] == '@') and (part[2] != ''):
backend = part[2]
LOG.debug("Got backend '%s'.", backend)
return backend
LOG.info(_LI("Backend not found in hostname (%s) so using default."),
host)
if 'default' not in self.volume_managers:
# For multi-backend we just pick the top of the list.
            return list(self.volume_managers.keys())[0]
return 'default'
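    # Illustrative inputs for the lookup above (hostnames are hypothetical):
    # host='node1@ceph' yields backend 'ceph', while host='node1' carries no
    # backend part and falls through to the default (or first registered)
    # manager.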
def _get_manager(self, backend):
LOG.debug("Manager requested for volume_backend '%s'.",
backend)
if backend is None:
LOG.debug("Fetching default backend.")
backend = self._get_volume_backend(allow_null_host=True)
if backend not in self.volume_managers:
msg = (_("Volume manager for backend '%s' does not exist.") %
(backend))
raise exception.BackupFailedToGetVolumeBackend(msg)
return self.volume_managers[backend]
def _get_driver(self, backend=None):
LOG.debug("Driver requested for volume_backend '%s'.",
backend)
if backend is None:
LOG.debug("Fetching default backend.")
backend = self._get_volume_backend(allow_null_host=True)
mgr = self._get_manager(backend)
mgr.driver.db = self.db
return mgr.driver
def _setup_volume_drivers(self):
if CONF.enabled_backends:
for backend in CONF.enabled_backends:
host = "%s@%s" % (CONF.host, backend)
mgr = importutils.import_object(CONF.volume_manager,
host=host,
service_name=backend)
config = mgr.configuration
backend_name = config.safe_get('volume_backend_name')
LOG.debug("Registering backend %(backend)s (host=%(host)s "
"backend_name=%(backend_name)s).",
{'backend': backend, 'host': host,
'backend_name': backend_name})
self.volume_managers[backend] = mgr
else:
default = importutils.import_object(CONF.volume_manager)
LOG.debug("Registering default backend %s.", default)
self.volume_managers['default'] = default
def _init_volume_driver(self, ctxt, driver):
LOG.info(_LI("Starting volume driver %(driver_name)s (%(version)s)."),
{'driver_name': driver.__class__.__name__,
'version': driver.get_version()})
try:
driver.do_setup(ctxt)
driver.check_for_setup_error()
except Exception:
LOG.exception(_LE("Error encountered during initialization of "
"driver: %(name)s."),
{'name': driver.__class__.__name__})
# we don't want to continue since we failed
# to initialize the driver correctly.
return
driver.set_initialized()
def _update_backup_error(self, backup, context, err):
backup.status = 'error'
backup.fail_reason = err
backup.save()
def init_host(self):
"""Run initialization needed for a standalone service."""
ctxt = context.get_admin_context()
for mgr in self.volume_managers.values():
self._init_volume_driver(ctxt, mgr.driver)
LOG.info(_LI("Cleaning up incomplete backup operations."))
volumes = self.db.volume_get_all_by_host(ctxt, self.host)
for volume in volumes:
volume_host = volume_utils.extract_host(volume['host'], 'backend')
backend = self._get_volume_backend(host=volume_host)
attachments = volume['volume_attachment']
if attachments:
if (volume['status'] == 'backing-up' and
volume['previous_status'] == 'available'):
LOG.info(_LI('Resetting volume %(vol_id)s to previous '
'status %(status)s (was backing-up).'),
{'vol_id': volume['id'],
'status': volume['previous_status']})
mgr = self._get_manager(backend)
for attachment in attachments:
if (attachment['attached_host'] == self.host and
attachment['instance_uuid'] is None):
mgr.detach_volume(ctxt, volume['id'],
attachment['id'])
elif (volume['status'] == 'backing-up' and
volume['previous_status'] == 'in-use'):
LOG.info(_LI('Resetting volume %(vol_id)s to previous '
'status %(status)s (was backing-up).'),
{'vol_id': volume['id'],
'status': volume['previous_status']})
                    self.db.volume_update(ctxt, volume['id'],
                                          {'status': volume['previous_status']})
elif volume['status'] == 'restoring-backup':
                    LOG.info(_LI('Setting volume %s to error_restoring '
'(was restoring-backup).'), volume['id'])
mgr = self._get_manager(backend)
for attachment in attachments:
if (attachment['attached_host'] == self.host and
attachment['instance_uuid'] is None):
mgr.detach_volume(ctxt, volume['id'],
attachment['id'])
self.db.volume_update(ctxt, volume['id'],
{'status': 'error_restoring'})
# TODO(smulcahy) implement full resume of backup and restore
# operations on restart (rather than simply resetting)
backups = objects.BackupList.get_all_by_host(ctxt, self.host)
for backup in backups:
if backup['status'] == 'creating':
LOG.info(_LI('Resetting backup %s to error (was creating).'),
backup['id'])
err = 'incomplete backup reset on manager restart'
self._update_backup_error(backup, ctxt, err)
if backup['status'] == 'restoring':
LOG.info(_LI('Resetting backup %s to '
'available (was restoring).'),
backup['id'])
backup.status = 'available'
backup.save()
if backup['status'] == 'deleting':
LOG.info(_LI('Resuming delete on backup: %s.'), backup['id'])
self.delete_backup(ctxt, backup)
self._cleanup_temp_volumes_snapshots(backups)
def _cleanup_temp_volumes_snapshots(self, backups):
# NOTE(xyang): If the service crashes or gets restarted during the
# backup operation, there could be temporary volumes or snapshots
# that are not deleted. Make sure any temporary volumes or snapshots
        # created by the backup job are deleted when the service is started.
ctxt = context.get_admin_context()
for backup in backups:
try:
volume = self.db.volume_get(ctxt, backup.volume_id)
volume_host = volume_utils.extract_host(volume['host'],
'backend')
backend = self._get_volume_backend(host=volume_host)
mgr = self._get_manager(backend)
except (KeyError, exception.VolumeNotFound):
LOG.debug("Could not find a volume to clean up for "
"backup %s.", backup.id)
continue
if backup.temp_volume_id and backup.status == 'error':
temp_volume = self.db.volume_get(ctxt,
backup.temp_volume_id)
                # The temp volume should be deleted directly through the
                # volume driver, not through the volume manager.
mgr.driver.delete_volume(temp_volume)
self.db.volume_destroy(ctxt, temp_volume['id'])
if backup.temp_snapshot_id and backup.status == 'error':
temp_snapshot = objects.Snapshot.get_by_id(
ctxt, backup.temp_snapshot_id)
                # The temp snapshot should be deleted directly through the
                # volume driver, not through the volume manager.
mgr.driver.delete_snapshot(temp_snapshot)
with temp_snapshot.obj_as_admin():
self.db.volume_glance_metadata_delete_by_snapshot(
ctxt, temp_snapshot.id)
temp_snapshot.destroy()
def create_backup(self, context, backup):
"""Create volume backups using configured backup service."""
volume_id = backup.volume_id
volume = self.db.volume_get(context, volume_id)
previous_status = volume.get('previous_status', None)
LOG.info(_LI('Create backup started, backup: %(backup_id)s '
'volume: %(volume_id)s.'),
{'backup_id': backup.id, 'volume_id': volume_id})
self._notify_about_backup_usage(context, backup, "create.start")
volume_host = volume_utils.extract_host(volume['host'], 'backend')
backend = self._get_volume_backend(host=volume_host)
backup.host = self.host
backup.service = self.driver_name
backup.save()
expected_status = 'backing-up'
actual_status = volume['status']
if actual_status != expected_status:
err = _('Create backup aborted, expected volume status '
'%(expected_status)s but got %(actual_status)s.') % {
'expected_status': expected_status,
'actual_status': actual_status,
}
self._update_backup_error(backup, context, err)
raise exception.InvalidVolume(reason=err)
expected_status = 'creating'
actual_status = backup.status
if actual_status != expected_status:
err = _('Create backup aborted, expected backup status '
'%(expected_status)s but got %(actual_status)s.') % {
'expected_status': expected_status,
'actual_status': actual_status,
}
self._update_backup_error(backup, context, err)
backup.save()
raise exception.InvalidBackup(reason=err)
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught,
# the volume status will be set back to available and
# the backup status to 'error'
utils.require_driver_initialized(self.driver)
backup_service = self.service.get_backup_driver(context)
self._get_driver(backend).backup_volume(context, backup,
backup_service)
except Exception as err:
with excutils.save_and_reraise_exception():
self.db.volume_update(context, volume_id,
{'status': previous_status,
'previous_status': 'error_backing-up'})
self._update_backup_error(backup, context, six.text_type(err))
# Restore the original status.
self.db.volume_update(context, volume_id,
{'status': previous_status,
'previous_status': 'backing-up'})
backup.status = 'available'
backup.size = volume['size']
backup.availability_zone = self.az
backup.save()
LOG.info(_LI('Create backup finished. backup: %s.'), backup.id)
self._notify_about_backup_usage(context, backup, "create.end")
def restore_backup(self, context, backup, volume_id):
"""Restore volume backups from configured backup service."""
LOG.info(_LI('Restore backup started, backup: %(backup_id)s '
'volume: %(volume_id)s.'),
{'backup_id': backup.id, 'volume_id': volume_id})
volume = self.db.volume_get(context, volume_id)
volume_host = volume_utils.extract_host(volume['host'], 'backend')
backend = self._get_volume_backend(host=volume_host)
self._notify_about_backup_usage(context, backup, "restore.start")
backup.host = self.host
backup.save()
expected_status = 'restoring-backup'
actual_status = volume['status']
if actual_status != expected_status:
err = (_('Restore backup aborted, expected volume status '
'%(expected_status)s but got %(actual_status)s.') %
{'expected_status': expected_status,
'actual_status': actual_status})
backup.status = 'available'
backup.save()
raise exception.InvalidVolume(reason=err)
expected_status = 'restoring'
actual_status = backup['status']
if actual_status != expected_status:
err = (_('Restore backup aborted: expected backup status '
'%(expected_status)s but got %(actual_status)s.') %
{'expected_status': expected_status,
'actual_status': actual_status})
self._update_backup_error(backup, context, err)
self.db.volume_update(context, volume_id, {'status': 'error'})
raise exception.InvalidBackup(reason=err)
if volume['size'] > backup['size']:
LOG.info(_LI('Volume: %(vol_id)s, size: %(vol_size)d is '
'larger than backup: %(backup_id)s, '
'size: %(backup_size)d, continuing with restore.'),
{'vol_id': volume['id'],
'vol_size': volume['size'],
'backup_id': backup['id'],
'backup_size': backup['size']})
backup_service = self._map_service_to_driver(backup['service'])
configured_service = self.driver_name
if backup_service != configured_service:
err = _('Restore backup aborted, the backup service currently'
' configured [%(configured_service)s] is not the'
' backup service that was used to create this'
' backup [%(backup_service)s].') % {
'configured_service': configured_service,
'backup_service': backup_service,
}
backup.status = 'available'
backup.save()
self.db.volume_update(context, volume_id, {'status': 'error'})
raise exception.InvalidBackup(reason=err)
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught,
# the volume status will be set back to available and
# the backup status to 'error'
utils.require_driver_initialized(self.driver)
backup_service = self.service.get_backup_driver(context)
self._get_driver(backend).restore_backup(context, backup,
volume,
backup_service)
except Exception:
with excutils.save_and_reraise_exception():
self.db.volume_update(context, volume_id,
{'status': 'error_restoring'})
backup.status = 'available'
backup.save()
self.db.volume_update(context, volume_id, {'status': 'available'})
backup.status = 'available'
backup.save()
LOG.info(_LI('Restore backup finished, backup %(backup_id)s restored'
' to volume %(volume_id)s.'),
{'backup_id': backup.id, 'volume_id': volume_id})
self._notify_about_backup_usage(context, backup, "restore.end")
def delete_backup(self, context, backup):
"""Delete volume backup from configured backup service."""
LOG.info(_LI('Delete backup started, backup: %s.'), backup.id)
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the backup status updated. Fail early since there
            # are no other statuses to change but the backup's
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized as err:
with excutils.save_and_reraise_exception():
self._update_backup_error(backup, context, six.text_type(err))
self._notify_about_backup_usage(context, backup, "delete.start")
backup.host = self.host
backup.save()
expected_status = 'deleting'
actual_status = backup.status
if actual_status != expected_status:
err = _('Delete_backup aborted, expected backup status '
'%(expected_status)s but got %(actual_status)s.') \
% {'expected_status': expected_status,
'actual_status': actual_status}
self._update_backup_error(backup, context, err)
raise exception.InvalidBackup(reason=err)
backup_service = self._map_service_to_driver(backup['service'])
if backup_service is not None:
configured_service = self.driver_name
if backup_service != configured_service:
err = _('Delete backup aborted, the backup service currently'
' configured [%(configured_service)s] is not the'
' backup service that was used to create this'
' backup [%(backup_service)s].')\
% {'configured_service': configured_service,
'backup_service': backup_service}
self._update_backup_error(backup, context, err)
raise exception.InvalidBackup(reason=err)
try:
backup_service = self.service.get_backup_driver(context)
backup_service.delete(backup)
except Exception as err:
with excutils.save_and_reraise_exception():
self._update_backup_error(backup, context,
six.text_type(err))
# Get reservations
try:
reserve_opts = {
'backups': -1,
'backup_gigabytes': -backup.size,
}
reservations = QUOTAS.reserve(context,
project_id=backup.project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_LE("Failed to update usages deleting backup"))
backup.destroy()
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations,
project_id=backup.project_id)
LOG.info(_LI('Delete backup finished, backup %s deleted.'), backup.id)
self._notify_about_backup_usage(context, backup, "delete.end")
def _notify_about_backup_usage(self,
context,
backup,
event_suffix,
extra_usage_info=None):
volume_utils.notify_about_backup_usage(
context, backup, event_suffix,
extra_usage_info=extra_usage_info,
host=self.host)
def export_record(self, context, backup):
"""Export all volume backup metadata details to allow clean import.
Export backup metadata so it could be re-imported into the database
without any prerequisite in the backup database.
:param context: running context
:param backup: backup object to export
        :returns: backup_record - a description of how to import the backup,
                  containing 'backup_url' (how to import the backup) and
                  'backup_service' (the needed driver)
:raises: InvalidBackup
"""
LOG.info(_LI('Export record started, backup: %s.'), backup.id)
expected_status = 'available'
actual_status = backup.status
if actual_status != expected_status:
err = (_('Export backup aborted, expected backup status '
'%(expected_status)s but got %(actual_status)s.') %
{'expected_status': expected_status,
'actual_status': actual_status})
raise exception.InvalidBackup(reason=err)
backup_record = {}
backup_record['backup_service'] = backup.service
backup_service = self._map_service_to_driver(backup.service)
configured_service = self.driver_name
if backup_service != configured_service:
err = (_('Export record aborted, the backup service currently'
' configured [%(configured_service)s] is not the'
' backup service that was used to create this'
' backup [%(backup_service)s].') %
{'configured_service': configured_service,
'backup_service': backup_service})
raise exception.InvalidBackup(reason=err)
# Call driver to create backup description string
try:
utils.require_driver_initialized(self.driver)
backup_service = self.service.get_backup_driver(context)
backup_url = backup_service.export_record(backup)
backup_record['backup_url'] = backup_url
except Exception as err:
msg = six.text_type(err)
raise exception.InvalidBackup(reason=msg)
LOG.info(_LI('Export record finished, backup %s exported.'), backup.id)
return backup_record
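    # A usage sketch (variable names are illustrative): the record returned
    # above can be replayed through import_record() on another deployment
    # running the same backup driver, e.g.
    #
    #     record = manager.export_record(ctxt, backup)
    #     manager.import_record(ctxt, new_backup,
    #                           record['backup_service'],
    #                           record['backup_url'],
    #                           backup_hosts=[])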
def import_record(self,
context,
backup,
backup_service,
backup_url,
backup_hosts):
"""Import all volume backup metadata details to the backup db.
:param context: running context
:param backup: The new backup object for the import
:param backup_service: The needed backup driver for import
:param backup_url: An identifier string to locate the backup
:param backup_hosts: Potential hosts to execute the import
:raises: InvalidBackup
:raises: ServiceNotFound
"""
LOG.info(_LI('Import record started, backup_url: %s.'), backup_url)
# Can we import this backup?
if (backup_service != self.driver_name):
# No, are there additional potential backup hosts in the list?
if len(backup_hosts) > 0:
                # try the next host on the list, maybe it can import
first_host = backup_hosts.pop()
self.backup_rpcapi.import_record(context,
first_host,
backup,
backup_service,
backup_url,
backup_hosts)
else:
# empty list - we are the last host on the list, fail
err = _('Import record failed, cannot find backup '
'service to perform the import. Request service '
'%(service)s') % {'service': backup_service}
self._update_backup_error(backup, context, err)
raise exception.ServiceNotFound(service_id=backup_service)
else:
# Yes...
try:
utils.require_driver_initialized(self.driver)
backup_service = self.service.get_backup_driver(context)
backup_options = backup_service.import_record(backup_url)
except Exception as err:
msg = six.text_type(err)
self._update_backup_error(backup, context, msg)
raise exception.InvalidBackup(reason=msg)
required_import_options = ['display_name',
'display_description',
'container',
'size',
'service_metadata',
'service',
'object_count']
backup_update = {}
backup_update['status'] = 'available'
backup_update['service'] = self.driver_name
backup_update['availability_zone'] = self.az
backup_update['host'] = self.host
for entry in required_import_options:
if entry not in backup_options:
                    msg = _('Backup metadata received from driver for '
                            'import is missing %s.') % entry
self._update_backup_error(backup, context, msg)
raise exception.InvalidBackup(reason=msg)
backup_update[entry] = backup_options[entry]
# Update the database
backup.update(backup_update)
backup.save()
# Verify backup
try:
if isinstance(backup_service, driver.BackupDriverWithVerify):
backup_service.verify(backup.id)
else:
LOG.warning(_LW('Backup service %(service)s does not '
'support verify. Backup id %(id)s is '
'not verified. Skipping verify.'),
{'service': self.driver_name,
'id': backup.id})
except exception.InvalidBackup as err:
with excutils.save_and_reraise_exception():
self._update_backup_error(backup, context,
six.text_type(err))
LOG.info(_LI('Import record id %s metadata from driver '
'finished.'), backup.id)
def reset_status(self, context, backup, status):
"""Reset volume backup status.
:param context: running context
:param backup: The backup object for reset status operation
:param status: The status to be set
:raises: InvalidBackup
:raises: BackupVerifyUnsupportedDriver
:raises: AttributeError
"""
LOG.info(_LI('Reset backup status started, backup_id: '
'%(backup_id)s, status: %(status)s.'),
{'backup_id': backup.id,
'status': status})
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the backup status updated. Fail early since there
            # are no other statuses to change but the backup's
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Backup driver has not been initialized"))
backup_service = self._map_service_to_driver(backup.service)
LOG.info(_LI('Backup service: %s.'), backup_service)
if backup_service is not None:
configured_service = self.driver_name
if backup_service != configured_service:
err = _('Reset backup status aborted, the backup service'
' currently configured [%(configured_service)s] '
'is not the backup service that was used to create'
' this backup [%(backup_service)s].') % \
{'configured_service': configured_service,
'backup_service': backup_service}
raise exception.InvalidBackup(reason=err)
# Verify backup
try:
# check whether the backup is ok or not
if status == 'available' and backup['status'] != 'restoring':
# check whether we could verify the backup is ok or not
if isinstance(backup_service,
driver.BackupDriverWithVerify):
backup_service.verify(backup.id)
backup.status = status
backup.save()
# driver does not support verify function
else:
msg = (_('Backup service %(configured_service)s '
'does not support verify. Backup id'
' %(id)s is not verified. '
'Skipping verify.') %
{'configured_service': self.driver_name,
'id': backup.id})
raise exception.BackupVerifyUnsupportedDriver(
reason=msg)
# reset status to error or from restoring to available
else:
if (status == 'error' or
(status == 'available' and
backup.status == 'restoring')):
backup.status = status
backup.save()
except exception.InvalidBackup:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Backup id %s is not invalid. "
"Skipping reset."), backup.id)
except exception.BackupVerifyUnsupportedDriver:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Backup service %(configured_service)s '
'does not support verify. Backup id '
'%(id)s is not verified. '
'Skipping verify.'),
{'configured_service': self.driver_name,
'id': backup.id})
except AttributeError:
msg = (_('Backup service %(service)s does not support '
'verify. Backup id %(id)s is not verified. '
'Skipping reset.') %
{'service': self.driver_name,
'id': backup.id})
LOG.error(msg)
raise exception.BackupVerifyUnsupportedDriver(
reason=msg)
# send notification to ceilometer
notifier_info = {'id': backup.id, 'update': {'status': status}}
notifier = rpc.get_notifier('backupStatusUpdate')
notifier.info(context, "backups.reset_status.end",
notifier_info)
def check_support_to_force_delete(self, context):
"""Check if the backup driver supports force delete operation.
:param context: running context
"""
backup_service = self.service.get_backup_driver(context)
return backup_service.support_force_delete
|
|
from pandac.PandaModules import *
from direct.interval.IntervalGlobal import *
from direct.directnotify import DirectNotifyGlobal
from toontown.hood import Place
from direct.fsm import ClassicFSM, State
from otp.distributed.TelemetryLimiter import RotationLimitToH, TLGatherAllAvs
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import ToontownBattleGlobals
from toontown.battle import BattlePlace
from toontown.suit import Suit
from otp.nametag.NametagConstants import *
from otp.nametag import NametagGlobals
import math
class CogHQBossBattle(BattlePlace.BattlePlace):
notify = DirectNotifyGlobal.directNotify.newCategory('CogHQBossBattle')
def __init__(self, loader, parentFSM, doneEvent):
BattlePlace.BattlePlace.__init__(self, loader, doneEvent)
self.parentFSM = parentFSM
self.bossCog = None
self.teleportInPosHpr = (0, 0, 0, 0, 0, 0)
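        # The FSM below extends the generic BattlePlace flow: 'battle' drives
        # the regular suit battles, 'finalBattle'/'crane'/'ouch'/'squished'
        # cover the boss round itself, and 'movie' plays the scripted
        # transitions between rounds.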
self.fsm = ClassicFSM.ClassicFSM('CogHQBossBattle', [State.State('start', self.enterStart, self.exitStart, ['walk',
'tunnelIn',
'teleportIn',
'movie']),
State.State('battle', self.enterBattle, self.exitBattle, ['walk', 'died', 'movie']),
State.State('finalBattle', self.enterFinalBattle, self.exitFinalBattle, ['walk',
'stickerBook',
'teleportOut',
'died',
'tunnelOut',
'DFA',
'battle',
'movie',
'ouch',
'crane',
'WaitForBattle',
'squished']),
State.State('movie', self.enterMovie, self.exitMovie, ['walk',
'battle',
'finalBattle',
'died',
'teleportOut']),
State.State('ouch', self.enterOuch, self.exitOuch, ['walk',
'battle',
'finalBattle',
'died',
'crane']),
State.State('crane', self.enterCrane, self.exitCrane, ['walk',
'battle',
'finalBattle',
'died',
'ouch',
'squished']),
State.State('walk', self.enterWalk, self.exitWalk, ['stickerBook',
'teleportOut',
'died',
'tunnelOut',
'DFA',
'battle',
'movie',
'ouch',
'crane',
'finalBattle',
'WaitForBattle']),
State.State('stickerBook', self.enterStickerBook, self.exitStickerBook, ['walk',
'DFA',
'WaitForBattle',
'movie',
'battle']),
State.State('WaitForBattle', self.enterWaitForBattle, self.exitWaitForBattle, ['battle', 'walk', 'movie']),
State.State('DFA', self.enterDFA, self.exitDFA, ['DFAReject', 'teleportOut', 'tunnelOut']),
State.State('DFAReject', self.enterDFAReject, self.exitDFAReject, ['walk']),
State.State('teleportIn', self.enterTeleportIn, self.exitTeleportIn, ['walk']),
State.State('teleportOut', self.enterTeleportOut, self.exitTeleportOut, ['teleportIn', 'final', 'WaitForBattle']),
State.State('died', self.enterDied, self.exitDied, ['final']),
State.State('tunnelIn', self.enterTunnelIn, self.exitTunnelIn, ['walk']),
State.State('tunnelOut', self.enterTunnelOut, self.exitTunnelOut, ['final']),
State.State('squished', self.enterSquished, self.exitSquished, ['finalBattle',
'crane',
'died',
'teleportOut']),
State.State('final', self.enterFinal, self.exitFinal, ['start'])], 'start', 'final')
return
def load(self):
BattlePlace.BattlePlace.load(self)
self.parentFSM.getStateNamed('cogHQBossBattle').addChild(self.fsm)
self.townBattle = self.loader.townBattle
for i in xrange(1, 3):
Suit.loadSuits(i)
def unload(self):
BattlePlace.BattlePlace.unload(self)
self.parentFSM.getStateNamed('cogHQBossBattle').removeChild(self.fsm)
del self.parentFSM
del self.fsm
self.ignoreAll()
for i in xrange(1, 3):
Suit.unloadSuits(i)
def getTaskZoneId(self):
return base.cr.playGame.hood.id
def enter(self, requestStatus, bossCog):
self.zoneId = requestStatus['zoneId']
BattlePlace.BattlePlace.enter(self)
self.fsm.enterInitialState()
self.bossCog = bossCog
if self.bossCog:
self.bossCog.d_avatarEnter()
self._telemLimiter = TLGatherAllAvs('CogHQBossBattle', RotationLimitToH)
NametagGlobals.setMasterArrowsOn(1)
base.localAvatar.inventory.setRespectInvasions(0)
self.fsm.request(requestStatus['how'], [requestStatus])
def exit(self):
self.fsm.requestFinalState()
base.localAvatar.inventory.setRespectInvasions(1)
if self.bossCog:
self.bossCog.d_avatarExit()
self.bossCog = None
self._telemLimiter.destroy()
del self._telemLimiter
BattlePlace.BattlePlace.exit(self)
return
def enterBattle(self, event):
mult = 1
if self.bossCog:
mult = ToontownBattleGlobals.getBossBattleCreditMultiplier(self.bossCog.battleNumber)
self.townBattle.enter(event, self.fsm.getStateNamed('battle'), bldg=1, creditMultiplier=mult)
base.localAvatar.b_setAnimState('off', 1)
base.localAvatar.setTeleportAvailable(0)
base.localAvatar.cantLeaveGame = 1
def exitBattle(self):
self.townBattle.exit()
def enterFinalBattle(self):
self.walkStateData.enter()
self.walkStateData.fsm.request('walking')
base.localAvatar.setTeleportAvailable(0)
base.localAvatar.setTeleportAllowed(0)
base.localAvatar.cantLeaveGame = 0
base.localAvatar.book.hideButton()
self.ignore(ToontownGlobals.StickerBookHotkey)
self.ignore('enterStickerBook')
self.ignore(ToontownGlobals.OptionsPageHotkey)
def exitFinalBattle(self):
self.walkStateData.exit()
base.localAvatar.setTeleportAllowed(1)
def enterMovie(self, requestStatus = None):
base.localAvatar.setTeleportAvailable(0)
def exitMovie(self):
pass
def enterOuch(self):
base.localAvatar.setTeleportAvailable(0)
base.localAvatar.laffMeter.start()
def exitOuch(self):
base.localAvatar.laffMeter.stop()
def enterCrane(self):
base.localAvatar.setTeleportAvailable(0)
base.localAvatar.laffMeter.start()
base.localAvatar.collisionsOn()
def exitCrane(self):
base.localAvatar.collisionsOff()
base.localAvatar.laffMeter.stop()
def enterWalk(self, teleportIn = 0):
BattlePlace.BattlePlace.enterWalk(self, teleportIn)
self.ignore('teleportQuery')
base.localAvatar.setTeleportAvailable(0)
base.localAvatar.setTeleportAllowed(0)
base.localAvatar.book.hideButton()
self.ignore(ToontownGlobals.StickerBookHotkey)
self.ignore('enterStickerBook')
self.ignore(ToontownGlobals.OptionsPageHotkey)
self.ignore(self.walkDoneEvent)
def exitWalk(self):
BattlePlace.BattlePlace.exitWalk(self)
base.localAvatar.setTeleportAllowed(1)
def enterStickerBook(self, page = None):
BattlePlace.BattlePlace.enterStickerBook(self, page)
self.ignore('teleportQuery')
base.localAvatar.setTeleportAvailable(0)
def enterSit(self):
BattlePlace.BattlePlace.enterSit(self)
self.ignore('teleportQuery')
base.localAvatar.setTeleportAvailable(0)
def enterTeleportIn(self, requestStatus):
base.localAvatar.detachNode()
base.localAvatar.setPosHpr(*self.teleportInPosHpr)
BattlePlace.BattlePlace.enterTeleportIn(self, requestStatus)
def enterTeleportOut(self, requestStatus):
BattlePlace.BattlePlace.enterTeleportOut(self, requestStatus, self.__teleportOutDone)
def __teleportOutDone(self, requestStatus):
hoodId = requestStatus['hoodId']
if hoodId == ToontownGlobals.MyEstate:
self.getEstateZoneAndGoHome(requestStatus)
else:
self.doneStatus = requestStatus
messenger.send(self.doneEvent)
def enterSquished(self):
base.localAvatar.laffMeter.start()
base.localAvatar.b_setAnimState('Flattened')
def handleSquishDone(self, extraArgs = []):
base.cr.playGame.getPlace().setState('walk')
def exitSquished(self):
taskMgr.remove(base.localAvatar.uniqueName('finishSquishTask'))
base.localAvatar.laffMeter.stop()
|
|
#!/usr/bin/python
"""High-level interface to the classes and methods in the package.
"""
from pyci.msg import err, warn, okay, info, vms, set_verbose
settings = None
"""The GlobalSettings instance with interpreted contents of 'global.xml'.
"""
db = {}
"""The scripts database which includes cron status, enabled status and a
list of the installed repositories.
"""
datapath = None
"""The full path to the data file to which 'db' is serialized."""
args = None
"""The dictionary of arguments passed to the script."""
def examples():
"""Prints examples of using the script to the console using colored output.
"""
script = "Continuous Integration Automation Server"
explain = ("For complex codes with many collaborators, it is often difficult to maintian "
"a pristine code that everyone can rely on. If every developer has power to "
"commit to master, unintentional mistakes happen that can cripple those who "
"rely on the code for day-to-day business. One way to overcome this is to isolate "
"the master branch and require collaborators to work on separate forks/branches. "
"When they are ready to commit their changes to master, they create a pull request "
"that summarizes the changes and why they want to merge them into the master branch.\n\n"
"A continuous integration server monitors repositories for new pull requests. When a new "
"request is made, the proposed changes are downloaded to a local sandbox and tested "
"against all the existing code. If the master branch has a rich suite of unit tests "
"this will detect any bugs in the proposed merger. If all the tests pass, then the "
"owner of the master branch can have confidence that the merger will be okay.")
contents = [(("Configure this machine to be a CI server. Unfortunately, this step requires "
"sudo authority because the API accesses the crontab for arbitrary users."),
"sudo ci.py -setup",
("Before this setup can proceed, you need to make sure the global configuration "
"XML file has been created and the environment variable to its path has been set:\n"
"\texport PYCI_XML='~/path/to/global.xml'.\nSee also: -rollback")),
(("Remove the cron tab from the server, delete the list of installed repositories "
"and undo anything else that the script did when -setup was used."),
"sudo ci.py -rollback",
("This action deletes the files specified in 'ARCHFILE' and 'DATAFILE' in 'global.xml'. "
"Also, the crontab is removed, which is why sudo privileges are needed. See also -setup.")),
(("Install the repository described by myrepo.xml onto the CI server so that "
"it's pull requests are monitored and unit ,tested."),
"ci.py -install myrepo.xml",
("After installation, you can query the repository immediately by running the "
"script with -cron. You can install a list of repositories with a single command."
"See also -uninstall.")),
(("Run the routines that check for new pull requests, run the unit tests, and post "
"the results to the media wiki."),
"ci.py -cron", "")]
required = ("REQUIRED:\n\t-'repo.xml' file for *each* repository that gets installed on the server.\n"
"\t-'global.xml' file with configuration settings for *all* repositories.\n"
"\t- git user and API key with push access for *each* repository installed.")
output = ("RETURNS: prints status information to stdout.")
details = ("This script installs a continous integration server on the local machine by "
"configuring a cron to call this script every couple of minutes. The script interacts "
"with github using an API to monitor the pull requests. When new ones are found, the "
"list of tests specified in the 'repo.xml' file is executed and the results are posted "
"to a media wiki page associated with the specific pull request. For more details, see "
"the online repo at https://github.com/rosenbrockc/ci.")
outputfmt = ("")
from pyci.msg import example
example(script, explain, contents, required, output, outputfmt, details)
def _parser_options():
"""Parses the options and arguments from the command line."""
import argparse
    parser = argparse.ArgumentParser(description="PyCI Continuous Integration Automation Server")
parser.add_argument("-examples", action="store_true",
help="Display examples of how to use this script.")
parser.add_argument("-setup", action="store_true",
help=("Setup the cron tab and script database for this server so "
"that it is ready to have repositories installed."))
parser.add_argument("-rollback", action="store_true",
help=("Remove this script's cron tab and reverse other things done "
"by this script. This does not delete this script."))
parser.add_argument("-enable", action="store_true",
help="Re-enable the continuous integration server.")
parser.add_argument("-disable", action="store_true",
help=("Disable the continuous integration server so that it no longer "
"monitors the installed repositories."))
parser.add_argument("-cron", action="store_true",
help=("Run the continuous integration routines for all the repos installed "
"in this script's database."))
parser.add_argument("-list", action="store_true",
help="List all the repositories in the CI server's database.")
parser.add_argument("-install", nargs="+",
help=("Install the specified XML file(s) as repositories to be monitored "
"by the CI server."))
parser.add_argument("-uninstall", nargs="+",
help=("Uninstall the specified XML file(s) as repositories from "
"the CI server."))
parser.add_argument("--verbose", nargs="?", type=int, const=1,
help="Runs the CI server in verbose mode.")
parser.add_argument("-cronfreq", type=int, default=1,
help="Specify the frequency at which the cron runs.")
parser.add_argument("-nolive", action="store_true",
help=("For unit testing, when specified no live requests are made to "
"servers and all the class actions are performed in test mode. "
"This also prevents the cron tab from being installed."))
global args
args = vars(parser.parse_known_args()[0])
if args["examples"]:
examples()
exit(0)
return args
def _load_db():
"""Deserializes the script database from JSON."""
from os import path
from pyci.utility import get_json
global datapath, db
datapath = path.abspath(path.expanduser(settings.datafile))
vms("Deserializing DB from {}".format(datapath))
db = get_json(datapath, {"installed": [], "enabled": True, "cron": False})
def _save_db():
"""Serializes the contents of the script db to JSON."""
from pyci.utility import json_serial
import json
vms("Serializing DB to JSON in {}".format(datapath))
with open(datapath, 'w') as f:
json.dump(db, f, default=json_serial)
def _get_real_user():
"""Returns the name of the actual user account, even if running in sudo."""
import os
return os.path.expanduser("~").split("/")[-1]
def _check_virtualenv():
"""Makes sure that the virtualenv specified in the global settings file
actually exists.
"""
from os import waitpid
from subprocess import Popen, PIPE
penvs = Popen("source /usr/local/bin/virtualenvwrapper.sh; workon",
shell=True, executable="/bin/bash", stdout=PIPE, stderr=PIPE)
waitpid(penvs.pid, 0)
envs = penvs.stdout.readlines()
enverr = penvs.stderr.readlines()
result = (settings.venv + '\n') in envs and len(enverr) == 0
vms("Find virtualenv: {}".format(' '.join(envs).replace('\n', '')))
vms("Find virtualenv | stderr: {}".format(' '.join(enverr)))
if not result:
info(envs)
err("The virtualenv '{}' does not exist; can't use CI server.".format(settings.venv))
if len(enverr) > 0:
map(err, enverr)
return result
def _check_global_settings():
"""Makes sure that the global settings environment variable and file
exist for configuration.
"""
global settings
if settings is not None:
#We must have already loaded this and everything was okay!
return True
from os import getenv
result = False
if getenv("PYCI_XML") is None:
err("The environment variable PYCI_XML for the global configuration "
"has not been set.")
else:
from os import path
fullpath = path.abspath(path.expanduser(getenv("PYCI_XML")))
if not path.isfile(fullpath):
err("The file {} for global configuration does not exist.".format(fullpath))
else:
from pyci.config import GlobalSettings
settings = GlobalSettings()
result = True
return result
def _setup_crontab():
"""Sets up the crontab if it hasn't already been setup."""
from crontab import CronTab
#Since CI works out of a virtualenv anyway, the `ci.py` script will be
#installed in the bin already, so we can call it explicitly.
command = '/bin/bash -c "source ~/.cron_profile; workon {}; ci.py -cron"'.format(settings.venv)
user = _get_real_user()
if args["nolive"]:
vms("Skipping cron tab configuration because 'nolive' enabled.")
return
cron = CronTab(user=user)
#We need to see if the cron has already been created for this command.
existing = False
possible = cron.find_comment("pyci_cron")
if len(list(possible)) > 0:
if args["rollback"]:
vms("Removing {} from cron tab.".format(command))
cron.remove_all(command)
cron.write()
db["cron"] = False
_save_db()
else:
existing = True
if not existing and not args["rollback"]:
job = cron.new(command=command, comment="pyci_cron")
#Run the cron every minute of every hour every day.
if args["cronfreq"] == 1:
vms("New cron tab configured *minutely* for {}".format(command))
job.setall("* * * * *")
else:
vms("New cron tab configured every {} minutes for {}.".format(args["cronfreq"], command))
job.setall("*/{} * * * *".format(args["cronfreq"]))
cron.write()
db["cron"] = True
_save_db()
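# For illustration, with venv "ci" and -cronfreq 5 the installed entry renders
# as:
#   */5 * * * * /bin/bash -c "source ~/.cron_profile; workon ci; ci.py -cron"
# tagged with the "pyci_cron" comment so that later runs (and -rollback) can
# find and remove it.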
def _cron_profile():
"""Sets up the .cron_profile file if it does not already exist.
"""
#The main ingredients of the file are the import of the virtualenvwrapper
#and the setting of the PYCI_XML variable for global configuration.
from os import path
cronpath = path.expanduser("~/.cron_profile")
if not path.isfile(cronpath):
from os import getenv
xmlpath = getenv("PYCI_XML")
contents = ['source /usr/local/bin/virtualenvwrapper.sh',
'export PYCI_XML="{}"'.format(xmlpath)]
with open(cronpath, 'w') as f:
f.write('\n'.join(contents))
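# The generated ~/.cron_profile therefore holds just two lines (path
# illustrative):
#   source /usr/local/bin/virtualenvwrapper.sh
#   export PYCI_XML="~/path/to/global.xml"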
def _setup_server():
"""Checks whether the server needs to be setup if a repo is being installed.
If it does, checks whether anything needs to be done.
"""
if args["setup"] or args["install"]:
#If the cron has been configured, it means that the server has been
#setup. We also perform some checks of the configuration file and the
#existence of the virtualenv.
if not _check_global_settings() or not _check_virtualenv():
return False
_cron_profile()
if "cron" in db and not db["cron"]:
_setup_crontab()
if args["rollback"]:
_setup_crontab()
def _server_rollback():
"""Removes script database and archive files to rollback the CI server
installation.
"""
#Remove the data and archive files specified in settings. The cron
    #gets removed by the _setup_server() script if -rollback is specified.
from os import path, remove
archpath = path.abspath(path.expanduser(settings.archfile))
if path.isfile(archpath) and not args["nolive"]:
vms("Removing archive JSON file at {}.".format(archpath))
remove(archpath)
datapath = path.abspath(path.expanduser(settings.datafile))
if path.isfile(datapath) and not args["nolive"]:
vms("Removing script database JSON file at {}".format(datapath))
remove(datapath)
def _server_enable():
"""Checks whether the server should be enabled/disabled and makes the
change accordingly.
"""
prev = None if "enabled" not in db else db["enabled"]
if args["disable"]:
db["enabled"] = False
okay("Disabled the CI server. No pull requests will be processed.")
if args["enable"]:
db["enabled"] = True
okay("Enabled the CI server. Pull request monitoring online.")
#Only perform the save if something actually changed.
if prev != db["enabled"]:
_save_db()
def _find_next(server):
"""Finds the name of the next repository to run based on the *current*
state of the database.
"""
from datetime import datetime
#Re-load the database in case we have multiple instances of the script
#running in memory.
_load_db()
result = None
visited = []
if "status" in db:
for reponame, status in db["status"].items():
vms("Checking cron status for {}: {}".format(reponame, status))
start = None if "started" not in status else status["started"]
end = None if "end" not in status else status["end"]
running = start is not None and end is not None and start > end
add = False
if not running and end is not None:
#Check the last time it was run and see if enough time has
#elapsed.
                elapsed = (datetime.now() - end).total_seconds()/60
add = elapsed > server.cron.settings[reponame].frequency
if not add:
vms("'{}' skipped because the interval hasn't ".format(reponame) +
"elapsed ({} vs. {})".format(elapsed, server.cron.settings[reponame].frequency))
elif end is None:
add = True
if add:
result = reponame
break
visited.append(reponame)
else:
db["status"] = {}
if result is None:
#We still need to check the newly installed repos.
for reponame, repo in server.repositories.items():
if reponame not in visited:
#These are newly installed repos that have never run before.
vms("Added '{}' as new repo for cron execution.".format(reponame))
result = reponame
break
return result
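# For reference, the status entries consulted above have this shape (repo
# name illustrative):
#   db["status"] = {"owner/repo": {"start": datetime(...), "end": datetime(...)}}
# A repo counts as running when "start" is more recent than "end".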
def _do_cron():
"""Handles the cron request to github to check for new pull requests. If
any are found, they are run *sequentially* until they are all completed.
"""
if not args["cron"]:
return
if ("enabled" in db and not db["enabled"]) or "enabled" not in db:
warn("The CI server is disabled. Exiting.")
exit(0)
#Our basic idea with the cron is as follows:
# - the cron runs every minute of the day.
# - each installed XML file has the last time it ran saved in the script's
# database. If the specified check frequency has elapsed since it last
# ran, then we run the repository server checks.
# - NB: before running the time-intensive checks against remote servers
# or running the unit tests, first update the running status of the repo
# so that another call with -cron doesn't duplicate the work!
#By having the cron run every minute, we maximize the probability that
#repo checks with time intensive unit tests may run in parallel. Since
#servers usually have many cores, this shouldn't impact the run times too
#severely unless the tests are disk intensive.
#We use the repo full names as keys in the db's status dictionary.
from pyci.server import Server
from datetime import datetime
attempted = []
server = Server(testmode=args["nolive"])
nextrepo = _find_next(server)
dbs = db["status"]
while nextrepo is not None:
vms("Working on '{}' in cron.".format(nextrepo))
if nextrepo in attempted:
#This makes sure we don't end up in an infinite loop.
vms("'{}' has already been handled! Exiting infinite loop.".format(nextrepo))
break
if nextrepo not in dbs:
vms("Created blank status dictionary for '{}' in db.".format(nextrepo))
dbs[nextrepo] = {"start": None, "end": None}
dbs[nextrepo]["start"] = datetime.now()
_save_db()
#Now that we have saved our intent to run these repo-checks, let's
#actually run them.
attempted.append(nextrepo)
server.runnable = [nextrepo]
if not args["nolive"]:
vms("Starting pull request processing for '{}'.".format(nextrepo))
server.process_pulls()
dbs[nextrepo]["end"] = datetime.now()
_save_db()
nextrepo = _find_next(server)
def _fmt_time(time):
"""Returns the formatted time if it is not None."""
if time is not None:
return time.strftime("%m/%d/%Y %H:%M")
else:
return "-"
def _list_repos():
"""Lists all the installed repos as well as their last start and finish
times from the cron's perspective.
"""
if not args["list"]:
return
#Just loop over the list of repos we have in a server instance. See if
#they also exist in the db's status; if they do, include the start/end
#times we have saved.
from pyci.server import Server
server = Server(testmode=args["nolive"])
output = ["Repository | Started | Finished | XML File Path",
"--------------------------------------------------------------------------"]
dbs = {} if "status" not in db else db["status"]
fullfmt = "{0:<20} | {1:^16} | {2:^16} | {3}"
for reponame, repo in server.repositories.items():
if reponame in dbs:
start = _fmt_time(dbs[reponame]["start"])
end = _fmt_time(dbs[reponame]["end"])
else:
start = "Never"
end = "Never"
output.append(fullfmt.format(reponame, start, end, repo.filepath))
info('\n'.join(output))
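# Example -list output (values illustrative):
#   Repository           |     Started      |     Finished     | XML File Path
#   --------------------------------------------------------------------------
#   rosenbrockc/ci       | 01/05/2016 08:00 | 01/05/2016 08:02 | /home/ci/myrepo.xml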
def _handle_install():
"""Handles the (un)installation of repositories on this CI server.
"""
from pyci.server import Server
if args["install"]:
server = Server(testmode=args["nolive"])
for xpath in args["install"]:
server.install(xpath)
okay("Installed {} into the CI server.".format(xpath))
if args["uninstall"]:
server = Server(testmode=args["nolive"])
for xpath in args["uninstall"]:
server.uninstall(xpath)
okay("Uninstalled {} from the CI server.".format(xpath))
def run():
"""Main script entry to handle the arguments given to the script."""
_parser_options()
set_verbose(args["verbose"])
if _check_global_settings():
_load_db()
else:
exit(-1)
#Check the server configuration against the script arguments passed in.
_setup_server()
if args["rollback"]:
_server_rollback()
okay("The server rollback appears to have been successful.")
exit(0)
_server_enable()
_list_repos()
_handle_install()
#This is the workhorse once a successful installation has happened.
_do_cron()
if __name__ == "__main__":
run()
|
|
import copy
import os
from distutils.version import LooseVersion
import nibabel
import nilearn
from nibabel import load as nibabel_load
from nibabel import Nifti1Image as NibabelNifti1Image
from nilearn._utils import check_niimg
from nilearn._utils.class_inspect import get_params
from nilearn._utils.compat import _basestring
from nilearn._utils.niimg import short_repr, _get_target_dtype
from nilearn.input_data import MultiNiftiMasker
from nilearn.input_data.nifti_masker import filter_and_mask, NiftiMasker
from sklearn.externals import joblib as joblib
from sklearn.externals.joblib.func_inspect import filter_args
from sklearn.externals.joblib.hashing import NumpyHasher
# We rely on this patch to access the nibabel image affine uniformly across
# older and newer versions of nibabel.
# XXX: Should be removed once support for nibabel
# versions < 2.0.0 is dropped.
if LooseVersion(nibabel.__version__) >= LooseVersion('2.0.0'):
def get_affine(img):
return img.affine
else:
def get_affine(img):
return img.get_affine()
def load(filename, **kwargs):
img = nibabel_load(filename, **kwargs)
img.__class__ = Nifti1Image
return img
class Nifti1Image(NibabelNifti1Image):
def __getstate__(self):
state = {'dataobj': self._dataobj,
'header': self.header,
'filename': self.get_filename(),
'affine': self.affine,
'extra': self.extra}
return state
def __setstate__(self, state):
new_self = Nifti1Image(dataobj=state['dataobj'],
affine=state['affine'],
header=state['header'],
extra=state['extra'],
)
self.__dict__ = new_self.__dict__
if state['filename'] is not None:
self.set_filename(state['filename'])
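# A minimal sketch of the round trip this subclass enables (file name
# illustrative):
#
#   import pickle
#   img = load('anat.nii.gz')
#   img2 = pickle.loads(pickle.dumps(img))
#   assert img2.get_filename() == img.get_filename()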
class NibabelHasher(NumpyHasher):
def __init__(self, hash_name='md5', coerce_mmap=False):
"""
Parameters
----------
hash_name: string
The hash algorithm to be used
coerce_mmap: boolean
Make no difference between np.memmap and np.ndarray
objects.
"""
NumpyHasher.__init__(self, hash_name=hash_name,
coerce_mmap=coerce_mmap)
        import nibabel
self.nibabel = nibabel
def save(self, obj):
if isinstance(obj, Nifti1Image):
filename = obj.get_filename()
if filename is not None:
stat = os.stat(filename)
last_modified = stat.st_mtime
klass = obj.__class__
obj = (klass, ('HASHED', filename, last_modified))
NumpyHasher.save(self, obj)
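        # Hashing (class, filename, mtime) instead of the voxel data keeps
        # cache lookups cheap; the trade-off is that an in-place edit that
        # preserves the mtime would not invalidate the cache.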
def our_hash(obj, hash_name='md5', coerce_mmap=False):
""" Quick calculation of a hash to identify uniquely Python objects
containing numpy arrays.
Parameters
-----------
hash_name: 'md5' or 'sha1'
Hashing algorithm used. sha1 is supposedly safer, but md5 is
        faster.
coerce_mmap: boolean
Make no difference between np.memmap and np.ndarray
"""
hasher = NibabelHasher(hash_name=hash_name, coerce_mmap=coerce_mmap)
hash = hasher.hash(obj)
return hash
def our_get_argument_hash(self, *args, **kwargs):
return our_hash(filter_args(self.func, self.ignore,
args, kwargs),
coerce_mmap=True)
def our_load_niimg(niimg, dtype=None):
"""Load a niimg, check if it is a nibabel SpatialImage and cast if needed
Parameters:
-----------
niimg: Niimg-like object
See http://nilearn.github.io/manipulating_images/input_output.html.
Image to load.
dtype: {dtype, "auto"}
Data type toward which the data should be converted. If "auto", the
data will be converted to int32 if dtype is discrete and float32 if it
is continuous.
Returns:
--------
img: image
A loaded image object.
"""
from nilearn.image import new_img_like # avoid circular imports
if isinstance(niimg, _basestring):
# data is a filename, we load it
niimg = nibabel.load(niimg)
elif not isinstance(niimg, nibabel.spatialimages.SpatialImage):
raise TypeError("Data given cannot be loaded because it is"
" not compatible with nibabel format:\n"
+ short_repr(niimg))
try:
this_dtype = niimg.get_data_dtype()
except AttributeError:
# Nibabel bug
this_dtype = niimg.get_data().dtype
dtype = _get_target_dtype(this_dtype, dtype)
if dtype is not None:
niimg = new_img_like(niimg, niimg.get_data().astype(dtype),
get_affine(niimg))
return niimg
def our_multi_nifti_masker_transform(self, imgs, confounds=None):
""" Apply mask, spatial and temporal preprocessing
Parameters
----------
imgs: list of Niimg-like objects
See http://nilearn.github.io/manipulating_images/input_output.html.
Data to be preprocessed
confounds: CSV file path or 2D matrix
This parameter is passed to signal.clean. Please see the
corresponding documentation for details.
Returns
-------
data: {list of numpy arrays}
preprocessed images
"""
self._check_fitted()
if not hasattr(imgs, '__iter__') \
or isinstance(imgs, _basestring):
return self.transform_single_imgs(imgs, confounds=confounds)
return self.transform_imgs(imgs, confounds, n_jobs=self.n_jobs)
def our_transform_single_imgs(self, imgs, confounds=None, copy=True):
"""Apply mask, spatial and temporal preprocessing
Parameters
----------
imgs: 3D/4D Niimg-like object
See http://nilearn.github.io/manipulating_images/input_output.html.
Images to process. It must boil down to a 4D image with scans
number as last dimension.
confounds: CSV file or array-like, optional
This parameter is passed to signal.clean. Please see the related
documentation for details.
shape: (number of scans, number of confounds)
Returns
-------
region_signals: 2D numpy.ndarray
Signal for each voxel inside the mask.
shape: (number of scans, number of voxels)
"""
# Ignore the mask-computing params: they are not useful and will
    # just invalidate the cache for no good reason.
# target_shape and target_affine are conveyed implicitly in mask_img
imgs = check_niimg(imgs)
params = get_params(self.__class__, self,
ignore=['mask_img', 'mask_args', 'mask_strategy'])
data = self._cache(filter_and_mask,
ignore=['verbose', 'memory', 'memory_level',
'copy'],
shelve=self._shelving)(
imgs, self.mask_img_, params,
memory_level=self.memory_level,
memory=self.memory,
verbose=self.verbose,
confounds=confounds,
copy=copy
)
return data
def monkey_patch_nifti_image():
nibabel.load = load
joblib.memory.MemorizedFunc._get_argument_hash = our_get_argument_hash
nilearn._utils.niimg.load_niimg = our_load_niimg
NiftiMasker.transform_single_imgs = our_transform_single_imgs
MultiNiftiMasker.transform = our_multi_nifti_masker_transform
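# Typical usage (a sketch; paths illustrative) is to apply the patch once,
# before any maskers are built, so that nibabel.load, joblib's argument
# hashing and the masker transforms all pick up the patched versions:
#
#   monkey_patch_nifti_image()
#   masker = NiftiMasker(mask_img='mask.nii.gz', memory='nilearn_cache')
#   signals = masker.fit_transform('func.nii.gz')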
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Some common SessionRunHook classes.
Note that the symbols that are exported to v1 tf.train namespace are also
exported to v2 in tf.estimator namespace. See
https://github.com/tensorflow/estimator/blob/master/tensorflow_estimator/python/estimator/hooks/basic_session_run_hooks.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import numpy as np
import six
from tensorflow.core.framework.summary_pb2 import Summary
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.util.event_pb2 import SessionLog
from tensorflow.python.client import timeline
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training_util
from tensorflow.python.training.session_run_hook import SessionRunArgs
from tensorflow.python.training.summary_io import SummaryWriterCache
from tensorflow.python.util.tf_export import tf_export
_HOOKS = "hooks"
_STEPS_PER_RUN_VAR = "steps_per_run"
class _HookTimer(object):
"""Base timer for determining when Hooks should trigger.
Should not be instantiated directly.
"""
def __init__(self):
pass
def reset(self):
"""Resets the timer."""
pass
def should_trigger_for_step(self, step):
"""Return true if the timer should trigger for the specified step."""
raise NotImplementedError
def update_last_triggered_step(self, step):
"""Update the last triggered time and step number.
Args:
step: The current step.
Returns:
A pair `(elapsed_time, elapsed_steps)`, where `elapsed_time` is the number
of seconds between the current trigger and the last one (a float), and
`elapsed_steps` is the number of steps between the current trigger and
the last one. Both values will be set to `None` on the first trigger.
"""
raise NotImplementedError
def last_triggered_step(self):
"""Returns the last triggered time step or None if never triggered."""
raise NotImplementedError
@tf_export(v1=["train.SecondOrStepTimer"])
class SecondOrStepTimer(_HookTimer):
"""Timer that triggers at most once every N seconds or once every N steps.
This symbol is also exported to v2 in tf.estimator namespace. See
https://github.com/tensorflow/estimator/blob/master/tensorflow_estimator/python/estimator/hooks/basic_session_run_hooks.py
"""
def __init__(self, every_secs=None, every_steps=None):
self.reset()
self._every_secs = every_secs
self._every_steps = every_steps
if self._every_secs is None and self._every_steps is None:
raise ValueError("Either every_secs or every_steps should be provided.")
if (self._every_secs is not None) and (self._every_steps is not None):
raise ValueError("Can not provide both every_secs and every_steps.")
super(SecondOrStepTimer, self).__init__()
def reset(self):
self._last_triggered_step = None
self._last_triggered_time = None
def should_trigger_for_step(self, step):
"""Return true if the timer should trigger for the specified step.
Args:
step: Training step to trigger on.
Returns:
True if the difference between the current time and the time of the last
trigger exceeds `every_secs`, or if the difference between the current
step and the last triggered step exceeds `every_steps`. False otherwise.
"""
if self._last_triggered_step is None:
return True
if self._last_triggered_step == step:
return False
if self._every_secs is not None:
if time.time() >= self._last_triggered_time + self._every_secs:
return True
if self._every_steps is not None:
if step >= self._last_triggered_step + self._every_steps:
return True
return False
def update_last_triggered_step(self, step):
current_time = time.time()
if self._last_triggered_time is None:
elapsed_secs = None
elapsed_steps = None
else:
elapsed_secs = current_time - self._last_triggered_time
elapsed_steps = step - self._last_triggered_step
self._last_triggered_time = current_time
self._last_triggered_step = step
return (elapsed_secs, elapsed_steps)
def last_triggered_step(self):
return self._last_triggered_step
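# A minimal sketch of the timer contract (cadence illustrative):
#
#   timer = SecondOrStepTimer(every_steps=10)
#   timer.should_trigger_for_step(0)    # True: never triggered before.
#   timer.update_last_triggered_step(0)
#   timer.should_trigger_for_step(5)    # False: only 5 steps elapsed.
#   timer.should_trigger_for_step(10)   # True: 10 steps elapsed.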
class NeverTriggerTimer(_HookTimer):
"""Timer that never triggers."""
def should_trigger_for_step(self, step):
_ = step
return False
def update_last_triggered_step(self, step):
_ = step
return (None, None)
def last_triggered_step(self):
return None
@tf_export(v1=["train.LoggingTensorHook"])
class LoggingTensorHook(session_run_hook.SessionRunHook):
"""Prints the given tensors every N local steps, every N seconds, or at end.
The tensors will be printed to the log, with `INFO` severity. If you are not
seeing the logs, you might want to add the following line after your imports:
```python
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
```
Note that if `at_end` is True, `tensors` should not include any tensor
whose evaluation produces a side effect such as consuming additional inputs.
@compatibility(TF2)
Please check this [notebook][notebook] on how to migrate the API to TF2.
[notebook]:https://github.com/tensorflow/docs/blob/master/site/en/guide/migrate/logging_stop_hook.ipynb
@end_compatibility
"""
def __init__(self,
tensors,
every_n_iter=None,
every_n_secs=None,
at_end=False,
formatter=None):
"""Initializes a `LoggingTensorHook`.
Args:
tensors: `dict` that maps string-valued tags to tensors/tensor names, or
`iterable` of tensors/tensor names.
every_n_iter: `int`, print the values of `tensors` once every N local
steps taken on the current worker.
every_n_secs: `int` or `float`, print the values of `tensors` once every N
seconds. Exactly one of `every_n_iter` and `every_n_secs` should be
provided.
at_end: `bool` specifying whether to print the values of `tensors` at the
end of the run.
formatter: function, takes dict of `tag`->`Tensor` and returns a string.
If `None` uses default printing all tensors.
Raises:
ValueError: if `every_n_iter` is non-positive.
"""
only_log_at_end = (
at_end and (every_n_iter is None) and (every_n_secs is None))
if (not only_log_at_end and
(every_n_iter is None) == (every_n_secs is None)):
raise ValueError(
"either at_end and/or exactly one of every_n_iter and every_n_secs "
"must be provided.")
if every_n_iter is not None and every_n_iter <= 0:
raise ValueError("invalid every_n_iter=%s." % every_n_iter)
if not isinstance(tensors, dict):
self._tag_order = tensors
tensors = {item: item for item in tensors}
else:
self._tag_order = sorted(tensors.keys())
self._tensors = tensors
self._formatter = formatter
self._timer = (
NeverTriggerTimer() if only_log_at_end else SecondOrStepTimer(
every_secs=every_n_secs, every_steps=every_n_iter))
self._log_at_end = at_end
def begin(self):
self._timer.reset()
self._iter_count = 0
# Convert names to tensors if given
self._current_tensors = {
tag: _as_graph_element(tensor)
for (tag, tensor) in self._tensors.items()
}
def before_run(self, run_context): # pylint: disable=unused-argument
self._should_trigger = self._timer.should_trigger_for_step(self._iter_count)
if self._should_trigger:
return SessionRunArgs(self._current_tensors)
else:
return None
def _log_tensors(self, tensor_values):
original = np.get_printoptions()
np.set_printoptions(suppress=True)
elapsed_secs, _ = self._timer.update_last_triggered_step(self._iter_count)
if self._formatter:
logging.info(self._formatter(tensor_values))
else:
stats = []
for tag in self._tag_order:
stats.append("%s = %s" % (tag, tensor_values[tag]))
if elapsed_secs is not None:
logging.info("%s (%.3f sec)", ", ".join(stats), elapsed_secs)
else:
logging.info("%s", ", ".join(stats))
np.set_printoptions(**original)
def after_run(self, run_context, run_values):
_ = run_context
if self._should_trigger:
self._log_tensors(run_values.results)
self._iter_count += 1
def end(self, session):
if self._log_at_end:
values = session.run(self._current_tensors)
self._log_tensors(values)
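# Usage sketch (tensor name and train_op hypothetical):
#
#   logging_hook = LoggingTensorHook({"loss": "loss:0"}, every_n_iter=100)
#   with tf.compat.v1.train.MonitoredTrainingSession(
#       hooks=[logging_hook]) as sess:
#     while not sess.should_stop():
#       sess.run(train_op)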
def get_or_create_steps_per_run_variable():
"""Gets or creates the steps_per_run variable.
In Estimator, the user provided computation, the model_fn, is wrapped
inside a tf.while_loop for peak performance. The iterations of the loop are
specified by this variable, which adjusts its value on the CPU after each
device program execution and before the next execution.
The purpose of using a variable, rather than a constant, is to allow
  Estimator to adapt the device training iterations according to the final steps
specified by users. For example, if the user sets the steps_per_run as
4 and steps as 10 in Estimator.train(), the steps_per_run
variable will have the following value before each training run.
- 1-st execution: steps_per_run = 4
- 2-nd execution: steps_per_run = 4
- 3-rd execution: steps_per_run = 2
As model_fn increases the global step once per train_op invocation, the global
step is 10 after all executions, matching the steps=10 inputs passed in by
users.
Returns:
A TF non-trainable resource variable.
Raises:
    RuntimeError: If multiple steps_per_run variables are found.
"""
graph = ops.get_default_graph()
collection_name = "{}_{}".format(_HOOKS, _STEPS_PER_RUN_VAR)
steps_per_run_vars = graph.get_collection(collection_name)
if len(steps_per_run_vars) == 1:
return steps_per_run_vars[0]
elif len(steps_per_run_vars) > 1:
raise RuntimeError("Multiple steps_per_run_var in collection.")
with variable_scope.variable_scope(_HOOKS, reuse=variable_scope.AUTO_REUSE):
return variable_scope.get_variable(
_STEPS_PER_RUN_VAR,
initializer=init_ops.ones_initializer(),
shape=[],
dtype=dtypes.int32,
trainable=False,
collections=[collection_name, ops.GraphKeys.LOCAL_VARIABLES],
use_resource=True)
class _MultiStepStopAtStepHook(session_run_hook.SessionRunHook):
"""Hook that requests stop at a specified step."""
def __init__(self, num_steps=None, last_step=None, steps_per_run=1):
"""Initializes a `MultiStepStopAtStepHook`.
This hook requests stop after either a number of steps have been
executed or a last step has been reached. Only one of the two options can be
specified.
if `num_steps` is specified, it indicates the number of steps to execute
after `begin()` is called. If instead `last_step` is specified, it
indicates the last step we want to execute, as passed to the `after_run()`
call.
In Estimator, the user provided computation, the model_fn, is wrapped
inside a tf.while_loop for peak performance. The steps_per_run variable
determines the number of iterations of the loop before returning to the CPU.
Args:
num_steps: Number of steps to execute.
last_step: Step after which to stop.
steps_per_run: Number of steps executed per run call.
Raises:
ValueError: If one of the arguments is invalid.
"""
if num_steps is None and last_step is None:
raise ValueError("One of num_steps or last_step must be specified.")
if num_steps is not None and last_step is not None:
raise ValueError("Only one of num_steps or last_step can be specified.")
if steps_per_run is None or steps_per_run < 1:
raise ValueError("steps_per_run should be greater than 0")
self._num_steps = num_steps
self._last_step = last_step
self._steps_per_run_initial_value = steps_per_run
def begin(self):
self._global_step_tensor = training_util.get_global_step()
if self._global_step_tensor is None:
raise RuntimeError("Global step should be created to use StopAtStepHook.")
self._steps_per_run_variable = get_or_create_steps_per_run_variable()
def _update_steps_per_run_variable(self, global_step, session):
steps = min(self._last_step - global_step,
self._steps_per_run_initial_value)
self._steps_per_run_variable.load(steps, session=session)
def after_create_session(self, session, coord):
global_step = session.run(self._global_step_tensor)
if self._last_step is None:
self._last_step = global_step + self._num_steps
self._update_steps_per_run_variable(global_step, session)
def after_run(self, run_context, run_values):
# Global step cannot be retrieved via SessionRunArgs and before_run due to
# race condition in hook execution.
global_step = run_context.session.run(self._global_step_tensor)
if global_step >= self._last_step:
run_context.request_stop()
else:
self._update_steps_per_run_variable(global_step, run_context.session)
@tf_export(v1=["train.StopAtStepHook"])
class StopAtStepHook(session_run_hook.SessionRunHook):
"""Hook that requests stop at a specified step.
@compatibility(TF2)
Please check this [notebook][notebook] on how to migrate the API to TF2.
[notebook]:https://github.com/tensorflow/docs/blob/master/site/en/guide/migrate/logging_stop_hook.ipynb
@end_compatibility
"""
def __init__(self, num_steps=None, last_step=None):
"""Initializes a `StopAtStepHook`.
This hook requests stop after either a number of steps have been
executed or a last step has been reached. Only one of the two options can be
specified.
if `num_steps` is specified, it indicates the number of steps to execute
after `begin()` is called. If instead `last_step` is specified, it
indicates the last step we want to execute, as passed to the `after_run()`
call.
Args:
num_steps: Number of steps to execute.
last_step: Step after which to stop.
Raises:
ValueError: If one of the arguments is invalid.
"""
if num_steps is None and last_step is None:
raise ValueError("One of num_steps or last_step must be specified.")
if num_steps is not None and last_step is not None:
raise ValueError("Only one of num_steps or last_step can be specified.")
self._num_steps = num_steps
self._last_step = last_step
def begin(self):
self._global_step_tensor = training_util._get_or_create_global_step_read() # pylint: disable=protected-access
if self._global_step_tensor is None:
raise RuntimeError("Global step should be created to use StopAtStepHook.")
def after_create_session(self, session, coord):
if self._last_step is None:
global_step = session.run(self._global_step_tensor)
self._last_step = global_step + self._num_steps
def before_run(self, run_context): # pylint: disable=unused-argument
return SessionRunArgs(self._global_step_tensor)
def after_run(self, run_context, run_values):
global_step = run_values.results + 1
if global_step >= self._last_step:
# Check latest global step to ensure that the targeted last step is
# reached. global_step read tensor is the value of global step
# before running the operation. We're not sure whether current session.run
# incremented the global_step or not. Here we're checking it.
step = run_context.session.run(self._global_step_tensor)
if step >= self._last_step:
run_context.request_stop()
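# Usage sketch, stopping after 1000 further steps (train_op hypothetical):
#
#   stop_hook = StopAtStepHook(num_steps=1000)
#   with tf.compat.v1.train.MonitoredTrainingSession(hooks=[stop_hook]) as sess:
#     while not sess.should_stop():
#       sess.run(train_op)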
@tf_export(v1=["train.CheckpointSaverListener"])
class CheckpointSaverListener(object):
"""Interface for listeners that take action before or after checkpoint save.
`CheckpointSaverListener` triggers only in steps when `CheckpointSaverHook` is
triggered, and provides callbacks at the following points:
- before using the session
- before each call to `Saver.save()`
- after each call to `Saver.save()`
- at the end of session
To use a listener, implement a class and pass the listener to a
`CheckpointSaverHook`, as in this example:
```python
class ExampleCheckpointSaverListener(CheckpointSaverListener):
def begin(self):
# You can add ops to the graph here.
print('Starting the session.')
self.your_tensor = ...
def before_save(self, session, global_step_value):
print('About to write a checkpoint')
def after_save(self, session, global_step_value):
print('Done writing checkpoint.')
if decided_to_stop_training():
return True
def end(self, session, global_step_value):
print('Done with the session.')
...
listener = ExampleCheckpointSaverListener()
saver_hook = tf.estimator.CheckpointSaverHook(
checkpoint_dir, listeners=[listener])
  with tf.compat.v1.train.MonitoredTrainingSession(
      chief_only_hooks=[saver_hook]):
    ...
```
A `CheckpointSaverListener` may simply take some action after every
checkpoint save. It is also possible for the listener to use its own schedule
to act less frequently, e.g. based on global_step_value. In this case,
implementors should implement the `end()` method to handle actions related to
the last checkpoint save. But the listener should not act twice if
`after_save()` already handled this last checkpoint save.
A `CheckpointSaverListener` can request training to be stopped, by returning
True in `after_save`. Please note that, in replicated distributed training
setting, only `chief` should use this behavior. Otherwise each worker will do
their own evaluation, which may be wasteful of resources.
"""
def begin(self):
pass
def before_save(self, session, global_step_value):
pass
def after_save(self, session, global_step_value):
pass
def end(self, session, global_step_value):
pass
@tf_export(v1=["train.CheckpointSaverHook"])
class CheckpointSaverHook(session_run_hook.SessionRunHook):
"""Saves checkpoints every N steps or seconds."""
def __init__(self,
checkpoint_dir,
save_secs=None,
save_steps=None,
saver=None,
checkpoint_basename="model.ckpt",
scaffold=None,
listeners=None,
save_graph_def=True):
"""Initializes a `CheckpointSaverHook`.
Args:
checkpoint_dir: `str`, base directory for the checkpoint files.
save_secs: `int`, save every N secs.
save_steps: `int`, save every N steps.
saver: `Saver` object, used for saving.
checkpoint_basename: `str`, base name for the checkpoint files.
scaffold: `Scaffold`, use to get saver object.
listeners: List of `CheckpointSaverListener` subclass instances. Used for
callbacks that run immediately before or after this hook saves the
checkpoint.
save_graph_def: Whether to save the GraphDef and MetaGraphDef to
`checkpoint_dir`. The GraphDef is saved after the session is created as
`graph.pbtxt`. MetaGraphDefs are saved out for every checkpoint as
`model.ckpt-*.meta`.
Raises:
ValueError: One of `save_steps` or `save_secs` should be set.
ValueError: At most one of `saver` or `scaffold` should be set.
"""
logging.info("Create CheckpointSaverHook.")
if saver is not None and scaffold is not None:
raise ValueError("You cannot provide both saver and scaffold.")
self._saver = saver
self._checkpoint_dir = checkpoint_dir
self._save_path = os.path.join(checkpoint_dir, checkpoint_basename)
self._scaffold = scaffold
self._timer = SecondOrStepTimer(
every_secs=save_secs, every_steps=save_steps)
self._listeners = listeners or []
self._steps_per_run = 1
self._save_graph_def = save_graph_def
def _set_steps_per_run(self, steps_per_run):
self._steps_per_run = steps_per_run
def begin(self):
self._summary_writer = SummaryWriterCache.get(self._checkpoint_dir)
self._global_step_tensor = training_util._get_or_create_global_step_read() # pylint: disable=protected-access
if self._global_step_tensor is None:
raise RuntimeError(
"Global step should be created to use CheckpointSaverHook.")
for l in self._listeners:
l.begin()
def after_create_session(self, session, coord):
global_step = session.run(self._global_step_tensor)
if self._save_graph_def:
      # Write the graph and saver_def as soon as the session is created.
      # We cannot do this in begin, since we let other hooks change the graph
      # and add variables there; the graph is finalized after all begin calls.
training_util.write_graph(
ops.get_default_graph().as_graph_def(add_shapes=True),
self._checkpoint_dir, "graph.pbtxt")
saver_def = self._get_saver().saver_def if self._get_saver() else None
graph = ops.get_default_graph()
meta_graph_def = meta_graph.create_meta_graph_def(
graph_def=graph.as_graph_def(add_shapes=True), saver_def=saver_def)
self._summary_writer.add_graph(graph)
self._summary_writer.add_meta_graph(meta_graph_def)
# The checkpoint saved here is the state at step "global_step".
self._save(session, global_step)
self._timer.update_last_triggered_step(global_step)
def before_run(self, run_context): # pylint: disable=unused-argument
return SessionRunArgs(self._global_step_tensor)
def after_run(self, run_context, run_values):
stale_global_step = run_values.results
if self._timer.should_trigger_for_step(stale_global_step +
self._steps_per_run):
# get the real value after train op.
global_step = run_context.session.run(self._global_step_tensor)
if self._timer.should_trigger_for_step(global_step):
self._timer.update_last_triggered_step(global_step)
if self._save(run_context.session, global_step):
run_context.request_stop()
def end(self, session):
last_step = session.run(self._global_step_tensor)
if last_step != self._timer.last_triggered_step():
self._save(session, last_step)
for l in self._listeners:
l.end(session, last_step)
def _save(self, session, step):
"""Saves the latest checkpoint, returns should_stop."""
logging.info("Calling checkpoint listeners before saving checkpoint %d...",
step)
for l in self._listeners:
l.before_save(session, step)
logging.info("Saving checkpoints for %d into %s.", step, self._save_path)
self._get_saver().save(session, self._save_path, global_step=step,
write_meta_graph=self._save_graph_def)
self._summary_writer.add_session_log(
SessionLog(
status=SessionLog.CHECKPOINT, checkpoint_path=self._save_path),
step)
logging.info("Calling checkpoint listeners after saving checkpoint %d...",
step)
should_stop = False
for l in self._listeners:
if l.after_save(session, step):
logging.info(
"A CheckpointSaverListener requested that training be stopped. "
"listener: {}".format(l))
should_stop = True
return should_stop
def _get_saver(self):
if self._saver is not None:
return self._saver
elif self._scaffold is not None:
return self._scaffold.saver
# Get saver from the SAVERS collection if present.
collection_key = ops.GraphKeys.SAVERS
savers = ops.get_collection(collection_key)
if not savers:
raise RuntimeError(
"No items in collection {}. Please add a saver to the collection "
"or provide a saver or scaffold.".format(collection_key))
elif len(savers) > 1:
raise RuntimeError(
"More than one item in collection {}. "
"Please indicate which one to use by passing it to the constructor."
.format(collection_key))
self._saver = savers[0]
return savers[0]
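# Usage sketch (directory and cadence illustrative). In distributed settings
# only the chief should save, hence chief_only_hooks:
#
#   ckpt_hook = CheckpointSaverHook("/tmp/model", save_steps=500)
#   with tf.compat.v1.train.MonitoredTrainingSession(
#       chief_only_hooks=[ckpt_hook]) as sess:
#     ...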
@tf_export(v1=["train.StepCounterHook"])
class StepCounterHook(session_run_hook.SessionRunHook):
"""Hook that counts steps per second."""
def __init__(self,
every_n_steps=100,
every_n_secs=None,
output_dir=None,
summary_writer=None):
if (every_n_steps is None) == (every_n_secs is None):
raise ValueError(
"exactly one of every_n_steps and every_n_secs should be provided.")
self._timer = SecondOrStepTimer(
every_steps=every_n_steps, every_secs=every_n_secs)
self._summary_writer = summary_writer
self._output_dir = output_dir
self._last_global_step = None
self._steps_per_run = 1
def _set_steps_per_run(self, steps_per_run):
self._steps_per_run = steps_per_run
def begin(self):
if self._summary_writer is None and self._output_dir:
self._summary_writer = SummaryWriterCache.get(self._output_dir)
self._global_step_tensor = training_util._get_or_create_global_step_read() # pylint: disable=protected-access
if self._global_step_tensor is None:
raise RuntimeError(
"Global step should be created to use StepCounterHook.")
self._summary_tag = training_util.get_global_step().op.name + "/sec"
def before_run(self, run_context): # pylint: disable=unused-argument
return SessionRunArgs(self._global_step_tensor)
def _log_and_record(self, elapsed_steps, elapsed_time, global_step):
steps_per_sec = elapsed_steps / elapsed_time
if self._summary_writer is not None:
summary = Summary(value=[
Summary.Value(tag=self._summary_tag, simple_value=steps_per_sec)
])
self._summary_writer.add_summary(summary, global_step)
logging.info("%s: %g", self._summary_tag, steps_per_sec)
def after_run(self, run_context, run_values):
_ = run_context
stale_global_step = run_values.results
if self._timer.should_trigger_for_step(stale_global_step +
self._steps_per_run):
# get the real value after train op.
global_step = run_context.session.run(self._global_step_tensor)
if self._timer.should_trigger_for_step(global_step):
elapsed_time, elapsed_steps = self._timer.update_last_triggered_step(
global_step)
if elapsed_time is not None:
self._log_and_record(elapsed_steps, elapsed_time, global_step)
# Check whether the global step has been increased. Here, we do not use the
# timer.last_triggered_step as the timer might record a different global
# step value such that the comparison could be unreliable. For simplicity,
# we just compare the stale_global_step with previously recorded version.
if stale_global_step == self._last_global_step:
# Here, we give a warning in the first 5 times if we have observed that
# the global step has not been increased. For some Optimizers, the global
# step is not increased each time by design. For example,
# SyncReplicaOptimizer doesn't increase the global step in worker's main
# train step.
logging.log_first_n(
logging.WARN,
"It seems that global step (tf.train.get_global_step) has not "
"been increased. Current value (could be stable): %s vs previous "
"value: %s. You could increase the global step by passing "
"tf.train.get_global_step() to Optimizer.apply_gradients or "
"Optimizer.minimize.", 5, stale_global_step, self._last_global_step)
self._last_global_step = stale_global_step
@tf_export(v1=["train.NanLossDuringTrainingError"])
class NanLossDuringTrainingError(RuntimeError):
def __str__(self):
return "NaN loss during training."
@tf_export(v1=["train.NanTensorHook"])
class NanTensorHook(session_run_hook.SessionRunHook):
"""Monitors the loss tensor and stops training if loss is NaN.
Can either fail with exception or just stop training.
"""
def __init__(self, loss_tensor, fail_on_nan_loss=True):
"""Initializes a `NanTensorHook`.
Args:
loss_tensor: `Tensor`, the loss tensor.
fail_on_nan_loss: `bool`, whether to raise exception when loss is NaN.
"""
self._loss_tensor = loss_tensor
self._fail_on_nan_loss = fail_on_nan_loss
def before_run(self, run_context): # pylint: disable=unused-argument
return SessionRunArgs(self._loss_tensor)
def after_run(self, run_context, run_values):
if np.isnan(run_values.results):
failure_message = "Model diverged with loss = NaN."
if self._fail_on_nan_loss:
logging.error(failure_message)
raise NanLossDuringTrainingError
else:
logging.warning(failure_message)
# We don't raise an error but we request stop without an exception.
run_context.request_stop()
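# Usage sketch (loss tensor hypothetical). With fail_on_nan_loss=False the
# hook logs a warning and requests a clean stop instead of raising:
#
#   nan_hook = NanTensorHook(loss, fail_on_nan_loss=False)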
@tf_export(v1=["train.SummarySaverHook"])
class SummarySaverHook(session_run_hook.SessionRunHook):
"""Saves summaries every N steps."""
def __init__(self,
save_steps=None,
save_secs=None,
output_dir=None,
summary_writer=None,
scaffold=None,
summary_op=None):
"""Initializes a `SummarySaverHook`.
Args:
save_steps: `int`, save summaries every N steps. Exactly one of
`save_secs` and `save_steps` should be set.
save_secs: `int`, save summaries every N seconds.
output_dir: `string`, the directory to save the summaries to. Only used if
no `summary_writer` is supplied.
summary_writer: `SummaryWriter`. If `None` and an `output_dir` was passed,
one will be created accordingly.
scaffold: `Scaffold` to get summary_op if it's not provided.
summary_op: `Tensor` of type `string` containing the serialized `Summary`
protocol buffer or a list of `Tensor`. They are most likely an output by
TF summary methods like `tf.compat.v1.summary.scalar` or
`tf.compat.v1.summary.merge_all`. It can be passed in as one tensor; if
more than one, they must be passed in as a list.
Raises:
ValueError: Exactly one of scaffold or summary_op should be set.
"""
if ((scaffold is None and summary_op is None) or
(scaffold is not None and summary_op is not None)):
raise ValueError(
"Exactly one of scaffold or summary_op must be provided.")
self._summary_op = summary_op
self._summary_writer = summary_writer
self._output_dir = output_dir
self._scaffold = scaffold
self._timer = SecondOrStepTimer(
every_secs=save_secs, every_steps=save_steps)
# TODO(mdan): Throw an error if output_dir and summary_writer are None.
def begin(self):
if self._summary_writer is None and self._output_dir:
self._summary_writer = SummaryWriterCache.get(self._output_dir)
self._next_step = None
self._global_step_tensor = training_util._get_or_create_global_step_read() # pylint: disable=protected-access
if self._global_step_tensor is None:
raise RuntimeError(
"Global step should be created to use SummarySaverHook.")
def before_run(self, run_context): # pylint: disable=unused-argument
self._request_summary = (
self._next_step is None or
self._timer.should_trigger_for_step(self._next_step))
requests = {"global_step": self._global_step_tensor}
if self._request_summary:
if self._get_summary_op() is not None:
requests["summary"] = self._get_summary_op()
return SessionRunArgs(requests)
def after_run(self, run_context, run_values):
_ = run_context
if not self._summary_writer:
return
stale_global_step = run_values.results["global_step"]
global_step = stale_global_step + 1
if self._next_step is None or self._request_summary:
global_step = run_context.session.run(self._global_step_tensor)
if self._next_step is None:
self._summary_writer.add_session_log(
SessionLog(status=SessionLog.START), global_step)
if self._request_summary:
self._timer.update_last_triggered_step(global_step)
if "summary" in run_values.results:
for summary in run_values.results["summary"]:
self._summary_writer.add_summary(summary, global_step)
self._next_step = global_step + 1
def end(self, session=None):
if self._summary_writer:
self._summary_writer.flush()
def _get_summary_op(self):
"""Fetches the summary op either from self._summary_op or self._scaffold.
Returns:
      A list of summary `Tensor`s, or None if no summary op is available.
"""
summary_op = None
if self._summary_op is not None:
summary_op = self._summary_op
elif self._scaffold.summary_op is not None:
summary_op = self._scaffold.summary_op
if summary_op is None:
return None
if not isinstance(summary_op, list):
return [summary_op]
return summary_op
@tf_export(v1=["train.GlobalStepWaiterHook"])
class GlobalStepWaiterHook(session_run_hook.SessionRunHook):
"""Delays execution until global step reaches `wait_until_step`.
  This hook delays execution until the global step reaches `wait_until_step`.
  It is used to gradually start workers in distributed settings. One example
  usage would be setting `wait_until_step=int(K*log(task_id+1))`, assuming
  that task_id=0 is the chief.
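
  For example (a sketch; `K` and `task_id` come from the surrounding
  distributed setup, and `log` is `math.log`):

    hook = GlobalStepWaiterHook(wait_until_step=int(K * log(task_id + 1)))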
"""
def __init__(self, wait_until_step):
"""Initializes a `GlobalStepWaiterHook`.
Args:
      wait_until_step: an `int` specifying the global step to wait for before
        starting.
"""
self._wait_until_step = wait_until_step
def begin(self):
self._worker_is_started = False
self._global_step_tensor = training_util._get_or_create_global_step_read() # pylint: disable=protected-access
if self._global_step_tensor is None:
raise RuntimeError(
"Global step should be created to use _GlobalStepWaiterHook.")
def before_run(self, run_context):
if self._worker_is_started:
return None
if self._wait_until_step <= 0:
self._worker_is_started = True
return None
logging.info("Waiting for global step %d before starting training.",
self._wait_until_step)
last_logged_step = 0
while True:
current_step = run_context.session.run(self._global_step_tensor)
if current_step >= self._wait_until_step:
self._worker_is_started = True
return None
if current_step - last_logged_step > 1000:
logging.info(
"Waiting for global step %d before starting training. "
"Current step is %d.", self._wait_until_step, current_step)
last_logged_step = current_step
time.sleep(0.5)
@tf_export(v1=["train.FinalOpsHook"])
class FinalOpsHook(session_run_hook.SessionRunHook):
"""A hook which evaluates `Tensors` at the end of a session."""
def __init__(self, final_ops, final_ops_feed_dict=None):
"""Initializes `FinalOpHook` with ops to run at the end of the session.
Args:
final_ops: A single `Tensor`, a list of `Tensors` or a dictionary of names
to `Tensors`.
final_ops_feed_dict: A feed dictionary to use when running
        `final_ops`.
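
    Example (a sketch; `accuracy` and `update_op` are assumed to be `Tensor`s
    defined elsewhere):

      hook = FinalOpsHook({"accuracy": accuracy})
      with tf.compat.v1.train.MonitoredSession(hooks=[hook]) as sess:
        sess.run(update_op)
      print(hook.final_ops_values["accuracy"])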
"""
self._final_ops = final_ops
self._final_ops_feed_dict = final_ops_feed_dict
self._final_ops_values = None
@property
def final_ops_values(self):
return self._final_ops_values
def end(self, session):
if self._final_ops is not None:
try:
self._final_ops_values = session.run(
self._final_ops, feed_dict=self._final_ops_feed_dict)
except (errors.OutOfRangeError, StopIteration) as e:
        logging.warning(
            "An OutOfRangeError or StopIteration exception is raised by the "
            "code in FinalOpsHook. This typically means the ops run by the "
            "FinalOpsHook have a dependency back to some input source, which "
            "should not happen. For example, for metrics in "
            "tf.estimator.Estimator, all metrics functions return two ops: "
            "`value_op` and `update_op`. Estimator.evaluate calls the "
            "`update_op` for each batch of the data in the input source and, "
            "once it is exhausted, calls the `value_op` to get the metric "
            "values. The `value_op` here should depend only on reading "
            "variables, rather than reading another batch from the input. "
            "Otherwise, the `value_op`, executed by `FinalOpsHook`, triggers "
            "another data read, which ends in OutOfRangeError/StopIteration. "
            "Please fix that.")
raise e
@tf_export(v1=["train.FeedFnHook"])
class FeedFnHook(session_run_hook.SessionRunHook):
"""Runs `feed_fn` and sets the `feed_dict` accordingly."""
def __init__(self, feed_fn):
"""Initializes a `FeedFnHook`.
Args:
feed_fn: function that takes no arguments and returns `dict` of `Tensor`
to feed.
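
    Example (a sketch; `x` is assumed to be a `tf.compat.v1.placeholder`
    defined elsewhere):

      hook = FeedFnHook(feed_fn=lambda: {x: np.random.rand(8, 4)})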
"""
self.feed_fn = feed_fn
def before_run(self, run_context): # pylint: disable=unused-argument
return session_run_hook.SessionRunArgs(
fetches=None, feed_dict=self.feed_fn())
@tf_export(v1=["train.ProfilerHook"])
class ProfilerHook(session_run_hook.SessionRunHook):
"""Captures CPU/GPU profiling information every N steps or seconds.
This produces files called "timeline-<step>.json", which are in Chrome
Trace format.
For more information see:
https://github.com/catapult-project/catapult/blob/master/tracing/README.md
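
  Example usage (a sketch; `train_op` is assumed to be defined elsewhere, and
  the output directory is an arbitrary choice):

    hook = ProfilerHook(save_steps=100, output_dir="/tmp/profile")
    with tf.compat.v1.train.MonitoredTrainingSession(hooks=[hook]) as sess:
      while not sess.should_stop():
        sess.run(train_op)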
"""
def __init__(self,
save_steps=None,
save_secs=None,
output_dir="",
show_dataflow=True,
show_memory=False):
"""Initializes a hook that takes periodic profiling snapshots.
`options.run_metadata` argument of `tf.Session.Run` is used to collect
metadata about execution. This hook sets the metadata and dumps it in Chrome
Trace format.
Args:
save_steps: `int`, save profile traces every N steps. Exactly one of
`save_secs` and `save_steps` should be set.
save_secs: `int` or `float`, save profile traces every N seconds.
output_dir: `string`, the directory to save the profile traces to.
Defaults to the current directory.
show_dataflow: `bool`, if True, add flow events to the trace connecting
producers and consumers of tensors.
show_memory: `bool`, if True, add object snapshot events to the trace
showing the sizes and lifetimes of tensors.
"""
self._output_file = os.path.join(output_dir, "timeline-{}.json")
self._file_writer = SummaryWriterCache.get(output_dir)
self._show_dataflow = show_dataflow
self._show_memory = show_memory
self._timer = SecondOrStepTimer(
every_secs=save_secs, every_steps=save_steps)
def begin(self):
self._next_step = None
self._global_step_tensor = training_util._get_or_create_global_step_read() # pylint: disable=protected-access
if self._global_step_tensor is None:
raise RuntimeError("Global step should be created to use ProfilerHook.")
def before_run(self, run_context):
self._request_summary = (
self._next_step is not None and
self._timer.should_trigger_for_step(self._next_step))
requests = {"global_step": self._global_step_tensor}
opts = (
config_pb2.RunOptions(trace_level=config_pb2.RunOptions.FULL_TRACE)
if self._request_summary else None)
return SessionRunArgs(requests, options=opts)
def after_run(self, run_context, run_values):
stale_global_step = run_values.results["global_step"]
if self._next_step is None:
# Update the timer so that it does not activate until N steps or seconds
# have passed.
self._timer.update_last_triggered_step(stale_global_step)
global_step = stale_global_step + 1
if self._request_summary:
global_step = run_context.session.run(self._global_step_tensor)
self._timer.update_last_triggered_step(global_step)
self._save(global_step, self._output_file.format(global_step),
run_values.run_metadata.step_stats)
self._file_writer.add_run_metadata(run_values.run_metadata,
"step_%d" % global_step)
self._next_step = global_step + 1
def _save(self, step, save_path, step_stats):
logging.info("Saving timeline for %d into '%s'.", step, save_path)
with gfile.Open(save_path, "w") as f:
trace = timeline.Timeline(step_stats)
f.write(
trace.generate_chrome_trace_format(
show_dataflow=self._show_dataflow, show_memory=self._show_memory))
def _as_graph_element(obj):
"""Retrieves Graph element."""
graph = ops.get_default_graph()
if not isinstance(obj, six.string_types):
if not hasattr(obj, "graph") or obj.graph != graph:
raise ValueError("Passed %s should have graph attribute that is equal "
"to current graph %s." % (obj, graph))
return obj
if ":" in obj:
element = graph.as_graph_element(obj)
else:
element = graph.as_graph_element(obj + ":0")
  # Check that there is no ":1" output (i.e. the op has a single output).
try:
graph.as_graph_element(obj + ":1")
except (KeyError, ValueError):
pass
else:
raise ValueError("Name %s is ambiguous, "
"as this `Operation` has multiple outputs "
"(at least 2)." % obj)
return element
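# Example: _as_graph_element("loss") returns the tensor "loss:0" from the
# default graph, and raises ValueError if the "loss" op has more than one
# output, since the bare name would then be ambiguous.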
|
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from proto import prediction_pb2 as proto_dot_prediction__pb2
class GenericStub(object):
"""[END Messages]
[START Services]
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.TransformInput = channel.unary_unary(
'/seldon.protos.Generic/TransformInput',
request_serializer=proto_dot_prediction__pb2.SeldonMessage.SerializeToString,
response_deserializer=proto_dot_prediction__pb2.SeldonMessage.FromString,
)
self.TransformOutput = channel.unary_unary(
'/seldon.protos.Generic/TransformOutput',
request_serializer=proto_dot_prediction__pb2.SeldonMessage.SerializeToString,
response_deserializer=proto_dot_prediction__pb2.SeldonMessage.FromString,
)
self.Route = channel.unary_unary(
'/seldon.protos.Generic/Route',
request_serializer=proto_dot_prediction__pb2.SeldonMessage.SerializeToString,
response_deserializer=proto_dot_prediction__pb2.SeldonMessage.FromString,
)
self.Aggregate = channel.unary_unary(
'/seldon.protos.Generic/Aggregate',
request_serializer=proto_dot_prediction__pb2.SeldonMessageList.SerializeToString,
response_deserializer=proto_dot_prediction__pb2.SeldonMessage.FromString,
)
self.SendFeedback = channel.unary_unary(
'/seldon.protos.Generic/SendFeedback',
request_serializer=proto_dot_prediction__pb2.Feedback.SerializeToString,
response_deserializer=proto_dot_prediction__pb2.SeldonMessage.FromString,
)
class GenericServicer(object):
"""[END Messages]
[START Services]
"""
def TransformInput(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def TransformOutput(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Route(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Aggregate(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SendFeedback(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_GenericServicer_to_server(servicer, server):
rpc_method_handlers = {
'TransformInput': grpc.unary_unary_rpc_method_handler(
servicer.TransformInput,
request_deserializer=proto_dot_prediction__pb2.SeldonMessage.FromString,
response_serializer=proto_dot_prediction__pb2.SeldonMessage.SerializeToString,
),
'TransformOutput': grpc.unary_unary_rpc_method_handler(
servicer.TransformOutput,
request_deserializer=proto_dot_prediction__pb2.SeldonMessage.FromString,
response_serializer=proto_dot_prediction__pb2.SeldonMessage.SerializeToString,
),
'Route': grpc.unary_unary_rpc_method_handler(
servicer.Route,
request_deserializer=proto_dot_prediction__pb2.SeldonMessage.FromString,
response_serializer=proto_dot_prediction__pb2.SeldonMessage.SerializeToString,
),
'Aggregate': grpc.unary_unary_rpc_method_handler(
servicer.Aggregate,
request_deserializer=proto_dot_prediction__pb2.SeldonMessageList.FromString,
response_serializer=proto_dot_prediction__pb2.SeldonMessage.SerializeToString,
),
'SendFeedback': grpc.unary_unary_rpc_method_handler(
servicer.SendFeedback,
request_deserializer=proto_dot_prediction__pb2.Feedback.FromString,
response_serializer=proto_dot_prediction__pb2.SeldonMessage.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'seldon.protos.Generic', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
class ModelStub(object):
# missing associated documentation comment in .proto file
pass
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.Predict = channel.unary_unary(
'/seldon.protos.Model/Predict',
request_serializer=proto_dot_prediction__pb2.SeldonMessage.SerializeToString,
response_deserializer=proto_dot_prediction__pb2.SeldonMessage.FromString,
)
class ModelServicer(object):
# missing associated documentation comment in .proto file
pass
def Predict(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_ModelServicer_to_server(servicer, server):
rpc_method_handlers = {
'Predict': grpc.unary_unary_rpc_method_handler(
servicer.Predict,
request_deserializer=proto_dot_prediction__pb2.SeldonMessage.FromString,
response_serializer=proto_dot_prediction__pb2.SeldonMessage.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'seldon.protos.Model', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
class RouterStub(object):
# missing associated documentation comment in .proto file
pass
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.Route = channel.unary_unary(
'/seldon.protos.Router/Route',
request_serializer=proto_dot_prediction__pb2.SeldonMessage.SerializeToString,
response_deserializer=proto_dot_prediction__pb2.SeldonMessage.FromString,
)
self.SendFeedback = channel.unary_unary(
'/seldon.protos.Router/SendFeedback',
request_serializer=proto_dot_prediction__pb2.Feedback.SerializeToString,
response_deserializer=proto_dot_prediction__pb2.SeldonMessage.FromString,
)
class RouterServicer(object):
# missing associated documentation comment in .proto file
pass
def Route(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SendFeedback(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_RouterServicer_to_server(servicer, server):
rpc_method_handlers = {
'Route': grpc.unary_unary_rpc_method_handler(
servicer.Route,
request_deserializer=proto_dot_prediction__pb2.SeldonMessage.FromString,
response_serializer=proto_dot_prediction__pb2.SeldonMessage.SerializeToString,
),
'SendFeedback': grpc.unary_unary_rpc_method_handler(
servicer.SendFeedback,
request_deserializer=proto_dot_prediction__pb2.Feedback.FromString,
response_serializer=proto_dot_prediction__pb2.SeldonMessage.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'seldon.protos.Router', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
class TransformerStub(object):
# missing associated documentation comment in .proto file
pass
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.TransformInput = channel.unary_unary(
'/seldon.protos.Transformer/TransformInput',
request_serializer=proto_dot_prediction__pb2.SeldonMessage.SerializeToString,
response_deserializer=proto_dot_prediction__pb2.SeldonMessage.FromString,
)
class TransformerServicer(object):
# missing associated documentation comment in .proto file
pass
def TransformInput(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_TransformerServicer_to_server(servicer, server):
rpc_method_handlers = {
'TransformInput': grpc.unary_unary_rpc_method_handler(
servicer.TransformInput,
request_deserializer=proto_dot_prediction__pb2.SeldonMessage.FromString,
response_serializer=proto_dot_prediction__pb2.SeldonMessage.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'seldon.protos.Transformer', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
class OutputTransformerStub(object):
# missing associated documentation comment in .proto file
pass
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.TransformOutput = channel.unary_unary(
'/seldon.protos.OutputTransformer/TransformOutput',
request_serializer=proto_dot_prediction__pb2.SeldonMessage.SerializeToString,
response_deserializer=proto_dot_prediction__pb2.SeldonMessage.FromString,
)
class OutputTransformerServicer(object):
# missing associated documentation comment in .proto file
pass
def TransformOutput(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_OutputTransformerServicer_to_server(servicer, server):
rpc_method_handlers = {
'TransformOutput': grpc.unary_unary_rpc_method_handler(
servicer.TransformOutput,
request_deserializer=proto_dot_prediction__pb2.SeldonMessage.FromString,
response_serializer=proto_dot_prediction__pb2.SeldonMessage.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'seldon.protos.OutputTransformer', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
class CombinerStub(object):
# missing associated documentation comment in .proto file
pass
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.Aggregate = channel.unary_unary(
'/seldon.protos.Combiner/Aggregate',
request_serializer=proto_dot_prediction__pb2.SeldonMessageList.SerializeToString,
response_deserializer=proto_dot_prediction__pb2.SeldonMessage.FromString,
)
class CombinerServicer(object):
# missing associated documentation comment in .proto file
pass
def Aggregate(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_CombinerServicer_to_server(servicer, server):
rpc_method_handlers = {
'Aggregate': grpc.unary_unary_rpc_method_handler(
servicer.Aggregate,
request_deserializer=proto_dot_prediction__pb2.SeldonMessageList.FromString,
response_serializer=proto_dot_prediction__pb2.SeldonMessage.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'seldon.protos.Combiner', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
class SeldonStub(object):
# missing associated documentation comment in .proto file
pass
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.Predict = channel.unary_unary(
'/seldon.protos.Seldon/Predict',
request_serializer=proto_dot_prediction__pb2.SeldonMessage.SerializeToString,
response_deserializer=proto_dot_prediction__pb2.SeldonMessage.FromString,
)
self.SendFeedback = channel.unary_unary(
'/seldon.protos.Seldon/SendFeedback',
request_serializer=proto_dot_prediction__pb2.Feedback.SerializeToString,
response_deserializer=proto_dot_prediction__pb2.SeldonMessage.FromString,
)
class SeldonServicer(object):
# missing associated documentation comment in .proto file
pass
def Predict(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SendFeedback(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_SeldonServicer_to_server(servicer, server):
rpc_method_handlers = {
'Predict': grpc.unary_unary_rpc_method_handler(
servicer.Predict,
request_deserializer=proto_dot_prediction__pb2.SeldonMessage.FromString,
response_serializer=proto_dot_prediction__pb2.SeldonMessage.SerializeToString,
),
'SendFeedback': grpc.unary_unary_rpc_method_handler(
servicer.SendFeedback,
request_deserializer=proto_dot_prediction__pb2.Feedback.FromString,
response_serializer=proto_dot_prediction__pb2.SeldonMessage.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'seldon.protos.Seldon', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
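# Example server wiring (a sketch, not part of the generated code): register
# the ModelServicer and serve on an insecure port. Port 50051 is an arbitrary
# choice, and a real service would subclass ModelServicer to override Predict;
# grpc.Server.wait_for_termination requires grpcio>=1.24.
if __name__ == '__main__':
  from concurrent import futures
  server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
  add_ModelServicer_to_server(ModelServicer(), server)
  server.add_insecure_port('[::]:50051')
  server.start()
  server.wait_for_termination()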
|
|
from __future__ import print_function
import unittest
from discretize import TensorMesh
from SimPEG import utils
import numpy as np
from SimPEG.electromagnetics import resistivity as dc
try:
from pymatsolver import Pardiso as Solver
except ImportError:
from SimPEG import SolverLU as Solver
from geoana.em import fdem
from scipy.constants import mu_0, epsilon_0
class DC_CC_DipoleFullspaceTests(unittest.TestCase):
def setUp(self):
cs = 0.5
npad = 11
hx = [(cs, npad, -1.5), (cs, 15), (cs, npad, 1.5)]
hy = [(cs, npad, -1.5), (cs, 15), (cs, npad, 1.5)]
hz = [(cs, npad, -1.5), (cs, 15), (cs, npad, 1.5)]
mesh = TensorMesh([hx, hy, hz], x0="CCC")
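        # each of hx, hy, hz: 11 padding cells expanding geometrically by a
        # factor of 1.5 on both sides of 15 core cells of width cs = 0.5 m;
        # x0="CCC" centers the mesh on the origin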
sigma = np.ones(mesh.nC) * 1e-2
# Set up survey parameters for numeric solution
x = mesh.vectorCCx[(mesh.vectorCCx > -75.0) & (mesh.vectorCCx < 75.0)]
y = mesh.vectorCCy[(mesh.vectorCCy > -75.0) & (mesh.vectorCCy < 75.0)]
Aloc = np.r_[1.0, 0.0, 0.0]
Bloc = np.r_[-1.0, 0.0, 0.0]
M = utils.ndgrid(x - 25.0, y, np.r_[0.0])
N = utils.ndgrid(x + 25.0, y, np.r_[0.0])
rx = dc.receivers.Dipole(M, N)
src = dc.sources.Dipole([rx], Aloc, Bloc)
survey = dc.survey.Survey([src])
# Create Dipole Obj for Analytic Solution
edipole = fdem.ElectricDipoleWholeSpace(
            sigma=1e-2,  # conductivity of 0.01 S/m
mu=mu_0, # permeability of free space (this is the default)
epsilon=epsilon_0, # permittivity of free space (this is the default)
location=np.r_[0.0, 0.0, 0.0], # location of the dipole
orientation="X", # horizontal dipole (can also be a unit-vector)
            quasistatic=True,  # use the quasistatic approximation
frequency=0.0, # DC
length=2.0, # length of dipole
)
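        # the 2.0 m dipole length matches the 2.0 m separation between Aloc
        # and Bloc above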
# evaluate the electric field and current density
Ex_analytic = np.real(edipole.electric_field(mesh.gridFx))[:, 0]
Ey_analytic = np.real(edipole.electric_field(mesh.gridFy))[:, 1]
Ez_analytic = np.real(edipole.electric_field(mesh.gridFz))[:, 2]
E_analytic = np.hstack([Ex_analytic, Ey_analytic, Ez_analytic])
Jx_analytic = np.real(edipole.current_density(mesh.gridFx))[:, 0]
Jy_analytic = np.real(edipole.current_density(mesh.gridFy))[:, 1]
Jz_analytic = np.real(edipole.current_density(mesh.gridFz))[:, 2]
J_analytic = np.hstack([Jx_analytic, Jy_analytic, Jz_analytic])
# Find faces at which to compare solutions
faceGrid = np.vstack([mesh.gridFx, mesh.gridFy, mesh.gridFz])
# print(faceGrid.shape)
ROI_large_BNW = np.array([-75, 75, -75])
ROI_large_TSE = np.array([75, -75, 75])
ROI_largeInds = utils.model_builder.getIndicesBlock(
ROI_large_BNW, ROI_large_TSE, faceGrid
)[0]
# print(ROI_largeInds.shape)
ROI_small_BNW = np.array([-4, 4, -4])
ROI_small_TSE = np.array([4, -4, 4])
ROI_smallInds = utils.model_builder.getIndicesBlock(
ROI_small_BNW, ROI_small_TSE, faceGrid
)[0]
# print(ROI_smallInds.shape)
ROIfaceInds = np.setdiff1d(ROI_largeInds, ROI_smallInds)
# print(ROIfaceInds.shape)
# print(len(ROI_largeInds) - len(ROI_smallInds))
self.survey = survey
self.mesh = mesh
self.sigma = sigma
self.E_analytic = E_analytic
self.J_analytic = J_analytic
self.ROIfaceInds = ROIfaceInds
def test_Simulation3DCellCentered_Dirichlet(self, tolerance=0.1):
simulation = dc.Simulation3DCellCentered(
self.mesh, survey=self.survey, sigma=self.sigma, bc_type="Dirichlet"
)
simulation.solver = Solver
# f = simulation.fields()
f = simulation.fields(self.sigma)
eNumeric = utils.mkvc(f[self.survey.source_list, "e"])
jNumeric = utils.mkvc(f[self.survey.source_list, "j"])
# also test we can get charge and charge density
f[:, "charge"]
f[:, "charge_density"]
        errE = np.linalg.norm(
            eNumeric[self.ROIfaceInds] - self.E_analytic[self.ROIfaceInds]
        ) / np.linalg.norm(self.E_analytic[self.ROIfaceInds])
        errJ = np.linalg.norm(
            jNumeric[self.ROIfaceInds] - self.J_analytic[self.ROIfaceInds]
        ) / np.linalg.norm(self.J_analytic[self.ROIfaceInds])
if errE < tolerance and errJ < tolerance:
print("\n")
print("E field error =", errE)
print("J field error =", errJ)
passed = True
print(">> DC analytic test for Simulation3DCellCentered_Dirichlet passed")
else:
print("\n")
print("E field error =", errE)
print("J field error =", errJ)
passed = False
print(">> DC analytic test for Simulation3DCellCentered_Dirchlet failed")
self.assertTrue(passed)
def test_Simulation3DCellCentered_Mixed(self, tolerance=0.1):
simulation = dc.simulation.Simulation3DCellCentered(
self.mesh, survey=self.survey, sigma=self.sigma, bc_type="Mixed"
)
simulation.solver = Solver
f = simulation.fields(self.sigma)
eNumeric = utils.mkvc(f[self.survey.source_list, "e"])
jNumeric = utils.mkvc(f[self.survey.source_list, "j"])
        errE = np.linalg.norm(
            eNumeric[self.ROIfaceInds] - self.E_analytic[self.ROIfaceInds]
        ) / np.linalg.norm(self.E_analytic[self.ROIfaceInds])
        errJ = np.linalg.norm(
            jNumeric[self.ROIfaceInds] - self.J_analytic[self.ROIfaceInds]
        ) / np.linalg.norm(self.J_analytic[self.ROIfaceInds])
if errE < tolerance and errJ < tolerance:
print("\n")
print("E field error =", errE)
print("J field error =", errJ)
passed = True
print(">> DC analytic test for Simulation3DCellCentered_Mixed passed")
else:
print("\n")
print("E field error =", errE)
print("J field error =", errJ)
passed = False
print(">> DC analytic test for Simulation3DCellCentered_Mixed failed")
self.assertTrue(passed)
def test_Simulation3DCellCentered_Neumann(self, tolerance=0.1):
simulation = dc.Simulation3DCellCentered(
self.mesh, survey=self.survey, sigma=self.sigma, bc_type="Neumann"
)
simulation.solver = Solver
f = simulation.fields(self.sigma)
eNumeric = utils.mkvc(f[self.survey.source_list, "e"])
jNumeric = utils.mkvc(f[self.survey.source_list, "j"])
        errE = np.linalg.norm(
            eNumeric[self.ROIfaceInds] - self.E_analytic[self.ROIfaceInds]
        ) / np.linalg.norm(self.E_analytic[self.ROIfaceInds])
        errJ = np.linalg.norm(
            jNumeric[self.ROIfaceInds] - self.J_analytic[self.ROIfaceInds]
        ) / np.linalg.norm(self.J_analytic[self.ROIfaceInds])
if errE < tolerance and errJ < tolerance:
print("\n")
print("E field error =", errE)
print("J field error =", errJ)
passed = True
print(">> DC analytic test for Simulation3DCellCentered_Neumann passed")
else:
print("\n")
print("E field error =", errE)
print("J field error =", errJ)
passed = False
print(">> DC analytic test for Simulation3DCellCentered_Neumann failed")
self.assertTrue(passed)
class DC_N_DipoleFullspaceTests(unittest.TestCase):
def setUp(self):
cs = 0.5
npad = 11
hx = [(cs, npad, -1.5), (cs, 15), (cs, npad, 1.5)]
hy = [(cs, npad, -1.5), (cs, 15), (cs, npad, 1.5)]
hz = [(cs, npad, -1.5), (cs, 15), (cs, npad, 1.5)]
mesh = TensorMesh([hx, hy, hz], x0="CCC")
sigma = np.ones(mesh.nC) * 1e-2
# Set up survey parameters for numeric solution
x = mesh.vectorNx[(mesh.vectorNx > -75.0) & (mesh.vectorNx < 75.0)]
y = mesh.vectorNy[(mesh.vectorNy > -75.0) & (mesh.vectorNy < 75.0)]
Aloc = np.r_[1.25, 0.0, 0.0]
Bloc = np.r_[-1.25, 0.0, 0.0]
M = utils.ndgrid(x - 25.0, y, np.r_[0.0])
N = utils.ndgrid(x + 25.0, y, np.r_[0.0])
rx = dc.receivers.Dipole(M, N)
src = dc.sources.Dipole([rx], Aloc, Bloc)
survey = dc.survey.Survey([src])
# Create Dipole Obj for Analytic Solution
edipole = fdem.ElectricDipoleWholeSpace(
            sigma=1e-2,  # conductivity of 0.01 S/m
mu=mu_0, # permeability of free space (this is the default)
epsilon=epsilon_0, # permittivity of free space (this is the default)
location=np.r_[0.0, 0.0, 0.0], # location of the dipole
orientation="X", # horizontal dipole (can also be a unit-vector)
            quasistatic=True,  # use the quasistatic approximation
frequency=0.0, # DC
length=2.5, # length of dipole
)
# evaluate the electric field and current density
Ex_analytic = np.real(edipole.electric_field(mesh.gridEx))[:, 0]
Ey_analytic = np.real(edipole.electric_field(mesh.gridEy))[:, 1]
Ez_analytic = np.real(edipole.electric_field(mesh.gridEz))[:, 2]
E_analytic = np.hstack([Ex_analytic, Ey_analytic, Ez_analytic])
Jx_analytic = np.real(edipole.current_density(mesh.gridEx))[:, 0]
Jy_analytic = np.real(edipole.current_density(mesh.gridEy))[:, 1]
Jz_analytic = np.real(edipole.current_density(mesh.gridEz))[:, 2]
J_analytic = np.hstack([Jx_analytic, Jy_analytic, Jz_analytic])
# Find edges at which to compare solutions
edgeGrid = np.vstack([mesh.gridEx, mesh.gridEy, mesh.gridEz])
        # print(edgeGrid.shape)
ROI_large_BNW = np.array([-75, 75, -75])
ROI_large_TSE = np.array([75, -75, 75])
ROI_largeInds = utils.model_builder.getIndicesBlock(
ROI_large_BNW, ROI_large_TSE, edgeGrid
)[0]
# print(ROI_largeInds.shape)
ROI_small_BNW = np.array([-4, 4, -4])
ROI_small_TSE = np.array([4, -4, 4])
ROI_smallInds = utils.model_builder.getIndicesBlock(
ROI_small_BNW, ROI_small_TSE, edgeGrid
)[0]
# print(ROI_smallInds.shape)
ROIedgeInds = np.setdiff1d(ROI_largeInds, ROI_smallInds)
# print(ROIedgeInds.shape)
# print(len(ROI_largeInds) - len(ROI_smallInds))
self.survey = survey
self.mesh = mesh
self.sigma = sigma
self.E_analytic = E_analytic
self.J_analytic = J_analytic
self.ROIedgeInds = ROIedgeInds
def test_Simulation3DNodal(self, tolerance=0.1):
simulation = dc.simulation.Simulation3DNodal(
self.mesh, survey=self.survey, sigma=self.sigma
)
simulation.solver = Solver
f = simulation.fields(self.sigma)
eNumeric = utils.mkvc(f[self.survey.source_list, "e"])
jNumeric = utils.mkvc(f[self.survey.source_list, "j"])
# also test if we can get charge and charge_density
f[:, "charge"]
f[:, "charge_density"]
        errE = np.linalg.norm(
            eNumeric[self.ROIedgeInds] - self.E_analytic[self.ROIedgeInds]
        ) / np.linalg.norm(self.E_analytic[self.ROIedgeInds])
        errJ = np.linalg.norm(
            jNumeric[self.ROIedgeInds] - self.J_analytic[self.ROIedgeInds]
        ) / np.linalg.norm(self.J_analytic[self.ROIedgeInds])
if errE < tolerance and errJ < tolerance:
print("\n")
print("E field error =", errE)
print("J field error =", errJ)
passed = True
print(">> DC analytic test for Simulation3DNodal passed")
else:
print("\n")
print("E field error =", errE)
print("J field error =", errJ)
passed = False
print(">> DC analytic test for Simulation3DNodal failed")
self.assertTrue(passed)
if __name__ == "__main__":
unittest.main()
|
|
from django.core.exceptions import ImproperlyConfigured
from ..settings import PUSH_NOTIFICATIONS_SETTINGS as SETTINGS
from .base import BaseConfig, check_apns_certificate
SETTING_MISMATCH = (
"Application '{application_id}' ({platform}) does not support the setting '{setting}'."
)
# code can be "missing" or "invalid"
BAD_PLATFORM = (
'PUSH_NOTIFICATIONS_SETTINGS.APPLICATIONS["{application_id}"]["PLATFORM"] is {code}. '
"Must be one of: {platforms}."
)
UNKNOWN_PLATFORM = (
"Unknown Platform: {platform}. Must be one of: {platforms}."
)
MISSING_SETTING = (
'PUSH_NOTIFICATIONS_SETTINGS.APPLICATIONS["{application_id}"]["{setting}"] is missing.'
)
PLATFORMS = [
"APNS",
"FCM",
"GCM",
"WNS",
"WP",
]
# Settings that all applications must have
REQUIRED_SETTINGS = [
"PLATFORM",
]
# Settings that an application may have to enable optional features
# these settings are stubs for registry support and have no effect on the operation
# of the application at this time.
OPTIONAL_SETTINGS = [
"APPLICATION_GROUP", "APPLICATION_SECRET"
]
# Since we can have an auth key (combined with an auth key id and team id)
# *or* a certificate, we make these all optional, and then make sure we have
# one or the other group of settings.
APNS_SETTINGS_CERT_CREDS = "CERTIFICATE"
# Settings required for APNS token-based (JWT) authentication
APNS_AUTH_CREDS_REQUIRED = ["AUTH_KEY_PATH", "AUTH_KEY_ID", "TEAM_ID"]
APNS_AUTH_CREDS_OPTIONAL = ["CERTIFICATE", "ENCRYPTION_ALGORITHM", "TOKEN_LIFETIME"]
APNS_OPTIONAL_SETTINGS = [
"USE_SANDBOX", "USE_ALTERNATIVE_PORT", "TOPIC"
]
FCM_REQUIRED_SETTINGS = GCM_REQUIRED_SETTINGS = ["API_KEY"]
FCM_OPTIONAL_SETTINGS = GCM_OPTIONAL_SETTINGS = [
"POST_URL", "MAX_RECIPIENTS", "ERROR_TIMEOUT"
]
WNS_REQUIRED_SETTINGS = ["PACKAGE_SECURITY_ID", "SECRET_KEY"]
WNS_OPTIONAL_SETTINGS = ["WNS_ACCESS_URL"]
WP_REQUIRED_SETTINGS = ["PRIVATE_KEY", "CLAIMS"]
WP_OPTIONAL_SETTINGS = ["ERROR_TIMEOUT", "POST_URL"]
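# Example of a minimal APPLICATIONS configuration (illustrative only; the
# application ids, API key and certificate path below are hypothetical):
#
#   PUSH_NOTIFICATIONS_SETTINGS = {
#       "APPLICATIONS": {
#           "my_fcm_app": {"PLATFORM": "FCM", "API_KEY": "<api-key>"},
#           "my_apns_app": {
#               "PLATFORM": "APNS",
#               "CERTIFICATE": "/path/to/cert.pem",
#           },
#       },
#   }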
class AppConfig(BaseConfig):
"""
Supports any number of push notification enabled applications.
"""
def __init__(self, settings=None):
# supports overriding the settings to be loaded. Will load from ..settings by default.
self._settings = settings or SETTINGS
# initialize APPLICATIONS to an empty collection
self._settings.setdefault("APPLICATIONS", {})
# validate application configurations
self._validate_applications(self._settings["APPLICATIONS"])
def _validate_applications(self, apps):
"""Validate the application collection"""
for application_id, application_config in apps.items():
self._validate_config(application_id, application_config)
application_config["APPLICATION_ID"] = application_id
def _validate_config(self, application_id, application_config):
platform = application_config.get("PLATFORM", None)
# platform is not present
if platform is None:
raise ImproperlyConfigured(
BAD_PLATFORM.format(
application_id=application_id,
code="required",
platforms=", ".join(PLATFORMS)
)
)
# platform is not a valid choice from PLATFORMS
if platform not in PLATFORMS:
raise ImproperlyConfigured(
BAD_PLATFORM.format(
application_id=application_id,
code="invalid",
platforms=", ".join(PLATFORMS)
)
)
validate_fn = "_validate_{platform}_config".format(platform=platform).lower()
if hasattr(self, validate_fn):
getattr(self, validate_fn)(application_id, application_config)
else:
raise ImproperlyConfigured(
UNKNOWN_PLATFORM.format(
platform=platform,
platforms=", ".join(PLATFORMS)
)
)
def _validate_apns_config(self, application_id, application_config):
        allowed = (
            REQUIRED_SETTINGS + OPTIONAL_SETTINGS +
            APNS_AUTH_CREDS_REQUIRED + APNS_AUTH_CREDS_OPTIONAL +
            APNS_OPTIONAL_SETTINGS
        )
self._validate_allowed_settings(application_id, application_config, allowed)
        # We have two groups of settings: certificate and JWT auth key.
        # The auth key group requires 3 values, so token auth is usable only
        # if all of those are set. If neither group is set, we throw an error.
        has_cert_creds = APNS_SETTINGS_CERT_CREDS in application_config
self.has_token_creds = True
for token_setting in APNS_AUTH_CREDS_REQUIRED:
if token_setting not in application_config.keys():
self.has_token_creds = False
break
if not has_cert_creds and not self.has_token_creds:
raise ImproperlyConfigured(
MISSING_SETTING.format(
application_id=application_id,
setting=(APNS_SETTINGS_CERT_CREDS, APNS_AUTH_CREDS_REQUIRED)))
cert_path = None
if has_cert_creds:
cert_path = "CERTIFICATE"
elif self.has_token_creds:
cert_path = "AUTH_KEY_PATH"
        allowed_tokens = (
            APNS_AUTH_CREDS_REQUIRED + APNS_AUTH_CREDS_OPTIONAL +
            APNS_OPTIONAL_SETTINGS + REQUIRED_SETTINGS
        )
self._validate_allowed_settings(application_id, application_config, allowed_tokens)
self._validate_required_settings(
application_id, application_config, APNS_AUTH_CREDS_REQUIRED
)
self._validate_apns_certificate(application_config[cert_path])
# determine/set optional values
application_config.setdefault("USE_SANDBOX", False)
application_config.setdefault("USE_ALTERNATIVE_PORT", False)
application_config.setdefault("TOPIC", None)
def _validate_apns_certificate(self, certfile):
"""Validate the APNS certificate at startup."""
try:
with open(certfile, "r") as f:
content = f.read()
check_apns_certificate(content)
except Exception as e:
raise ImproperlyConfigured(
"The APNS certificate file at {!r} is not readable: {}".format(certfile, e)
)
def _validate_fcm_config(self, application_id, application_config):
allowed = (
REQUIRED_SETTINGS + OPTIONAL_SETTINGS + FCM_REQUIRED_SETTINGS + FCM_OPTIONAL_SETTINGS
)
self._validate_allowed_settings(application_id, application_config, allowed)
self._validate_required_settings(
application_id, application_config, FCM_REQUIRED_SETTINGS
)
application_config.setdefault("POST_URL", "https://fcm.googleapis.com/fcm/send")
application_config.setdefault("MAX_RECIPIENTS", 1000)
application_config.setdefault("ERROR_TIMEOUT", None)
def _validate_gcm_config(self, application_id, application_config):
allowed = (
REQUIRED_SETTINGS + OPTIONAL_SETTINGS + GCM_REQUIRED_SETTINGS + GCM_OPTIONAL_SETTINGS
)
self._validate_allowed_settings(application_id, application_config, allowed)
self._validate_required_settings(
application_id, application_config, GCM_REQUIRED_SETTINGS
)
application_config.setdefault("POST_URL", "https://android.googleapis.com/gcm/send")
application_config.setdefault("MAX_RECIPIENTS", 1000)
application_config.setdefault("ERROR_TIMEOUT", None)
def _validate_wns_config(self, application_id, application_config):
allowed = (
REQUIRED_SETTINGS + OPTIONAL_SETTINGS + WNS_REQUIRED_SETTINGS + WNS_OPTIONAL_SETTINGS
)
self._validate_allowed_settings(application_id, application_config, allowed)
self._validate_required_settings(
application_id, application_config, WNS_REQUIRED_SETTINGS
)
application_config.setdefault("WNS_ACCESS_URL", "https://login.live.com/accesstoken.srf")
def _validate_wp_config(self, application_id, application_config):
allowed = (
REQUIRED_SETTINGS + OPTIONAL_SETTINGS + WP_REQUIRED_SETTINGS + WP_OPTIONAL_SETTINGS
)
self._validate_allowed_settings(application_id, application_config, allowed)
self._validate_required_settings(
application_id, application_config, WP_REQUIRED_SETTINGS
)
application_config.setdefault("POST_URL", {
"CHROME": "https://fcm.googleapis.com/fcm/send",
"OPERA": "https://fcm.googleapis.com/fcm/send",
"EDGE": "https://wns2-par02p.notify.windows.com/w",
"FIREFOX": "https://updates.push.services.mozilla.com/wpush/v2",
})
def _validate_allowed_settings(self, application_id, application_config, allowed_settings):
"""Confirm only allowed settings are present."""
for setting_key in application_config.keys():
if setting_key not in allowed_settings:
raise ImproperlyConfigured(
"Platform {}, app {} does not support the setting: {}.".format(
application_config["PLATFORM"], application_id, setting_key
)
)
def _validate_required_settings(
self, application_id, application_config, required_settings,
should_throw=True
):
"""All required keys must be present"""
for setting_key in required_settings:
if setting_key not in application_config.keys():
if should_throw:
raise ImproperlyConfigured(
MISSING_SETTING.format(
application_id=application_id, setting=setting_key
)
)
else:
return False
return True
def _get_application_settings(self, application_id, platform, settings_key):
"""
Walks through PUSH_NOTIFICATIONS_SETTINGS to find the correct setting value
or raises ImproperlyConfigured.
"""
if not application_id:
conf_cls = "push_notifications.conf.AppConfig"
raise ImproperlyConfigured(
"{} requires the application_id be specified at all times.".format(conf_cls)
)
# verify that the application config exists
app_config = self._settings.get("APPLICATIONS").get(application_id, None)
if app_config is None:
raise ImproperlyConfigured(
"No application configured with application_id: {}.".format(application_id)
)
        # reject fetching a setting for the wrong platform
if app_config.get("PLATFORM") != platform:
raise ImproperlyConfigured(
SETTING_MISMATCH.format(
application_id=application_id,
platform=app_config.get("PLATFORM"),
setting=settings_key
)
)
# finally, try to fetch the setting
if settings_key not in app_config:
raise ImproperlyConfigured(
MISSING_SETTING.format(
application_id=application_id, setting=settings_key
)
)
return app_config.get(settings_key)
def has_auth_token_creds(self, application_id=None):
return self.has_token_creds
def get_gcm_api_key(self, application_id=None):
return self._get_application_settings(application_id, "GCM", "API_KEY")
def get_fcm_api_key(self, application_id=None):
return self._get_application_settings(application_id, "FCM", "API_KEY")
def get_post_url(self, cloud_type, application_id=None):
return self._get_application_settings(application_id, cloud_type, "POST_URL")
def get_error_timeout(self, cloud_type, application_id=None):
return self._get_application_settings(application_id, cloud_type, "ERROR_TIMEOUT")
def get_max_recipients(self, cloud_type, application_id=None):
return self._get_application_settings(application_id, cloud_type, "MAX_RECIPIENTS")
def get_apns_certificate(self, application_id=None):
r = self._get_application_settings(application_id, "APNS", "CERTIFICATE")
if not isinstance(r, str):
            # probably a (Django) File object; extract the file path from it
if hasattr(r, "path"):
return r.path
elif (hasattr(r, "has_key") or hasattr(r, "__contains__")) and "path" in r:
return r["path"]
else:
raise ImproperlyConfigured(
"The APNS certificate settings value should be a string, or "
"should have a 'path' attribute or key"
)
return r
def get_apns_auth_creds(self, application_id=None):
        return (
            self._get_apns_auth_key_path(application_id),
            self._get_apns_auth_key_id(application_id),
            self._get_apns_team_id(application_id),
        )
def _get_apns_auth_key_path(self, application_id=None):
return self._get_application_settings(application_id, "APNS", "AUTH_KEY_PATH")
def _get_apns_auth_key_id(self, application_id=None):
return self._get_application_settings(application_id, "APNS", "AUTH_KEY_ID")
def _get_apns_team_id(self, application_id=None):
return self._get_application_settings(application_id, "APNS", "TEAM_ID")
def get_apns_use_sandbox(self, application_id=None):
return self._get_application_settings(application_id, "APNS", "USE_SANDBOX")
def get_apns_use_alternative_port(self, application_id=None):
return self._get_application_settings(application_id, "APNS", "USE_ALTERNATIVE_PORT")
def get_apns_topic(self, application_id=None):
return self._get_application_settings(application_id, "APNS", "TOPIC")
def get_wns_package_security_id(self, application_id=None):
return self._get_application_settings(application_id, "WNS", "PACKAGE_SECURITY_ID")
def get_wns_secret_key(self, application_id=None):
return self._get_application_settings(application_id, "WNS", "SECRET_KEY")
def get_wp_post_url(self, application_id, browser):
return self._get_application_settings(application_id, "WP", "POST_URL")[browser]
def get_wp_private_key(self, application_id=None):
return self._get_application_settings(application_id, "WP", "PRIVATE_KEY")
def get_wp_claims(self, application_id=None):
return self._get_application_settings(application_id, "WP", "CLAIMS")
|
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Manage the lifecycle of modules and dispatch requests to them."""
import collections
import logging
import socket
import threading
import urlparse
import wsgiref.headers
from google.appengine.api import appinfo
from google.appengine.api import request_info
from google.appengine.tools.devappserver2 import instance
from google.appengine.tools.devappserver2 import module
from google.appengine.tools.devappserver2 import scheduled_executor
from google.appengine.tools.devappserver2 import start_response_utils
from google.appengine.tools.devappserver2 import thread_executor
from google.appengine.tools.devappserver2 import wsgi_server
# This file uses pep8 naming.
# pylint: disable=invalid-name
_THREAD_POOL = thread_executor.ThreadExecutor()
ResponseTuple = collections.namedtuple('ResponseTuple',
['status', 'headers', 'content'])
# This must be kept in sync with dispatch_ah_url_path_prefix_whitelist in
# google/production/borg/apphosting/templates/frontend.borg.
DISPATCH_AH_URL_PATH_PREFIX_WHITELIST = ('/_ah/queue/deferred',)
class PortRegistry(object):
def __init__(self):
self._ports = {}
self._ports_lock = threading.RLock()
def add(self, port, _module, inst):
with self._ports_lock:
self._ports[port] = (_module, inst)
def get(self, port):
with self._ports_lock:
return self._ports[port]
class Dispatcher(request_info.Dispatcher):
"""A devappserver2 implementation of request_info.Dispatcher.
In addition to the request_info.Dispatcher interface, it owns modules and
manages their lifetimes.
"""
def __init__(self,
configuration,
host,
port,
auth_domain,
runtime_stderr_loglevel,
php_config,
python_config,
java_config,
cloud_sql_config,
vm_config,
module_to_max_instances,
use_mtime_file_watcher,
automatic_restart,
allow_skipped_files,
module_to_threadsafe_override,
external_port):
"""Initializer for Dispatcher.
Args:
configuration: An application_configuration.ApplicationConfiguration
instance storing the configuration data for the app.
host: A string containing the host that any HTTP servers should bind to
e.g. "localhost".
port: An int specifying the first port where servers should listen.
auth_domain: A string containing the auth domain to set in the environment
variables.
      runtime_stderr_loglevel: An int representing the minimum logging level
        at which runtime log messages should be written to stderr. See
        devappserver2.py for possible values.
      php_config: A runtime_config_pb2.PhpConfig instance containing PHP
        runtime-specific configuration. If None then defaults are used.
python_config: A runtime_config_pb2.PythonConfig instance containing
Python runtime-specific configuration. If None then defaults are
used.
java_config: A runtime_config_pb2.JavaConfig instance containing Java
runtime-specific configuration. If None then defaults are used.
cloud_sql_config: A runtime_config_pb2.CloudSQL instance containing the
required configuration for local Google Cloud SQL development. If None
then Cloud SQL will not be available.
vm_config: A runtime_config_pb2.VMConfig instance containing
VM runtime-specific configuration.
module_to_max_instances: A mapping between a module name and the maximum
number of instances that can be created (this overrides the settings
found in the configuration argument) e.g.
{'default': 10, 'backend': 15}.
use_mtime_file_watcher: A bool containing whether to use mtime polling to
monitor file changes even if other options are available on the
current platform.
automatic_restart: If True then instances will be restarted when a
file or configuration change that affects them is detected.
allow_skipped_files: If True then all files in the application's directory
are readable, even if they appear in a static handler or "skip_files"
directive.
      module_to_threadsafe_override: A mapping between a module name and the
        value with which to override that module's YAML threadsafe
        configuration (modules not named keep their YAML configuration).
external_port: The port on which the single external module is expected
to listen, or None if there are no external modules. This will later
be changed so that the association between external modules and their
ports is more flexible.
"""
self._configuration = configuration
self._php_config = php_config
self._python_config = python_config
self._java_config = java_config
self._cloud_sql_config = cloud_sql_config
self._vm_config = vm_config
self._request_data = None
self._api_host = None
self._api_port = None
self._running_modules = []
self._module_configurations = {}
self._host = host
self._port = port
self._auth_domain = auth_domain
self._runtime_stderr_loglevel = runtime_stderr_loglevel
self._module_name_to_module = {}
self._dispatch_server = None
self._quit_event = threading.Event() # Set when quit() has been called.
self._update_checking_thread = threading.Thread(
target=self._loop_checking_for_updates,
name='Dispatcher Update Checking')
self._module_to_max_instances = module_to_max_instances or {}
self._use_mtime_file_watcher = use_mtime_file_watcher
self._automatic_restart = automatic_restart
self._allow_skipped_files = allow_skipped_files
self._module_to_threadsafe_override = module_to_threadsafe_override
self._executor = scheduled_executor.ScheduledExecutor(_THREAD_POOL)
self._port_registry = PortRegistry()
self._external_port = external_port
def start(self, api_host, api_port, request_data):
"""Starts the configured modules.
Args:
api_host: The hostname that APIServer listens for RPC requests on.
api_port: The port that APIServer listens for RPC requests on.
request_data: A wsgi_request_info.WSGIRequestInfo that will be provided
with request information for use by API stubs.
"""
self._api_host = api_host
self._api_port = api_port
self._request_data = request_data
port = self._port
self._executor.start()
if self._configuration.dispatch:
self._dispatch_server = wsgi_server.WsgiServer((self._host, port), self)
self._dispatch_server.start()
logging.info('Starting dispatcher running at: http://%s:%s', self._host,
self._dispatch_server.port)
self._update_checking_thread.start()
if port:
port += 1
self._port_registry.add(self._dispatch_server.port, None, None)
for module_configuration in self._configuration.modules:
self._module_configurations[
module_configuration.module_name] = module_configuration
_module, port = self._create_module(module_configuration, port)
_module.start()
self._module_name_to_module[module_configuration.module_name] = _module
logging.info('Starting module "%s" running at: http://%s',
module_configuration.module_name, _module.balanced_address)
@property
def dispatch_port(self):
"""The port that the dispatch HTTP server for the Module is listening on."""
assert self._dispatch_server, 'dispatch server not running'
assert self._dispatch_server.ready, 'dispatch server not ready'
return self._dispatch_server.port
@property
def host(self):
"""The host that the HTTP server for this Dispatcher is listening on."""
return self._host
@property
def dispatch_address(self):
"""The address of the dispatch HTTP server e.g. "localhost:8080"."""
if self.dispatch_port != 80:
return '%s:%s' % (self.host, self.dispatch_port)
else:
return self.host
def _check_for_updates(self):
self._configuration.dispatch.check_for_updates()
def _loop_checking_for_updates(self):
"""Loops until the Dispatcher exits, reloading dispatch.yaml config."""
while not self._quit_event.is_set():
self._check_for_updates()
self._quit_event.wait(timeout=1)
def quit(self):
"""Quits all modules."""
self._executor.quit()
self._quit_event.set()
if self._dispatch_server:
self._dispatch_server.quit()
for _module in self._module_name_to_module.values():
_module.quit()
def _create_module(self, module_configuration, port):
max_instances = self._module_to_max_instances.get(
module_configuration.module_name)
threadsafe_override = self._module_to_threadsafe_override.get(
module_configuration.module_name)
if self._external_port:
# TODO: clean this up
module_configuration.external_port = self._external_port
module_class = module.ExternalModule
elif (module_configuration.manual_scaling or
module_configuration.runtime == 'vm'):
# TODO: Remove this 'or' when we support auto-scaled VMs.
module_class = module.ManualScalingModule
elif module_configuration.basic_scaling:
module_class = module.BasicScalingModule
else:
module_class = module.AutoScalingModule
module_instance = module_class(
module_configuration=module_configuration,
host=self._host,
balanced_port=port,
api_host=self._api_host,
api_port=self._api_port,
auth_domain=self._auth_domain,
runtime_stderr_loglevel=self._runtime_stderr_loglevel,
php_config=self._php_config,
python_config=self._python_config,
java_config=self._java_config,
cloud_sql_config=self._cloud_sql_config,
vm_config=self._vm_config,
default_version_port=self._port,
port_registry=self._port_registry,
request_data=self._request_data,
dispatcher=self,
max_instances=max_instances,
use_mtime_file_watcher=self._use_mtime_file_watcher,
automatic_restarts=self._automatic_restart,
allow_skipped_files=self._allow_skipped_files,
threadsafe_override=threadsafe_override)
return module_instance, (0 if port == 0 else port + 1)
@property
def modules(self):
return self._module_name_to_module.values()
def get_hostname(self, module_name, version, instance_id=None):
"""Returns the hostname for a (module, version, instance_id) tuple.
    If instance_id is set, this will return a hostname for that particular
    instance. Otherwise, it will return the hostname for load-balancing.
    A hostname of 0.0.0.0 is converted to a more useful address for the user.
Args:
module_name: A str containing the name of the module.
version: A str containing the version.
instance_id: An optional str containing the instance ID.
Returns:
A str containing the hostname.
Raises:
request_info.ModuleDoesNotExistError: The module does not exist.
request_info.VersionDoesNotExistError: The version does not exist.
request_info.InvalidInstanceIdError: The instance ID is not valid for the
module/version or the module/version uses automatic scaling.
"""
_module = self._get_module(module_name, version)
if instance_id is None:
hostname = _module.balanced_address
else:
hostname = _module.get_instance_address(instance_id)
parts = hostname.split(':')
# 0.0.0.0 or 0 binds to all interfaces but only connects to localhost.
# Convert to an address that can connect from local and remote machines.
# TODO: handle IPv6 bind-all address (::).
try:
if socket.inet_aton(parts[0]) == '\0\0\0\0':
hostname = ':'.join([socket.gethostname()] + parts[1:])
except socket.error:
# socket.inet_aton raised an exception so parts[0] is not an IP address.
pass
return hostname
def get_module_names(self):
"""Returns a list of module names."""
return list(self._module_name_to_module)
def get_module_by_name(self, _module):
"""Returns the module with the given name.
Args:
_module: A str containing the name of the module.
Returns:
The module.Module with the provided name.
Raises:
request_info.ModuleDoesNotExistError: The module does not exist.
"""
try:
return self._module_name_to_module[_module]
except KeyError:
raise request_info.ModuleDoesNotExistError(_module)
def get_versions(self, _module):
"""Returns a list of versions for a module.
Args:
_module: A str containing the name of the module.
Returns:
A list of str containing the versions for the specified module.
Raises:
request_info.ModuleDoesNotExistError: The module does not exist.
"""
if _module in self._module_configurations:
return [self._module_configurations[_module].major_version]
else:
raise request_info.ModuleDoesNotExistError(_module)
def get_default_version(self, _module):
"""Returns the default version for a module.
Args:
_module: A str containing the name of the module.
Returns:
A str containing the default version for the specified module.
Raises:
request_info.ModuleDoesNotExistError: The module does not exist.
"""
if _module in self._module_configurations:
return self._module_configurations[_module].major_version
else:
raise request_info.ModuleDoesNotExistError(_module)
def add_event(self, runnable, eta, service=None, event_id=None):
"""Add a callable to be run at the specified time.
Args:
runnable: A callable object to call at the specified time.
eta: An int containing the time to run the event, in seconds since the
epoch.
service: A str containing the name of the service that owns this event.
This should be set if event_id is set.
event_id: A str containing the id of the event. If set, this can be passed
to update_event to change the time at which the event should run.
"""
if service is not None and event_id is not None:
key = (service, event_id)
else:
key = None
self._executor.add_event(runnable, eta, key)
def update_event(self, eta, service, event_id):
"""Update the eta of a scheduled event.
Args:
eta: An int containing the time to run the event, in seconds since the
epoch.
service: A str containing the name of the service that owns this event.
event_id: A str containing the id of the event to update.
"""
self._executor.update_event(eta, (service, event_id))
def _get_module(self, module_name, version):
"""Attempts to find the specified module.
Args:
module_name: The name of the module.
version: The version id.
Returns:
Module object.
Raises:
request_info.ModuleDoesNotExistError: The module doesn't exist.
request_info.VersionDoesNotExistError: The version doesn't exist.
"""
if not module_name:
module_name = appinfo.DEFAULT_MODULE
if module_name not in self._module_name_to_module:
raise request_info.ModuleDoesNotExistError()
if (version is not None and
version != self._module_configurations[module_name].major_version):
raise request_info.VersionDoesNotExistError()
return self._module_name_to_module[module_name]
def _get_module_with_soft_routing(self, module_name, version):
"""Uses soft-routing to find the specified module.
Soft-routing is an attempt to match the production resolution order, which
    is slightly more permissive than the Modules API behavior. The rules
    are:
1. If a module is requested that doesn't exist, use the default module.
2. If a module is requested that doesn't exist, and there is no default
module, use any module.
Args:
module_name: The name of the module.
version: The version id.
Returns:
Module object.
Raises:
request_info.ModuleDoesNotExistError: The module doesn't exist.
request_info.VersionDoesNotExistError: The version doesn't exist.
"""
if not module_name or module_name not in self._module_name_to_module:
if appinfo.DEFAULT_MODULE in self._module_name_to_module:
module_name = appinfo.DEFAULT_MODULE
elif self._module_name_to_module:
# If there is no default module, but there are other modules, take any.
# This is somewhat of a hack, and can be removed if we ever enforce the
# existence of a default module.
module_name = self._module_name_to_module.keys()[0]
else:
raise request_info.ModuleDoesNotExistError(module_name)
if (version is not None and
version != self._module_configurations[module_name].major_version):
raise request_info.VersionDoesNotExistError()
return self._module_name_to_module[module_name]
def set_num_instances(self, module_name, version, num_instances):
"""Sets the number of instances to run for a version of a module.
Args:
module_name: A str containing the name of the module.
version: A str containing the version.
num_instances: An int containing the number of instances to run.
Raises:
ModuleDoesNotExistError: The module does not exist.
VersionDoesNotExistError: The version does not exist.
NotSupportedWithAutoScalingError: The provided module/version uses
automatic scaling.
"""
self._get_module(module_name, version).set_num_instances(num_instances)
def get_num_instances(self, module_name, version):
"""Returns the number of instances running for a version of a module.
    Args:
      module_name: A str containing the name of the module.
      version: A str containing the version.
    Returns:
      An int containing the number of instances running for a module version.
Raises:
ModuleDoesNotExistError: The module does not exist.
VersionDoesNotExistError: The version does not exist.
NotSupportedWithAutoScalingError: The provided module/version uses
automatic scaling.
"""
return self._get_module(module_name, version).get_num_instances()
def start_version(self, module_name, version):
"""Starts a version of a module.
Args:
module_name: A str containing the name of the module.
version: A str containing the version.
Raises:
ModuleDoesNotExistError: The module does not exist.
VersionDoesNotExistError: The version does not exist.
NotSupportedWithAutoScalingError: The provided module/version uses
automatic scaling.
"""
self._get_module(module_name, version).resume()
def stop_version(self, module_name, version):
"""Stops a version of a module.
Args:
module_name: A str containing the name of the module.
version: A str containing the version.
Raises:
ModuleDoesNotExistError: The module does not exist.
VersionDoesNotExistError: The version does not exist.
NotSupportedWithAutoScalingError: The provided module/version uses
automatic scaling.
"""
self._get_module(module_name, version).suspend()
def send_background_request(self, module_name, version, inst,
background_request_id):
"""Dispatch a background thread request.
Args:
module_name: A str containing the module name to service this
request.
version: A str containing the version to service this request.
inst: The instance to service this request.
background_request_id: A str containing the unique background thread
request identifier.
Raises:
NotSupportedWithAutoScalingError: The provided module/version uses
automatic scaling.
BackgroundThreadLimitReachedError: The instance is at its background
thread capacity.
"""
_module = self._get_module_with_soft_routing(module_name, version)
try:
inst.reserve_background_thread()
except instance.CannotAcceptRequests:
raise request_info.BackgroundThreadLimitReachedError()
port = _module.get_instance_port(inst.instance_id)
environ = _module.build_request_environ(
'GET', '/_ah/background',
[('X-AppEngine-BackgroundRequest', background_request_id)],
'', '0.1.0.3', port)
_THREAD_POOL.submit(self._handle_request,
environ,
start_response_utils.null_start_response,
_module,
inst,
request_type=instance.BACKGROUND_REQUEST,
catch_and_log_exceptions=True)
# TODO: Think of better names for add_async_request and
# add_request.
def add_async_request(self, method, relative_url, headers, body, source_ip,
module_name=None, version=None, instance_id=None):
"""Dispatch an HTTP request asynchronously.
Args:
method: A str containing the HTTP method of the request.
relative_url: A str containing path and query string of the request.
headers: A list of (key, value) tuples where key and value are both str.
body: A str containing the request body.
source_ip: The source ip address for the request.
module_name: An optional str containing the module name to service this
request. If unset, the request will be dispatched to the default
module.
version: An optional str containing the version to service this request.
If unset, the request will be dispatched to the default version.
      instance_id: An optional str containing the instance_id of the instance
        to service this request. If unset, the request will be dispatched
        according to the load-balancing for the module and version.
"""
if module_name:
_module = self._get_module_with_soft_routing(module_name, version)
else:
_module = self._module_for_request(urlparse.urlsplit(relative_url).path)
inst = _module.get_instance(instance_id) if instance_id else None
port = _module.get_instance_port(instance_id) if instance_id else (
_module.balanced_port)
environ = _module.build_request_environ(method, relative_url, headers, body,
source_ip, port)
_THREAD_POOL.submit(self._handle_request,
environ,
start_response_utils.null_start_response,
_module,
inst,
catch_and_log_exceptions=True)
def add_request(self, method, relative_url, headers, body, source_ip,
module_name=None, version=None, instance_id=None,
fake_login=False):
"""Process an HTTP request.
Args:
method: A str containing the HTTP method of the request.
relative_url: A str containing path and query string of the request.
headers: A list of (key, value) tuples where key and value are both str.
body: A str containing the request body.
source_ip: The source ip address for the request.
module_name: An optional str containing the module name to service this
request. If unset, the request will be dispatched according to the
host header and relative_url.
version: An optional str containing the version to service this request.
If unset, the request will be dispatched according to the host header
and relative_url.
instance_id: An optional str containing the instance_id of the instance to
service this request. If unset, the request will be dispatched
according to the host header and relative_url and, if applicable, the
load-balancing for the module and version.
fake_login: A bool indicating whether login checks should be bypassed,
i.e. "login: required" should be ignored for this request.
Returns:
A request_info.ResponseTuple containing the response information for the
HTTP request.
"""
if module_name:
_module = self._get_module_with_soft_routing(module_name, version)
inst = _module.get_instance(instance_id) if instance_id else None
else:
headers_dict = wsgiref.headers.Headers(headers)
_module, inst = self._resolve_target(
headers_dict['Host'], urlparse.urlsplit(relative_url).path)
if inst:
try:
port = _module.get_instance_port(inst.instance_id)
except request_info.NotSupportedWithAutoScalingError:
port = _module.balanced_port
else:
port = _module.balanced_port
environ = _module.build_request_environ(method, relative_url, headers, body,
source_ip, port,
fake_login=fake_login)
start_response = start_response_utils.CapturingStartResponse()
response = self._handle_request(environ,
start_response,
_module,
inst)
# merged_response can have side effects which modify start_response.*, so
# we cannot safely inline it into the ResponseTuple initialization below.
merged = start_response.merged_response(response)
return request_info.ResponseTuple(start_response.status,
start_response.response_headers,
merged)
def _resolve_target(self, hostname, path):
"""Returns the module and instance that should handle this request.
Args:
hostname: A string containing the value of the host header in the request
or None if one was not present.
path: A string containing the path of the request.
Returns:
A tuple (_module, inst) where:
_module: The module.Module that should handle this request.
inst: The instance.Instance that should handle this request or None if
the module's load balancing should decide on the instance.
Raises:
request_info.ModuleDoesNotExistError: if hostname is not known.
"""
if self._port == 80:
default_address = self.host
else:
default_address = '%s:%s' % (self.host, self._port)
if not hostname or hostname == default_address:
return self._module_for_request(path), None
default_address_offset = hostname.find(default_address)
if default_address_offset > 0:
prefix = hostname[:default_address_offset - 1]
# The prefix should be 'module', but might be 'instance.version.module',
# 'version.module', or 'instance.module'. These alternatives work in
# production, but devappserver2 doesn't support running multiple versions
# of the same module. All we can really do is route to the default
# version of the specified module.
if '.' in prefix:
logging.warning('Ignoring instance/version in %s; multiple versions '
'are not supported in devappserver.', prefix)
module_name = prefix.split('.')[-1]
return self._get_module_with_soft_routing(module_name, None), None
else:
if ':' in hostname:
port = int(hostname.split(':', 1)[1])
else:
port = 80
try:
_module, inst = self._port_registry.get(port)
except KeyError:
raise request_info.ModuleDoesNotExistError(hostname)
if not _module:
_module = self._module_for_request(path)
return _module, inst
def _handle_request(self, environ, start_response, _module,
inst=None, request_type=instance.NORMAL_REQUEST,
catch_and_log_exceptions=False):
"""Dispatch a WSGI request.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
_module: The module to dispatch this request to.
inst: The instance to service this request. If None, the module will
be left to choose the instance to serve this request.
request_type: The request_type of this request. See instance.*_REQUEST
module constants.
catch_and_log_exceptions: A bool containing whether to catch and log
exceptions in handling the request instead of leaving it for the
caller to handle.
Returns:
An iterable over the response to the request as defined in PEP-333.
"""
try:
return _module._handle_request(environ, start_response, inst=inst,
request_type=request_type)
except:
if catch_and_log_exceptions:
logging.exception('Internal error while handling request.')
else:
raise
def __call__(self, environ, start_response):
return self._handle_request(
environ, start_response, self._module_for_request(environ['PATH_INFO']))
def _should_use_dispatch_config(self, path):
"""Determines whether or not to use the dispatch config.
Args:
path: The request path.
Returns:
A Boolean indicating whether or not to use the rules in dispatch config.
"""
if (not path.startswith('/_ah/') or
any(path.startswith(wl) for wl
in DISPATCH_AH_URL_PATH_PREFIX_WHITELIST)):
return True
else:
logging.warning('Skipping dispatch.yaml rules because %s is not a '
'dispatchable path.', path)
return False
def _module_for_request(self, path):
dispatch = self._configuration.dispatch
if dispatch and self._should_use_dispatch_config(path):
for url, module_name in dispatch.dispatch:
if (url.path_exact and path == url.path or
not url.path_exact and path.startswith(url.path)):
return self._get_module_with_soft_routing(module_name, None)
return self._get_module_with_soft_routing(None, None)
|
|
#!/usr/bin/env python
# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Windows specific tests. These are implicitly run by test_psutil.py."""
import errno
import os
import platform
import signal
import subprocess
import sys
import time
import traceback
from test_psutil import APPVEYOR, WINDOWS
from test_psutil import get_test_subprocess, reap_children, unittest
import mock
try:
import wmi
except ImportError:
wmi = None
try:
import win32api
import win32con
except ImportError:
win32api = win32con = None
from psutil._compat import PY3, callable, long
import psutil
cext = psutil._psplatform.cext
def wrap_exceptions(fun):
def wrapper(self, *args, **kwargs):
try:
return fun(self, *args, **kwargs)
except OSError as err:
from psutil._pswindows import ACCESS_DENIED_SET
if err.errno in ACCESS_DENIED_SET:
raise psutil.AccessDenied(None, None)
if err.errno == errno.ESRCH:
raise psutil.NoSuchProcess(None, None)
raise
return wrapper
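# For example, wrap_exceptions(cext.proc_create_time) yields a callable that
# raises psutil.NoSuchProcess / psutil.AccessDenied instead of a raw OSError
# (see its use on the cext functions in TestDualProcessImplementation below).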
@unittest.skipUnless(WINDOWS, "not a Windows system")
class WindowsSpecificTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.pid = get_test_subprocess().pid
@classmethod
def tearDownClass(cls):
reap_children()
def test_issue_24(self):
p = psutil.Process(0)
self.assertRaises(psutil.AccessDenied, p.kill)
def test_special_pid(self):
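        # PID 4 is the Windows "System" process.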
p = psutil.Process(4)
self.assertEqual(p.name(), 'System')
# use __str__ to access all common Process properties to check
# that nothing strange happens
str(p)
p.username()
self.assertTrue(p.create_time() >= 0.0)
try:
rss, vms = p.memory_info()
except psutil.AccessDenied:
# expected on Windows Vista and Windows 7
if not platform.uname()[1] in ('vista', 'win-7', 'win7'):
raise
else:
self.assertTrue(rss > 0)
def test_send_signal(self):
p = psutil.Process(self.pid)
self.assertRaises(ValueError, p.send_signal, signal.SIGINT)
def test_nic_names(self):
p = subprocess.Popen(['ipconfig', '/all'], stdout=subprocess.PIPE)
out = p.communicate()[0]
if PY3:
out = str(out, sys.stdout.encoding)
nics = psutil.net_io_counters(pernic=True).keys()
for nic in nics:
if "pseudo-interface" in nic.replace(' ', '-').lower():
continue
if nic not in out:
self.fail(
"%r nic wasn't found in 'ipconfig /all' output" % nic)
def test_exe(self):
for p in psutil.process_iter():
try:
self.assertEqual(os.path.basename(p.exe()), p.name())
except psutil.Error:
pass
# --- Process class tests
@unittest.skipIf(wmi is None, "wmi module is not installed")
def test_process_name(self):
w = wmi.WMI().Win32_Process(ProcessId=self.pid)[0]
p = psutil.Process(self.pid)
self.assertEqual(p.name(), w.Caption)
@unittest.skipIf(wmi is None, "wmi module is not installed")
def test_process_exe(self):
w = wmi.WMI().Win32_Process(ProcessId=self.pid)[0]
p = psutil.Process(self.pid)
# Note: wmi reports the exe as a lower case string.
# Being Windows paths case-insensitive we ignore that.
self.assertEqual(p.exe().lower(), w.ExecutablePath.lower())
@unittest.skipIf(wmi is None, "wmi module is not installed")
def test_process_cmdline(self):
w = wmi.WMI().Win32_Process(ProcessId=self.pid)[0]
p = psutil.Process(self.pid)
self.assertEqual(' '.join(p.cmdline()),
w.CommandLine.replace('"', ''))
@unittest.skipIf(wmi is None, "wmi module is not installed")
def test_process_username(self):
w = wmi.WMI().Win32_Process(ProcessId=self.pid)[0]
p = psutil.Process(self.pid)
domain, _, username = w.GetOwner()
username = "%s\\%s" % (domain, username)
self.assertEqual(p.username(), username)
@unittest.skipIf(wmi is None, "wmi module is not installed")
def test_process_rss_memory(self):
time.sleep(0.1)
w = wmi.WMI().Win32_Process(ProcessId=self.pid)[0]
p = psutil.Process(self.pid)
rss = p.memory_info().rss
self.assertEqual(rss, int(w.WorkingSetSize))
@unittest.skipIf(wmi is None, "wmi module is not installed")
def test_process_vms_memory(self):
time.sleep(0.1)
w = wmi.WMI().Win32_Process(ProcessId=self.pid)[0]
p = psutil.Process(self.pid)
vms = p.memory_info().vms
        # http://msdn.microsoft.com/en-us/library/aa394372(VS.85).aspx
        # ...claims that PageFileUsage is expressed in kilobytes, but
        # funnily enough, on certain platforms bytes are returned instead.
wmi_usage = int(w.PageFileUsage)
if (vms != wmi_usage) and (vms != wmi_usage * 1024):
self.fail("wmi=%s, psutil=%s" % (wmi_usage, vms))
@unittest.skipIf(wmi is None, "wmi module is not installed")
def test_process_create_time(self):
w = wmi.WMI().Win32_Process(ProcessId=self.pid)[0]
p = psutil.Process(self.pid)
wmic_create = str(w.CreationDate.split('.')[0])
psutil_create = time.strftime("%Y%m%d%H%M%S",
time.localtime(p.create_time()))
self.assertEqual(wmic_create, psutil_create)
# --- psutil namespace functions and constants tests
@unittest.skipUnless('NUMBER_OF_PROCESSORS' in os.environ,
'NUMBER_OF_PROCESSORS env var is not available')
def test_cpu_count(self):
num_cpus = int(os.environ['NUMBER_OF_PROCESSORS'])
self.assertEqual(num_cpus, psutil.cpu_count())
@unittest.skipIf(wmi is None, "wmi module is not installed")
def test_total_phymem(self):
w = wmi.WMI().Win32_ComputerSystem()[0]
self.assertEqual(int(w.TotalPhysicalMemory),
psutil.virtual_memory().total)
# @unittest.skipIf(wmi is None, "wmi module is not installed")
# def test__UPTIME(self):
# # _UPTIME constant is not public but it is used internally
# # as value to return for pid 0 creation time.
# # WMI behaves the same.
# w = wmi.WMI().Win32_Process(ProcessId=self.pid)[0]
# p = psutil.Process(0)
# wmic_create = str(w.CreationDate.split('.')[0])
# psutil_create = time.strftime("%Y%m%d%H%M%S",
# time.localtime(p.create_time()))
#
# Note: this test is not very reliable
@unittest.skipIf(wmi is None, "wmi module is not installed")
    @unittest.skipIf(APPVEYOR, "test not reliable on appveyor")
def test_pids(self):
# Note: this test might fail if the OS is starting/killing
# other processes in the meantime
w = wmi.WMI().Win32_Process()
wmi_pids = set([x.ProcessId for x in w])
psutil_pids = set(psutil.pids())
self.assertEqual(wmi_pids, psutil_pids)
@unittest.skipIf(wmi is None, "wmi module is not installed")
def test_disks(self):
ps_parts = psutil.disk_partitions(all=True)
wmi_parts = wmi.WMI().Win32_LogicalDisk()
for ps_part in ps_parts:
for wmi_part in wmi_parts:
if ps_part.device.replace('\\', '') == wmi_part.DeviceID:
if not ps_part.mountpoint:
# this is usually a CD-ROM with no disk inserted
break
try:
usage = psutil.disk_usage(ps_part.mountpoint)
except OSError as err:
if err.errno == errno.ENOENT:
# usually this is the floppy
break
else:
raise
self.assertEqual(usage.total, int(wmi_part.Size))
wmi_free = int(wmi_part.FreeSpace)
                    # 10 MB tolerance: free space may change between the two
                    # readings, so don't compare it for strict equality.
if abs(usage.free - wmi_free) > 10 * 1024 * 1024:
self.fail("psutil=%s, wmi=%s" % (
usage.free, wmi_free))
break
else:
self.fail("can't find partition %s" % repr(ps_part))
@unittest.skipIf(win32api is None, "pywin32 module is not installed")
def test_num_handles(self):
p = psutil.Process(os.getpid())
before = p.num_handles()
handle = win32api.OpenProcess(win32con.PROCESS_QUERY_INFORMATION,
win32con.FALSE, os.getpid())
after = p.num_handles()
self.assertEqual(after, before + 1)
win32api.CloseHandle(handle)
self.assertEqual(p.num_handles(), before)
@unittest.skipIf(win32api is None, "pywin32 module is not installed")
def test_num_handles_2(self):
        # Note: this fails from time to time; I'm inclined to think it
        # doesn't mean something is broken.
        def call(p, attr):
            # Look up the attribute named by the parameter (the original
            # closed over the loop variable instead) and invoke it if it
            # is callable.
            attr = getattr(p, attr, None)
            if attr is not None and callable(attr):
                attr()
p = psutil.Process(self.pid)
failures = []
for name in dir(psutil.Process):
if name.startswith('_') \
or name in ('terminate', 'kill', 'suspend', 'resume',
'nice', 'send_signal', 'wait', 'children',
'as_dict'):
continue
else:
try:
call(p, name)
num1 = p.num_handles()
call(p, name)
num2 = p.num_handles()
except (psutil.NoSuchProcess, psutil.AccessDenied):
pass
else:
if num2 > num1:
fail = \
"failure while processing Process.%s method " \
"(before=%s, after=%s)" % (name, num1, num2)
failures.append(fail)
if failures:
self.fail('\n' + '\n'.join(failures))
def test_name_always_available(self):
# On Windows name() is never supposed to raise AccessDenied,
# see https://github.com/giampaolo/psutil/issues/627
for p in psutil.process_iter():
try:
p.name()
            except psutil.NoSuchProcess:
pass
@unittest.skipUnless(WINDOWS, "not a Windows system")
class TestDualProcessImplementation(unittest.TestCase):
"""
    Certain APIs on Windows have two internal implementations: one based
    on documented Windows APIs, the other based on
    NtQuerySystemInformation(), which is called as a fallback in case the
    first fails because of limited permissions.
    Here we test that the two methods return the same values; see:
https://github.com/giampaolo/psutil/issues/304
"""
fun_names = [
# function name, tolerance
('proc_cpu_times', 0.2),
('proc_create_time', 0.5),
('proc_num_handles', 1), # 1 because impl #1 opens a handle
('proc_memory_info', 1024), # KB
('proc_io_counters', 0),
]
def test_compare_values(self):
def assert_ge_0(obj):
if isinstance(obj, tuple):
for value in obj:
self.assertGreaterEqual(value, 0, msg=obj)
elif isinstance(obj, (int, long, float)):
self.assertGreaterEqual(obj, 0)
else:
assert 0 # case not handled which needs to be fixed
def compare_with_tolerance(ret1, ret2, tolerance):
if ret1 == ret2:
return
else:
if isinstance(ret2, (int, long, float)):
diff = abs(ret1 - ret2)
self.assertLessEqual(diff, tolerance)
elif isinstance(ret2, tuple):
for a, b in zip(ret1, ret2):
diff = abs(a - b)
self.assertLessEqual(diff, tolerance)
from psutil._pswindows import ntpinfo
failures = []
for p in psutil.process_iter():
try:
nt = ntpinfo(*cext.proc_info(p.pid))
except psutil.NoSuchProcess:
continue
assert_ge_0(nt)
for name, tolerance in self.fun_names:
if name == 'proc_memory_info' and p.pid == os.getpid():
continue
if name == 'proc_create_time' and p.pid in (0, 4):
continue
meth = wrap_exceptions(getattr(cext, name))
try:
ret = meth(p.pid)
except (psutil.NoSuchProcess, psutil.AccessDenied):
continue
# compare values
try:
if name == 'proc_cpu_times':
compare_with_tolerance(ret[0], nt.user_time, tolerance)
compare_with_tolerance(ret[1],
nt.kernel_time, tolerance)
elif name == 'proc_create_time':
compare_with_tolerance(ret, nt.create_time, tolerance)
elif name == 'proc_num_handles':
compare_with_tolerance(ret, nt.num_handles, tolerance)
elif name == 'proc_io_counters':
compare_with_tolerance(ret[0], nt.io_rcount, tolerance)
compare_with_tolerance(ret[1], nt.io_wcount, tolerance)
compare_with_tolerance(ret[2], nt.io_rbytes, tolerance)
compare_with_tolerance(ret[3], nt.io_wbytes, tolerance)
elif name == 'proc_memory_info':
try:
rawtupl = cext.proc_memory_info_2(p.pid)
except psutil.NoSuchProcess:
continue
compare_with_tolerance(ret, rawtupl, tolerance)
except AssertionError:
trace = traceback.format_exc()
msg = '%s\npid=%s, method=%r, ret_1=%r, ret_2=%r' % (
trace, p.pid, name, ret, nt)
failures.append(msg)
break
if failures:
self.fail('\n\n'.join(failures))
    # ---
    # The tests below repeat the checks above, but mimic the first (fast)
    # implementation failing with AccessDenied and verify that the fallback
    # is used.
    # TODO: currently does not take tolerance into account.
def test_name(self):
name = psutil.Process().name()
with mock.patch("psutil._psplatform.cext.proc_exe",
side_effect=psutil.AccessDenied(os.getpid())) as fun:
            self.assertEqual(psutil.Process().name(), name)
assert fun.called
def test_memory_info(self):
mem = psutil.Process().memory_info()
with mock.patch("psutil._psplatform.cext.proc_memory_info",
side_effect=OSError(errno.EPERM, "msg")) as fun:
            self.assertEqual(psutil.Process().memory_info(), mem)
assert fun.called
def test_create_time(self):
ctime = psutil.Process().create_time()
with mock.patch("psutil._psplatform.cext.proc_create_time",
side_effect=OSError(errno.EPERM, "msg")) as fun:
            self.assertEqual(psutil.Process().create_time(), ctime)
assert fun.called
def test_cpu_times(self):
cpu_times = psutil.Process().cpu_times()
with mock.patch("psutil._psplatform.cext.proc_cpu_times",
side_effect=OSError(errno.EPERM, "msg")) as fun:
            self.assertEqual(psutil.Process().cpu_times(), cpu_times)
assert fun.called
def test_io_counters(self):
io_counters = psutil.Process().io_counters()
with mock.patch("psutil._psplatform.cext.proc_io_counters",
side_effect=OSError(errno.EPERM, "msg")) as fun:
            self.assertEqual(psutil.Process().io_counters(), io_counters)
assert fun.called
    def test_num_handles(self):
        # Fixes a copy-paste of test_io_counters: mock the num_handles
        # fast path and verify the fallback returns the same value.
        num_handles = psutil.Process().num_handles()
        with mock.patch("psutil._psplatform.cext.proc_num_handles",
                        side_effect=OSError(errno.EPERM, "msg")) as fun:
            self.assertEqual(psutil.Process().num_handles(), num_handles)
            assert fun.called
# --- other tests
def test_compare_name_exe(self):
for p in psutil.process_iter():
try:
a = os.path.basename(p.exe())
b = p.name()
except (psutil.NoSuchProcess, psutil.AccessDenied):
pass
else:
self.assertEqual(a, b)
def test_zombies(self):
# test that NPS is raised by the 2nd implementation in case a
# process no longer exists
ZOMBIE_PID = max(psutil.pids()) + 5000
for name, _ in self.fun_names:
meth = wrap_exceptions(getattr(cext, name))
self.assertRaises(psutil.NoSuchProcess, meth, ZOMBIE_PID)
def main():
test_suite = unittest.TestSuite()
test_suite.addTest(unittest.makeSuite(WindowsSpecificTestCase))
test_suite.addTest(unittest.makeSuite(TestDualProcessImplementation))
result = unittest.TextTestRunner(verbosity=2).run(test_suite)
return result.wasSuccessful()
if __name__ == '__main__':
if not main():
sys.exit(1)
|
|
"""
Use these tags and filter when you're rolling your own search results.
This is intended to be a whole templatetags module. I keep it in my apps
as templatetags/search.py. These should not be used to perform search
queries, but rather to render the results.
Basics
-------
There are three functions; each has both a tag and a filter of the same name.
These functions accept, at a minimum, a body of text and a list of search terms:
searchexcerpt: Truncate the text so that each search term is shown,
surrounded by some number of words of context.
highlight: Wrap all found search terms in an HTML span that can be styled to
highlight the terms.
hits: Count the occurrences of the search terms in the text.
The filters provide the most basic functionality as described above, while the tags
offer more options as arguments, such as case sensitivity, whole word search, and
saving the results to a context variable.
Settings
---------
Defaults for both the tags and filters can be changed with the following
settings. Note that these settings are merely a convenience for the tags,
which accept these as arguments, but they are the only way to change the
behavior of the filters.
* SEARCH_CONTEXT_WORDS: Number of words to show on the left and right of
each search term. Default: 10
* SEARCH_IGNORE_CASE: False for case sensitive, True otherwise. Default: True
* SEARCH_WORD_BOUNDARY: Find whole words and not strings in the middle of
words. Default: False
* SEARCH_HIGHLIGHT_CLASS: The class to give the HTML span element when
wrapping highlighted search terms. Default: "highlight"
Examples
---------
Suppose you have a list flatpages resulting from a search query, and the
search terms (split into a list) are in the context variable terms. This will
show 5 words of context around each term and highlight matches in the title:
```
{% for page in flatpages %}
<h3>{{ page.title|highlight:terms }}</h3>
<p>
{% searchexcerpt terms 5 %}
{{ page.content|striptags }}
{% endsearchexcerpt %}
</p>
{% endfor %}
```
Add highlighting to the excerpt, and use a custom span class (the two flags
are for case insensitivity and respecting word boundaries):
```
{% highlight 1 1 "match" %}
{% searchexcerpt terms 5 1 1 %}
{{ page.content|striptags }}
{% endsearchexcerpt %}
{% endhighlight %}
```
Show the number of hits in the body:
```
<h3>{{ page.title }}
(Hits: {{ page.content|striptags|hits:terms }})
</h3>
```
All tags support an "as name" suffix, in which case an object will be stored
in the template context with the given name and output will be suppressed.
This is more efficient when you want both the excerpt and the number of hits.
The stored object depends on the tag:
* searchexcerpt: A dictionary with keys "original" (the text searched),
"excerpt" (the summarized text with search terms),
and "hits" (the number of hits in the text).
* highlight: A dictionary with keys "original", "highlighted", and "hits",
  with the obvious values.
* hits: Just the number of hits, nothing special.
Getting both the hits and the excerpt with "as":
```
{% searchexcerpt terms 3 as content %}
{{ page.content|striptags }}
{% endsearchexcerpt %}
<p>Hits: {{ content.hits }}<br>{{ content.excerpt }}</p>
```
More
----
For more examples see [Brian Beck's Text Adventure][1].
[1]: http://blog.brianbeck.com/post/29707610
"""
from itertools import ifilter
import re
from django import template
from django.conf import settings
from django.template import Node, TemplateSyntaxError
from django.utils.safestring import mark_safe
register = template.Library()
SETTINGS_PREFIX = 'SEARCH_'
SETTINGS_DEFAULTS = {
'CONTEXT_WORDS': 10,
'IGNORE_CASE': True,
'WORD_BOUNDARY': False,
    'HIGHLIGHT_CLASS': "highlight"
}
def get_setting(name):
return getattr(settings, SETTINGS_PREFIX + name, SETTINGS_DEFAULTS[name])
def searchexcerpt(text, phrases, context_words=None, ignore_case=None, word_boundary=None):
if isinstance(phrases, basestring):
phrases = [phrases]
if context_words is None:
context_words = get_setting('CONTEXT_WORDS')
if ignore_case is None:
ignore_case = get_setting('IGNORE_CASE')
if word_boundary is None:
word_boundary = get_setting('WORD_BOUNDARY')
phrases = map(re.escape, phrases)
flags = ignore_case and re.I or 0
exprs = [re.compile(r"^%s$" % p, flags) for p in phrases]
whitespace = re.compile(r'\s+')
re_template = word_boundary and r"\b(%s)\b" or r"(%s)"
pieces = re.compile(re_template % "|".join(phrases), flags).split(text)
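    # re.split() with a capturing group interleaves the surrounding text and
    # the matches themselves, so pieces at odd indices are matched phrases.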
matches = {}
word_lists = []
index = {}
for i, piece in enumerate(pieces):
word_lists.append(whitespace.split(piece))
if i % 2:
index[i] = expr = ifilter(lambda e: e.match(piece), exprs).next()
matches.setdefault(expr, []).append(i)
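    # Helper to stitch consecutive word lists back together.  The split
    # leaves no whitespace between a piece and the match following it, so
    # the words at each boundary are concatenated instead of being treated
    # as separate words.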
def merge(lists):
merged = []
lists = [i for i in lists if i]
for words in lists:
if merged:
merged[-1] += words[0]
del words[0]
merged.extend(words)
return merged
i = 0
merged = []
for j in map(min, matches.itervalues()):
merged.append(merge(word_lists[i:j]))
merged.append(word_lists[j])
i = j + 1
merged.append(merge(word_lists[i:]))
output = []
for i, words in enumerate(merged):
omit = None
if i == len(merged) - 1:
omit = slice(max(1, 2 - i) * context_words + 1, None)
elif i == 0:
omit = slice(-context_words - 1)
elif not i % 2:
omit = slice(context_words + 1, -context_words - 1)
if omit and words[omit]:
words[omit] = ["..."]
output.append(" ".join(words))
return dict(original=text, excerpt="".join(output), hits=len(index))
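# Illustrative example (not part of the original module): called directly,
# searchexcerpt() returns a dict, e.g. with two words of context:
#
#   searchexcerpt("the quick brown fox jumps over the lazy dog",
#                 ["fox"], context_words=2)
#   # -> {"original": <the input text>,
#   #     "excerpt": "... quick brown fox jumps over ...",
#   #     "hits": 1}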
class FunctionProxyNode(Node):
def __init__(self, nodelist, args, variable_name=None):
self.nodelist = nodelist
self.args = args
self.variable_name = variable_name
def render(self, context):
args = [arg.resolve(context) for arg in self.args]
text = self.nodelist.render(context)
value = self.get_value(text, *args)
if self.variable_name:
context[self.variable_name] = value
return ""
else:
return self.string_value(value)
def get_value(self, *args):
raise NotImplementedError
def string_value(self, value):
return value
class SearchContextNode(FunctionProxyNode):
def get_value(self, *args):
return searchexcerpt(*args)
def string_value(self, value):
return value['excerpt']
@register.tag(name='searchexcerpt')
def searchexcerpt_tag(parser, token):
"""
{% searchexcerpt search_terms [context_words] [ignore_case] [word_boundary] [as name] %}
...text...
{% endsearchexcerpt %}
"""
bits = list(token.split_contents())
    if not 2 <= len(bits) <= 7:
usage = searchexcerpt_tag.__doc__.strip()
raise TemplateSyntaxError("%r expected usage: %s" % (bits[0], usage))
if len(bits) > 4 and bits[-2] == "as":
args, name = bits[1:-2], bits[-1]
else:
args, name = bits[1:], None
nodelist = parser.parse(('endsearchexcerpt',))
parser.delete_first_token()
return SearchContextNode(nodelist, map(parser.compile_filter, args), name)
@register.filter(name='searchexcerpt')
def searchexcerpt_filter(value, arg):
return searchexcerpt(value, arg)['excerpt']
searchexcerpt_filter.is_safe = True
def highlight(text, phrases, ignore_case=None, word_boundary=None, class_name=None):
if isinstance(phrases, basestring):
phrases = [phrases]
if ignore_case is None:
ignore_case = get_setting('IGNORE_CASE')
if word_boundary is None:
word_boundary = get_setting('WORD_BOUNDARY')
if class_name is None:
class_name = get_setting('HIGHLIGHT_CLASS')
phrases = map(re.escape, phrases)
flags = ignore_case and re.I or 0
re_template = word_boundary and r"\b(%s)\b" or r"(%s)"
expr = re.compile(re_template % "|".join(phrases), flags)
inner_expr = re.compile('<a[^>]+?href="[^>]*?(%s)$' % "|".join(phrases), flags)
template = '<span class="%s">%%s</span>' % class_name
matches = []
def replace(match):
if not word_boundary:
span = match.span()
if inner_expr.search(text, span[0] - 100, span[1]):
return match.group(0)
matches.append(match)
return template % match.group(0)
highlighted = mark_safe(expr.sub(replace, text))
count = len(matches)
return dict(original=text, highlighted=highlighted, hits=count)
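# Illustrative example (assuming the default settings above):
#
#   highlight("the quick brown fox", ["fox"])
#   # -> {"original": "the quick brown fox",
#   #     "highlighted": 'the quick brown <span class="highlight">fox</span>',
#   #     "hits": 1}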
class HighlightNode(FunctionProxyNode):
def get_value(self, *args):
return highlight(*args)
def string_value(self, value):
return value['highlighted']
@register.tag(name='highlight')
def highlight_tag(parser, token):
"""
{% highlight search_terms [ignore_case] [word_boundary] [class_name] [as name] %}
...text...
{% endhighlight %}
"""
bits = list(token.split_contents())
if not 2 <= len(bits) <= 7:
usage = highlight_tag.__doc__.strip()
raise TemplateSyntaxError("%r expected usage: %s" % (bits[0], usage))
if len(bits) > 3 and bits[-2] == "as":
args, name = bits[1:-2], bits[-1]
else:
args, name = bits[1:], None
nodelist = parser.parse(('endhighlight',))
parser.delete_first_token()
return HighlightNode(nodelist, map(parser.compile_filter, args), name)
@register.filter(name='highlight')
def highlight_filter(value, arg):
return highlight(value, arg)['highlighted']
def hits(text, phrases, ignore_case=None, word_boundary=None):
if isinstance(phrases, basestring):
phrases = [phrases]
if ignore_case is None:
ignore_case = get_setting('IGNORE_CASE')
if word_boundary is None:
word_boundary = get_setting('WORD_BOUNDARY')
phrases = map(re.escape, phrases)
flags = ignore_case and re.I or 0
re_template = word_boundary and r"\b(%s)\b" or r"(%s)"
expr = re.compile(re_template % "|".join(phrases), flags)
return len(expr.findall(text))
class HitsNode(FunctionProxyNode):
def get_value(self, *args):
return hits(*args)
def string_value(self, value):
return "%d" % value
@register.tag(name='hits')
def hits_tag(parser, token):
"""
{% hits search_terms [ignore_case] [word_boundary] [as name] %}
...text...
{% endhits %}
"""
bits = list(token.split_contents())
if not 2 <= len(bits) <= 6:
usage = hits_tag.__doc__.strip()
raise TemplateSyntaxError("%r expected usage: %s" % (bits[0], usage))
if len(bits) > 3 and bits[-2] == "as":
args, name = bits[1:-2], bits[-1]
else:
args, name = bits[1:], None
nodelist = parser.parse(('endhits',))
parser.delete_first_token()
return HitsNode(nodelist, map(parser.compile_filter, args), name)
@register.filter(name='hits')
def hits_filter(value, arg):
return hits(value, arg)
hits_filter.is_safe = True
|
|
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_serialization import jsonutils
from nailgun.consts import BOND_MODES
from nailgun.consts import BOND_XMIT_HASH_POLICY
from nailgun.consts import NETWORK_INTERFACE_TYPES
from nailgun.settings import settings
from nailgun.test.base import BaseIntegrationTest
from nailgun.utils import reverse
class TestNodeNICsBonding(BaseIntegrationTest):
def setUp(self):
super(TestNodeNICsBonding, self).setUp()
meta = self.env.default_metadata()
self.env.set_interfaces_in_meta(meta, [
{"name": "eth0",
"mac": "00:00:00:00:00:66",
"pxe": True,
"offloading_modes": [
{
"name": "mode_1",
"state": None,
"sub": []
},
{
"name": "mode_common",
"state": None,
"sub": []
}
]
},
{"name": "eth1",
"mac": "00:00:00:00:00:77",
"offloading_modes": [
{
"name": "mode_2",
"state": None,
"sub": []
},
{
"name": "mode_common",
"state": None,
"sub": []
}
]
},
{"name": "eth2",
"mac": "00:00:00:00:00:88",
"offloading_modes": [
{
"name": "mode_3",
"state": None,
"sub": []
},
{
"name": "mode_4",
"state": None,
"sub": []
},
{
"name": "mode_common",
"state": None,
"sub": []
}
]}])
self.env.create(
cluster_kwargs={
"net_provider": "neutron",
"net_segment_type": "gre"
},
nodes_kwargs=[
{"api": True,
"pending_addition": True,
"meta": meta}
]
)
self.get_node_nics_info()
def get_node_nics_info(self):
resp = self.app.get(
reverse("NodeNICsHandler",
kwargs={"node_id": self.env.nodes[0]["id"]}),
headers=self.default_headers)
self.assertEqual(resp.status_code, 200)
self.data = resp.json_body
self.admin_nic, self.other_nic, self.empty_nic = None, None, None
for nic in self.data:
net_names = [n["name"] for n in nic["assigned_networks"]]
if "fuelweb_admin" in net_names:
self.admin_nic = nic
elif net_names:
self.other_nic = nic
else:
self.empty_nic = nic
self.assertTrue(self.admin_nic and self.other_nic and self.empty_nic)
def put_single(self):
return self.env.node_nics_put(self.env.nodes[0]["id"], self.data,
expect_errors=True)
def put_collection(self):
nodes_list = [{"id": self.env.nodes[0]["id"],
"interfaces": self.data}]
return self.env.node_collection_nics_put(nodes_list,
expect_errors=True)
def node_nics_put_check_error(self, message):
for put_func in (self.put_single, self.put_collection):
resp = put_func()
self.assertEqual(resp.status_code, 400)
self.assertEqual(resp.json_body["message"], message)
def nics_bond_create(self, put_func):
self.data.append({
"name": 'ovs-bond0',
"type": NETWORK_INTERFACE_TYPES.bond,
"mode": BOND_MODES.balance_slb,
"slaves": [
{"name": self.other_nic["name"]},
{"name": self.empty_nic["name"]}],
"assigned_networks": self.other_nic["assigned_networks"]
})
self.other_nic["assigned_networks"] = []
resp = put_func()
self.assertEqual(resp.status_code, 200)
resp = self.env.node_nics_get(self.env.nodes[0]["id"])
self.assertEqual(resp.status_code, 200)
bonds = filter(
lambda iface: iface["type"] == NETWORK_INTERFACE_TYPES.bond,
resp.json_body)
self.assertEqual(len(bonds), 1)
self.assertEqual(bonds[0]["name"], 'ovs-bond0')
bond_offloading_modes = bonds[0]['offloading_modes']
self.assertEqual(len(bond_offloading_modes), 1)
self.assertDictEqual(
bond_offloading_modes[0],
{'name': 'mode_common',
'state': None,
'sub': []})
def nics_bond_create_w_properties(self, put_func):
self.data.append({
"name": 'bond0',
"type": NETWORK_INTERFACE_TYPES.bond,
"bond_properties": {
"mode": BOND_MODES.l_802_3ad,
"xmit_hash_policy": BOND_XMIT_HASH_POLICY.layer2_3,
"lacp_rate": "slow",
"type__": "linux"
},
"slaves": [
{"name": self.other_nic["name"]},
{"name": self.empty_nic["name"]}],
"assigned_networks": self.other_nic["assigned_networks"]
})
self.other_nic["assigned_networks"] = []
resp = put_func()
self.assertEqual(resp.status_code, 200)
resp = self.env.node_nics_get(self.env.nodes[0]["id"])
self.assertEqual(resp.status_code, 200)
bonds = filter(
lambda iface: iface["type"] == NETWORK_INTERFACE_TYPES.bond,
resp.json_body)
self.assertEqual(len(bonds), 1)
self.assertEqual(bonds[0]["name"], 'bond0')
bond_offloading_modes = bonds[0]['offloading_modes']
self.assertEqual(len(bond_offloading_modes), 1)
self.assertDictEqual(
bond_offloading_modes[0],
{'name': 'mode_common',
'state': None,
'sub': []})
def nics_bond_remove(self, put_func):
resp = self.env.node_nics_get(self.env.nodes[0]["id"])
self.assertEqual(resp.status_code, 200)
self.data = resp.json_body
for nic in self.data:
if nic["type"] == NETWORK_INTERFACE_TYPES.bond:
bond = nic
break
        else:
            raise Exception("No bond interface was found (unexpected)")
for nic in self.data:
if nic["name"] == bond["slaves"][0]["name"]:
nic["assigned_networks"] = bond["assigned_networks"]
break
        else:
            raise Exception("Bond slave NIC was not found (unexpected)")
self.data.remove(bond)
resp = put_func()
self.assertEqual(resp.status_code, 200)
def test_nics_bond_delete(self):
for put_func in (self.put_single, self.put_collection):
self.get_node_nics_info()
self.nics_bond_create(put_func)
self.nics_bond_remove(put_func)
resp = self.env.node_nics_get(self.env.nodes[0]["id"])
self.assertEqual(resp.status_code, 200)
for nic in resp.json_body:
self.assertNotEqual(nic["type"], NETWORK_INTERFACE_TYPES.bond)
def test_nics_linux_bond_create_delete(self):
for put_func in (self.put_single, self.put_collection):
self.get_node_nics_info()
self.nics_bond_create_w_properties(put_func)
self.nics_bond_remove(put_func)
resp = self.env.node_nics_get(self.env.nodes[0]["id"])
self.assertEqual(resp.status_code, 200)
for nic in resp.json_body:
self.assertNotEqual(nic["type"], NETWORK_INTERFACE_TYPES.bond)
def test_nics_bond_removed_on_node_unassign(self):
self.get_node_nics_info()
self.nics_bond_create(self.put_single)
node = self.env.nodes[0]
resp = self.app.post(
reverse(
'NodeUnassignmentHandler',
kwargs={'cluster_id': self.env.clusters[0]['id']}
),
jsonutils.dumps([{'id': node.id}]),
headers=self.default_headers
)
self.assertEqual(200, resp.status_code)
self.assertEqual(node.cluster, None)
resp = self.env.node_nics_get(node.id)
self.assertEqual(resp.status_code, 200)
for nic in resp.json_body:
self.assertNotEqual(nic["type"], NETWORK_INTERFACE_TYPES.bond)
def test_nics_bond_removed_on_remove_node_from_cluster(self):
self.get_node_nics_info()
self.nics_bond_create(self.put_single)
node = self.env.nodes[0]
resp = self.app.put(
reverse('ClusterHandler',
kwargs={'obj_id': self.env.clusters[0]['id']}),
jsonutils.dumps({'nodes': []}),
headers=self.default_headers,
expect_errors=True
)
self.assertEqual(resp.status_code, 200)
self.assertEqual(node.cluster, None)
resp = self.env.node_nics_get(node.id)
self.assertEqual(resp.status_code, 200)
for nic in resp.json_body:
self.assertNotEqual(nic["type"], NETWORK_INTERFACE_TYPES.bond)
def test_nics_bond_create_failed_no_type(self):
self.data.append({
"name": 'ovs-bond0'
})
self.node_nics_put_check_error(
"Node '{0}': each interface must have a "
"type".format(self.env.nodes[0]["id"])
)
def test_nics_bond_create_failed_not_have_enough_data(self):
self.data.append({
"type": NETWORK_INTERFACE_TYPES.bond
})
self.other_nic["assigned_networks"] = []
self.node_nics_put_check_error(
"Node '{0}': each bond interface must have "
"name".format(self.env.nodes[0]["id"])
)
def test_nics_bond_create_failed_unknown_mode(self):
self.data.append({
"name": 'ovs-bond0',
"type": NETWORK_INTERFACE_TYPES.bond,
"mode": "unknown",
"slaves": [
{"name": self.other_nic["name"]},
{"name": self.empty_nic["name"]}],
"assigned_networks": self.other_nic["assigned_networks"]
})
self.other_nic["assigned_networks"] = []
self.node_nics_put_check_error(
"Node '{0}': bond interface 'ovs-bond0' has unknown mode "
"'unknown'".format(self.env.nodes[0]["id"])
)
def test_nics_bond_create_failed_no_mode(self):
self.data.append({
"name": 'ovs-bond0',
"type": NETWORK_INTERFACE_TYPES.bond,
"slaves": [
{"name": self.other_nic["name"]},
{"name": self.empty_nic["name"]}],
"assigned_networks": self.other_nic["assigned_networks"]
})
self.other_nic["assigned_networks"] = []
self.node_nics_put_check_error(
"Node '{0}': bond interface 'ovs-bond0' doesn't have mode".format(
self.env.nodes[0]["id"]))
def test_nics_bond_create_failed_no_mode_in_properties(self):
self.data.append({
"name": 'bond0',
"type": NETWORK_INTERFACE_TYPES.bond,
"bond_properties": {
"xmit_hash_policy": BOND_XMIT_HASH_POLICY.layer2_3
},
"slaves": [
{"name": self.other_nic["name"]},
{"name": self.empty_nic["name"]}],
"assigned_networks": self.other_nic["assigned_networks"]
})
self.other_nic["assigned_networks"] = []
self.node_nics_put_check_error(
"Node '{0}': bond interface 'bond0' doesn't have mode".format(
self.env.nodes[0]["id"]))
def test_nics_bond_create_failed_unknown_mode_in_properties(self):
self.data.append({
"name": 'bond0',
"type": NETWORK_INTERFACE_TYPES.bond,
"bond_properties": {
"mode": 'unknown'
},
"slaves": [
{"name": self.other_nic["name"]},
{"name": self.empty_nic["name"]}],
"assigned_networks": self.other_nic["assigned_networks"]
})
self.other_nic["assigned_networks"] = []
self.node_nics_put_check_error(
"Node '{0}': bond interface 'bond0' has unknown mode "
"'unknown'".format(self.env.nodes[0]["id"]))
def test_nics_bond_create_failed_unknown_property(self):
self.data.append({
"name": 'bond0',
"type": NETWORK_INTERFACE_TYPES.bond,
"bond_properties": {
"mode": BOND_MODES.balance_xor,
"policy": BOND_XMIT_HASH_POLICY.layer2_3
},
"slaves": [
{"name": self.other_nic["name"]},
{"name": self.empty_nic["name"]}],
"assigned_networks": self.other_nic["assigned_networks"]
})
self.other_nic["assigned_networks"] = []
self.node_nics_put_check_error(
"Node '{0}', interface 'bond0': unknown bond property "
"'policy'".format(self.env.nodes[0]["id"]))
def test_nics_bond_create_failed_no_slaves(self):
self.data.append({
"name": 'ovs-bond0',
"type": NETWORK_INTERFACE_TYPES.bond,
"mode": BOND_MODES.balance_slb,
"assigned_networks": self.other_nic["assigned_networks"]
})
self.other_nic["assigned_networks"] = []
self.node_nics_put_check_error(
"Node '{0}': each bond interface must have "
"two or more slaves".format(self.env.nodes[0]["id"])
)
def test_nics_bond_create_failed_one_slave(self):
self.data.append({
"name": 'ovs-bond0',
"type": NETWORK_INTERFACE_TYPES.bond,
"mode": BOND_MODES.balance_slb,
"slaves": [
{"name": self.other_nic["name"]}],
"assigned_networks": self.other_nic["assigned_networks"]
})
self.other_nic["assigned_networks"] = []
self.node_nics_put_check_error(
"Node '{0}': each bond interface must have "
"two or more slaves".format(self.env.nodes[0]["id"])
)
def test_nics_bond_create_failed_no_assigned_networks(self):
self.data.append({
"name": 'ovs-bond0',
"type": NETWORK_INTERFACE_TYPES.bond,
"mode": BOND_MODES.balance_slb,
"slaves": [
{"name": self.other_nic["name"]},
{"name": self.empty_nic["name"]}],
})
self.other_nic["assigned_networks"] = []
self.node_nics_put_check_error(
"Node '{0}', interface 'ovs-bond0': there is no "
"'assigned_networks' list".format(self.env.nodes[0]["id"])
)
def test_nics_bond_create_failed_nic_is_used_twice(self):
self.data.append({
"name": 'ovs-bond0',
"type": NETWORK_INTERFACE_TYPES.bond,
"mode": BOND_MODES.balance_slb,
"slaves": [
{"name": self.other_nic["name"]},
{"name": self.other_nic["name"]}],
"assigned_networks": self.other_nic["assigned_networks"]
})
self.other_nic["assigned_networks"] = []
self.node_nics_put_check_error(
"Node '{0}': interface '{1}' is used in bonds more "
"than once".format(self.env.nodes[0]["id"], self.other_nic["id"])
)
def test_nics_bond_create_failed_duplicated_assigned_networks(self):
self.data.append({
"name": 'ovs-bond0',
"type": NETWORK_INTERFACE_TYPES.bond,
"mode": BOND_MODES.balance_slb,
"slaves": [
{"name": self.other_nic["name"]},
{"name": self.empty_nic["name"]}],
"assigned_networks": self.other_nic["assigned_networks"]
})
self.node_nics_put_check_error(
"Node '{0}': there is a duplicated network '{1}' in "
"assigned networks (second occurrence is in interface "
"'ovs-bond0')".format(
self.env.nodes[0]["id"],
self.other_nic["assigned_networks"][0]["id"])
)
def test_nics_bond_create_failed_unknown_interface(self):
self.data.append({
"name": 'ovs-bond0',
"type": NETWORK_INTERFACE_TYPES.bond,
"mode": BOND_MODES.balance_slb,
"slaves": [
{"name": self.other_nic["name"]},
{"name": "some_nic"}],
"assigned_networks": self.other_nic["assigned_networks"]
})
self.other_nic["assigned_networks"] = []
self.node_nics_put_check_error(
"Node '{0}': there is no interface 'some_nic' found for bond "
"'ovs-bond0' in DB".format(self.env.nodes[0]["id"])
)
def test_nics_bond_create_failed_slave_has_assigned_networks(self):
self.data.append({
"name": 'ovs-bond0',
"type": NETWORK_INTERFACE_TYPES.bond,
"mode": BOND_MODES.balance_slb,
"slaves": [
{"name": self.other_nic["name"]},
{"name": self.empty_nic["name"]}],
"assigned_networks": []
})
self.node_nics_put_check_error(
"Node '{0}': interface '{1}' cannot have assigned networks as it "
"is used in bond".format(self.env.nodes[0]["id"],
self.other_nic["id"])
)
def test_nics_bond_create_failed_slave_has_no_name(self):
self.data.append({
"name": 'ovs-bond0',
"type": NETWORK_INTERFACE_TYPES.bond,
"mode": BOND_MODES.balance_slb,
"slaves": [
{"name": self.other_nic["name"]},
{"nic": self.empty_nic["name"]}],
"assigned_networks": self.other_nic["assigned_networks"]
})
self.other_nic["assigned_networks"] = []
self.node_nics_put_check_error(
"Node '{0}', interface 'ovs-bond0': each bond slave "
"must have name".format(self.env.nodes[0]["id"])
)
@mock.patch.dict(settings.VERSION, {'feature_groups': ['mirantis']})
def test_nics_bond_create_failed_admin_net_w_lacp_lnx(self):
mode = BOND_MODES.l_802_3ad
bond_nets = self.admin_nic["assigned_networks"] + \
self.other_nic["assigned_networks"]
self.data.append({
"name": 'lnx-bond0',
"type": NETWORK_INTERFACE_TYPES.bond,
"mode": mode,
"slaves": [
{"name": self.admin_nic["name"]},
{"name": self.other_nic["name"]}],
"assigned_networks": bond_nets
})
self.admin_nic["assigned_networks"] = []
self.other_nic["assigned_networks"] = []
self.node_nics_put_check_error(
"Node '{0}': interface 'lnx-bond0' belongs to admin network "
"and has lacp mode '{1}'".format(self.env.nodes[0]["id"], mode)
)
@mock.patch.dict(settings.VERSION, {'feature_groups': ['mirantis']})
def test_nics_bond_create_failed_admin_net_w_lacp_ovs(self):
mode = BOND_MODES.lacp_balance_tcp
bond_nets = self.admin_nic["assigned_networks"] + \
self.other_nic["assigned_networks"]
self.data.append({
"name": 'ovs-bond0',
"type": NETWORK_INTERFACE_TYPES.bond,
"mode": mode,
"slaves": [
{"name": self.admin_nic["name"]},
{"name": self.other_nic["name"]}],
"assigned_networks": bond_nets
})
self.admin_nic["assigned_networks"] = []
self.other_nic["assigned_networks"] = []
self.node_nics_put_check_error(
"Node '{0}': interface 'ovs-bond0' belongs to admin network "
"and has lacp mode '{1}'".format(self.env.nodes[0]["id"], mode)
)
def test_nics_bond_create_admin_net_w_lacp_experimental_mode(self):
mode = BOND_MODES.lacp_balance_tcp
bond_nets = self.admin_nic["assigned_networks"] + \
self.other_nic["assigned_networks"]
self.data.append({
"name": 'ovs-bond0',
"type": NETWORK_INTERFACE_TYPES.bond,
"mode": mode,
"slaves": [
{"name": self.admin_nic["name"]},
{"name": self.other_nic["name"]}],
"assigned_networks": bond_nets
})
self.admin_nic["assigned_networks"] = []
self.other_nic["assigned_networks"] = []
resp = self.put_single()
self.assertEqual(resp.status_code, 200)
def test_nics_bond_create_failed_admin_net_w_o_pxe_iface(self):
mode = BOND_MODES.balance_slb
bond_nets = [self.admin_nic["assigned_networks"][0]] + \
self.other_nic["assigned_networks"]
del self.admin_nic["assigned_networks"][0]
self.data.append({
"name": 'ovs-bond0',
"type": NETWORK_INTERFACE_TYPES.bond,
"mode": mode,
"slaves": [
{"name": self.empty_nic["name"]},
{"name": self.other_nic["name"]}],
"assigned_networks": bond_nets
})
self.other_nic["assigned_networks"] = []
self.node_nics_put_check_error(
"Node '{0}': interface 'ovs-bond0' belongs to admin network "
"and doesn't contain node's pxe interface 'eth0'".format(
self.env.nodes[0]["id"])
)
def test_nics_bond_change_offloading_modes(self):
self.get_node_nics_info()
self.nics_bond_create(self.put_single)
resp = self.app.get(
reverse("NodeNICsHandler",
kwargs={"node_id": self.env.nodes[0]["id"]}),
headers=self.default_headers)
self.assertEqual(200, resp.status_code)
body = resp.json_body
bonds = filter(
lambda iface: iface["type"] == NETWORK_INTERFACE_TYPES.bond,
body)
self.assertEqual(1, len(bonds))
bond_offloading_modes = bonds[0]['offloading_modes']
self.assertEqual(len(bond_offloading_modes), 1)
slaves = bonds[0]['slaves']
self.assertEqual(2, len(slaves))
self.assertIsNone(bond_offloading_modes[0]['state'])
bond_offloading_modes[0]['state'] = True
self.assertTrue(bond_offloading_modes[0]['state'])
resp = self.env.node_nics_put(
self.env.nodes[0]["id"],
body)
body = resp.json_body
bonds = filter(
lambda iface: iface["type"] == NETWORK_INTERFACE_TYPES.bond,
body)
self.assertEqual(1, len(bonds))
bond_offloading_modes = bonds[0]['offloading_modes']
self.assertEqual(len(bond_offloading_modes), 1)
slaves = bonds[0]['slaves']
self.assertEqual(2, len(slaves))
self.assertTrue(bond_offloading_modes[0]['state'])
|
|
# Copyright 2009-2010 10gen, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the cursor module."""
import unittest
import random
import warnings
import sys
import itertools
sys.path[0:0] = [""]
from nose.plugins.skip import SkipTest
from bson.code import Code
from pymongo import (ASCENDING,
DESCENDING)
from pymongo.cursor import Cursor
from pymongo.database import Database
from pymongo.errors import (InvalidOperation,
OperationFailure)
from test_connection import get_connection
import version
class TestCursor(unittest.TestCase):
def setUp(self):
self.db = Database(get_connection(), "pymongo_test")
def test_explain(self):
a = self.db.test.find()
b = a.explain()
for _ in a:
break
c = a.explain()
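        # "millis" and "oldPlan" vary between runs, so strip them before
        # comparing the two explain() results.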
del b["millis"]
b.pop("oldPlan", None)
del c["millis"]
c.pop("oldPlan", None)
self.assertEqual(b, c)
self.assert_("cursor" in b)
def test_hint(self):
db = self.db
self.assertRaises(TypeError, db.test.find().hint, 5.5)
db.test.drop()
for i in range(100):
db.test.insert({"num": i, "foo": i})
self.assertRaises(OperationFailure,
db.test.find({"num": 17, "foo": 17})
.hint([("num", ASCENDING)]).explain)
self.assertRaises(OperationFailure,
db.test.find({"num": 17, "foo": 17})
.hint([("foo", ASCENDING)]).explain)
index = db.test.create_index("num")
spec = [("num", ASCENDING)]
self.assertEqual(db.test.find({}).explain()["cursor"], "BasicCursor")
self.assertEqual(db.test.find({}).hint(spec).explain()["cursor"],
"BtreeCursor %s" % index)
self.assertEqual(db.test.find({}).hint(spec).hint(None)
.explain()["cursor"],
"BasicCursor")
self.assertRaises(OperationFailure,
db.test.find({"num": 17, "foo": 17})
.hint([("foo", ASCENDING)]).explain)
a = db.test.find({"num": 17})
a.hint(spec)
for _ in a:
break
self.assertRaises(InvalidOperation, a.hint, spec)
self.assertRaises(TypeError, db.test.find().hint, index)
def test_limit(self):
db = self.db
self.assertRaises(TypeError, db.test.find().limit, None)
self.assertRaises(TypeError, db.test.find().limit, "hello")
self.assertRaises(TypeError, db.test.find().limit, 5.5)
db.test.drop()
for i in range(100):
db.test.save({"x": i})
count = 0
for _ in db.test.find():
count += 1
self.assertEqual(count, 100)
count = 0
for _ in db.test.find().limit(20):
count += 1
self.assertEqual(count, 20)
count = 0
for _ in db.test.find().limit(99):
count += 1
self.assertEqual(count, 99)
count = 0
for _ in db.test.find().limit(1):
count += 1
self.assertEqual(count, 1)
count = 0
for _ in db.test.find().limit(0):
count += 1
self.assertEqual(count, 100)
count = 0
for _ in db.test.find().limit(0).limit(50).limit(10):
count += 1
self.assertEqual(count, 10)
a = db.test.find()
a.limit(10)
for _ in a:
break
self.assertRaises(InvalidOperation, a.limit, 5)
def test_batch_size(self):
db = self.db
db.test.drop()
for x in range(200):
db.test.save({"x": x})
self.assertRaises(TypeError, db.test.find().batch_size, None)
self.assertRaises(TypeError, db.test.find().batch_size, "hello")
self.assertRaises(TypeError, db.test.find().batch_size, 5.5)
self.assertRaises(ValueError, db.test.find().batch_size, -1)
a = db.test.find()
for _ in a:
break
self.assertRaises(InvalidOperation, a.batch_size, 5)
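        # Note: batch_size only controls how many documents the server
        # returns per round-trip; unlike limit() it never changes the total
        # number of results, which is what the checks below exercise.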
def cursor_count(cursor, expected_count):
count = 0
for _ in cursor:
count += 1
self.assertEqual(expected_count, count)
cursor_count(db.test.find().batch_size(0), 200)
cursor_count(db.test.find().batch_size(1), 200)
cursor_count(db.test.find().batch_size(2), 200)
cursor_count(db.test.find().batch_size(5), 200)
cursor_count(db.test.find().batch_size(100), 200)
cursor_count(db.test.find().batch_size(500), 200)
cursor_count(db.test.find().batch_size(0).limit(1), 1)
cursor_count(db.test.find().batch_size(1).limit(1), 1)
cursor_count(db.test.find().batch_size(2).limit(1), 1)
cursor_count(db.test.find().batch_size(5).limit(1), 1)
cursor_count(db.test.find().batch_size(100).limit(1), 1)
cursor_count(db.test.find().batch_size(500).limit(1), 1)
cursor_count(db.test.find().batch_size(0).limit(10), 10)
cursor_count(db.test.find().batch_size(1).limit(10), 10)
cursor_count(db.test.find().batch_size(2).limit(10), 10)
cursor_count(db.test.find().batch_size(5).limit(10), 10)
cursor_count(db.test.find().batch_size(100).limit(10), 10)
cursor_count(db.test.find().batch_size(500).limit(10), 10)
def test_skip(self):
db = self.db
self.assertRaises(TypeError, db.test.find().skip, None)
self.assertRaises(TypeError, db.test.find().skip, "hello")
self.assertRaises(TypeError, db.test.find().skip, 5.5)
db.drop_collection("test")
for i in range(100):
db.test.save({"x": i})
for i in db.test.find():
self.assertEqual(i["x"], 0)
break
for i in db.test.find().skip(20):
self.assertEqual(i["x"], 20)
break
for i in db.test.find().skip(99):
self.assertEqual(i["x"], 99)
break
for i in db.test.find().skip(1):
self.assertEqual(i["x"], 1)
break
for i in db.test.find().skip(0):
self.assertEqual(i["x"], 0)
break
for i in db.test.find().skip(0).skip(50).skip(10):
self.assertEqual(i["x"], 10)
break
for i in db.test.find().skip(1000):
self.fail()
a = db.test.find()
a.skip(10)
for _ in a:
break
self.assertRaises(InvalidOperation, a.skip, 5)
def test_sort(self):
db = self.db
self.assertRaises(TypeError, db.test.find().sort, 5)
self.assertRaises(ValueError, db.test.find().sort, [])
self.assertRaises(TypeError, db.test.find().sort, [], ASCENDING)
self.assertRaises(TypeError, db.test.find().sort,
[("hello", DESCENDING)], DESCENDING)
self.assertRaises(TypeError, db.test.find().sort, "hello", "world")
db.test.drop()
unsort = range(10)
random.shuffle(unsort)
for i in unsort:
db.test.save({"x": i})
asc = [i["x"] for i in db.test.find().sort("x", ASCENDING)]
self.assertEqual(asc, range(10))
asc = [i["x"] for i in db.test.find().sort("x")]
self.assertEqual(asc, range(10))
asc = [i["x"] for i in db.test.find().sort([("x", ASCENDING)])]
self.assertEqual(asc, range(10))
expect = range(10)
expect.reverse()
desc = [i["x"] for i in db.test.find().sort("x", DESCENDING)]
self.assertEqual(desc, expect)
desc = [i["x"] for i in db.test.find().sort([("x", DESCENDING)])]
self.assertEqual(desc, expect)
desc = [i["x"] for i in
db.test.find().sort("x", ASCENDING).sort("x", DESCENDING)]
self.assertEqual(desc, expect)
expected = [(1, 5), (2, 5), (0, 3), (7, 3), (9, 2), (2, 1), (3, 1)]
shuffled = list(expected)
random.shuffle(shuffled)
db.test.drop()
for (a, b) in shuffled:
db.test.save({"a": a, "b": b})
result = [(i["a"], i["b"]) for i in
db.test.find().sort([("b", DESCENDING),
("a", ASCENDING)])]
self.assertEqual(result, expected)
a = db.test.find()
a.sort("x", ASCENDING)
for _ in a:
break
self.assertRaises(InvalidOperation, a.sort, "x", ASCENDING)
def test_count(self):
db = self.db
db.test.drop()
self.assertEqual(0, db.test.find().count())
for i in range(10):
db.test.save({"x": i})
self.assertEqual(10, db.test.find().count())
self.assert_(isinstance(db.test.find().count(), int))
self.assertEqual(10, db.test.find().limit(5).count())
self.assertEqual(10, db.test.find().skip(5).count())
self.assertEqual(1, db.test.find({"x": 1}).count())
self.assertEqual(5, db.test.find({"x": {"$lt": 5}}).count())
a = db.test.find()
b = a.count()
for _ in a:
break
self.assertEqual(b, a.count())
self.assertEqual(0, db.test.acollectionthatdoesntexist.find().count())
def test_where(self):
db = self.db
db.test.drop()
a = db.test.find()
self.assertRaises(TypeError, a.where, 5)
self.assertRaises(TypeError, a.where, None)
self.assertRaises(TypeError, a.where, {})
for i in range(10):
db.test.save({"x": i})
self.assertEqual(3, len(list(db.test.find().where('this.x < 3'))))
self.assertEqual(3,
len(list(db.test.find().where(Code('this.x < 3')))))
self.assertEqual(3, len(list(db.test.find().where(Code('this.x < i',
{"i": 3})))))
self.assertEqual(10, len(list(db.test.find())))
self.assertEqual(3, db.test.find().where('this.x < 3').count())
self.assertEqual(10, db.test.find().count())
self.assertEqual(3, db.test.find().where(u'this.x < 3').count())
self.assertEqual([0, 1, 2],
[a["x"] for a in
db.test.find().where('this.x < 3')])
self.assertEqual([],
[a["x"] for a in
db.test.find({"x": 5}).where('this.x < 3')])
self.assertEqual([5],
[a["x"] for a in
db.test.find({"x": 5}).where('this.x > 3')])
cursor = db.test.find().where('this.x < 3').where('this.x > 7')
self.assertEqual([8, 9], [a["x"] for a in cursor])
a = db.test.find()
b = a.where('this.x > 3')
for _ in a:
break
self.assertRaises(InvalidOperation, a.where, 'this.x < 3')
def test_kill_cursors(self):
db = self.db
db.drop_collection("test")
c = db.command("cursorInfo")["clientCursors_size"]
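        # "clientCursors_size" is the number of cursors the server holds
        # open. The assertions below verify that a cursor stays open only
        # while a partially iterated, unlimited find() is still referenced;
        # it is reaped once the object is deleted, and a satisfied limit()
        # lets the server close it immediately.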
test = db.test
for i in range(10000):
test.insert({"i": i})
self.assertEqual(c, db.command("cursorInfo")["clientCursors_size"])
for _ in range(10):
db.test.find_one()
self.assertEqual(c, db.command("cursorInfo")["clientCursors_size"])
for _ in range(10):
for x in db.test.find():
break
self.assertEqual(c, db.command("cursorInfo")["clientCursors_size"])
a = db.test.find()
for x in a:
break
self.assertNotEqual(c, db.command("cursorInfo")["clientCursors_size"])
del a
self.assertEqual(c, db.command("cursorInfo")["clientCursors_size"])
a = db.test.find().limit(10)
for x in a:
break
self.assertEqual(c, db.command("cursorInfo")["clientCursors_size"])
def test_rewind(self):
self.db.test.save({"x": 1})
self.db.test.save({"x": 2})
self.db.test.save({"x": 3})
cursor = self.db.test.find().limit(2)
count = 0
for _ in cursor:
count += 1
self.assertEqual(2, count)
count = 0
for _ in cursor:
count += 1
self.assertEqual(0, count)
cursor.rewind()
count = 0
for _ in cursor:
count += 1
self.assertEqual(2, count)
cursor.rewind()
count = 0
for _ in cursor:
break
cursor.rewind()
for _ in cursor:
count += 1
self.assertEqual(2, count)
self.assertEqual(cursor, cursor.rewind())
def test_clone(self):
self.db.test.save({"x": 1})
self.db.test.save({"x": 2})
self.db.test.save({"x": 3})
cursor = self.db.test.find().limit(2)
count = 0
for _ in cursor:
count += 1
self.assertEqual(2, count)
count = 0
for _ in cursor:
count += 1
self.assertEqual(0, count)
cursor = cursor.clone()
cursor2 = cursor.clone()
count = 0
for _ in cursor:
count += 1
self.assertEqual(2, count)
for _ in cursor2:
count += 1
self.assertEqual(4, count)
cursor.rewind()
count = 0
for _ in cursor:
break
cursor = cursor.clone()
for _ in cursor:
count += 1
self.assertEqual(2, count)
self.assertNotEqual(cursor, cursor.clone())
class MyClass(dict):
pass
cursor = self.db.test.find(as_class=MyClass)
for e in cursor:
self.assertEqual(type(MyClass()), type(e))
cursor = self.db.test.find(as_class=MyClass)
self.assertEqual(type(MyClass()), type(cursor[0]))
def test_count_with_fields(self):
self.db.test.drop()
self.db.test.save({"x": 1})
if not version.at_least(self.db.connection, (1, 1, 3, -1)):
for _ in self.db.test.find({}, ["a"]):
self.fail()
self.assertEqual(0, self.db.test.find({}, ["a"]).count())
else:
self.assertEqual(1, self.db.test.find({}, ["a"]).count())
def test_bad_getitem(self):
self.assertRaises(TypeError, lambda x: self.db.test.find()[x], "hello")
self.assertRaises(TypeError, lambda x: self.db.test.find()[x], 5.5)
self.assertRaises(TypeError, lambda x: self.db.test.find()[x], None)
def test_getitem_slice_index(self):
self.db.drop_collection("test")
for i in range(100):
self.db.test.save({"i": i})
izip = itertools.izip
count = itertools.count
self.assertRaises(IndexError, lambda: self.db.test.find()[-1:])
self.assertRaises(IndexError, lambda: self.db.test.find()[1:2:2])
for a, b in izip(count(0), self.db.test.find()):
self.assertEqual(a, b['i'])
self.assertEqual(100, len(list(self.db.test.find()[0:])))
for a, b in izip(count(0), self.db.test.find()[0:]):
self.assertEqual(a, b['i'])
self.assertEqual(80, len(list(self.db.test.find()[20:])))
for a, b in izip(count(20), self.db.test.find()[20:]):
self.assertEqual(a, b['i'])
for a, b in izip(count(99), self.db.test.find()[99:]):
self.assertEqual(a, b['i'])
for i in self.db.test.find()[1000:]:
self.fail()
self.assertEqual(5, len(list(self.db.test.find()[20:25])))
self.assertEqual(5, len(list(self.db.test.find()[20L:25L])))
for a, b in izip(count(20), self.db.test.find()[20:25]):
self.assertEqual(a, b['i'])
self.assertEqual(80, len(list(self.db.test.find()[40:45][20:])))
for a, b in izip(count(20), self.db.test.find()[40:45][20:]):
self.assertEqual(a, b['i'])
self.assertEqual(80,
len(list(self.db.test.find()[40:45].limit(0).skip(20))
)
)
for a, b in izip(count(20),
self.db.test.find()[40:45].limit(0).skip(20)):
self.assertEqual(a, b['i'])
self.assertEqual(80,
len(list(self.db.test.find().limit(10).skip(40)[20:]))
)
for a, b in izip(count(20),
self.db.test.find().limit(10).skip(40)[20:]):
self.assertEqual(a, b['i'])
self.assertEqual(1, len(list(self.db.test.find()[:1])))
self.assertEqual(5, len(list(self.db.test.find()[:5])))
self.assertEqual(1, len(list(self.db.test.find()[99:100])))
self.assertEqual(1, len(list(self.db.test.find()[99:1000])))
self.assertEqual(0, len(list(self.db.test.find()[10:10])))
self.assertEqual(0, len(list(self.db.test.find()[:0])))
self.assertEqual(80,
len(list(self.db.test.find()[10:10].limit(0).skip(20))
)
)
self.assertRaises(IndexError, lambda: self.db.test.find()[10:8])
def test_getitem_numeric_index(self):
self.db.drop_collection("test")
for i in range(100):
self.db.test.save({"i": i})
self.assertEqual(0, self.db.test.find()[0]['i'])
self.assertEqual(50, self.db.test.find()[50]['i'])
self.assertEqual(50, self.db.test.find().skip(50)[0]['i'])
self.assertEqual(50, self.db.test.find().skip(49)[1]['i'])
self.assertEqual(50, self.db.test.find()[50L]['i'])
self.assertEqual(99, self.db.test.find()[99]['i'])
self.assertRaises(IndexError, lambda x: self.db.test.find()[x], -1)
self.assertRaises(IndexError, lambda x: self.db.test.find()[x], 100)
self.assertRaises(IndexError,
lambda x: self.db.test.find().skip(50)[x], 50)
def test_count_with_limit_and_skip(self):
if not version.at_least(self.db.connection, (1, 1, 4, -1)):
raise SkipTest()
def check_len(cursor, length):
self.assertEqual(len(list(cursor)), cursor.count(True))
self.assertEqual(length, cursor.count(True))
self.db.drop_collection("test")
for i in range(100):
self.db.test.save({"i": i})
check_len(self.db.test.find(), 100)
check_len(self.db.test.find().limit(10), 10)
check_len(self.db.test.find().limit(110), 100)
check_len(self.db.test.find().skip(10), 90)
check_len(self.db.test.find().skip(110), 0)
check_len(self.db.test.find().limit(10).skip(10), 10)
check_len(self.db.test.find()[10:20], 10)
check_len(self.db.test.find().limit(10).skip(95), 5)
check_len(self.db.test.find()[95:105], 5)
def test_len(self):
self.assertRaises(TypeError, len, self.db.test.find())
def test_properties(self):
self.assertEqual(self.db.test, self.db.test.find().collection)
def set_coll():
self.db.test.find().collection = "hello"
self.assertRaises(AttributeError, set_coll)
def test_tailable(self):
db = self.db
db.drop_collection("test")
db.create_collection("test", capped=True, size=1000)
cursor = db.test.find(tailable=True)
db.test.insert({"x": 1})
count = 0
for doc in cursor:
count += 1
self.assertEqual(1, doc["x"])
self.assertEqual(1, count)
db.test.insert({"x": 2})
count = 0
for doc in cursor:
count += 1
self.assertEqual(2, doc["x"])
self.assertEqual(1, count)
db.test.insert({"x": 3})
count = 0
for doc in cursor:
count += 1
self.assertEqual(3, doc["x"])
self.assertEqual(1, count)
self.assertEqual(3, db.test.count())
db.drop_collection("test")
def test_distinct(self):
if not version.at_least(self.db.connection, (1, 1, 3, 1)):
raise SkipTest()
self.db.drop_collection("test")
self.db.test.save({"a": 1})
self.db.test.save({"a": 2})
self.db.test.save({"a": 2})
self.db.test.save({"a": 2})
self.db.test.save({"a": 3})
distinct = self.db.test.find({"a": {"$lt": 3}}).distinct("a")
distinct.sort()
self.assertEqual([1, 2], distinct)
self.db.drop_collection("test")
self.db.test.save({"a": {"b": "a"}, "c": 12})
self.db.test.save({"a": {"b": "b"}, "c": 8})
self.db.test.save({"a": {"b": "c"}, "c": 12})
self.db.test.save({"a": {"b": "c"}, "c": 8})
distinct = self.db.test.find({"c": 8}).distinct("a.b")
distinct.sort()
self.assertEqual(["b", "c"], distinct)
def test_max_scan(self):
if not version.at_least(self.db.connection, (1, 5, 1)):
raise SkipTest()
self.db.drop_collection("test")
for _ in range(100):
self.db.test.insert({})
self.assertEqual(100, len(list(self.db.test.find())))
self.assertEqual(50, len(list(self.db.test.find(max_scan=50))))
self.assertEqual(50, len(list(self.db.test.find()
.max_scan(90).max_scan(50))))
def test_with_statement(self):
c1 = self.db.test.find()
with self.db.test.find() as c2:
self.assertTrue(c2.alive)
self.assertTrue(c1.alive)
self.assertFalse(c2.alive)
if __name__ == "__main__":
unittest.main()
|
|
# Copyright 2016 Hewlett Packard Enterprise Development LP.
# Copyright 2016 Universidade Federal de Campina Grande
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import operator
from oslo_log import log as logging
from oslo_utils import importutils
from ironic.common import exception
from ironic.common.i18n import _, _LE, _LI, _LW
from ironic.common import states
from ironic.drivers.modules.oneview import common
LOG = logging.getLogger(__name__)
oneview_exception = importutils.try_import('oneview_client.exceptions')
oneview_utils = importutils.try_import('oneview_client.utils')
def get_properties():
return common.COMMON_PROPERTIES
def prepare(oneview_client, task):
"""Applies Server Profile and update the node when preparing.
This method is responsible for applying a Server Profile to the Server
Hardware and add the uri of the applied Server Profile in the node's
'applied_server_profile_uri' field on properties/capabilities.
:param oneview_client: an instance of the OneView client
:param task: A TaskManager object
:raises InstanceDeployFailure: If the node doesn't have the needed OneView
informations, if Server Hardware is in use by an OneView user, or
if the Server Profile can't be applied.
"""
if task.node.provision_state == states.DEPLOYING:
try:
instance_display_name = task.node.instance_info.get('display_name')
instance_uuid = task.node.instance_uuid
server_profile_name = (
"%(instance_name)s [%(instance_uuid)s]" %
{"instance_name": instance_display_name,
"instance_uuid": instance_uuid}
)
allocate_server_hardware_to_ironic(oneview_client, task.node,
server_profile_name)
except exception.OneViewError as e:
raise exception.InstanceDeployFailure(node=task.node.uuid,
reason=e)
def tear_down(oneview_client, task):
"""Remove Server profile and update the node when tear down.
This method is responsible for power a Server Hardware off, remove a Server
Profile from the Server Hardware and remove the uri of the applied Server
Profile from the node's 'applied_server_profile_uri' in
properties/capabilities.
:param oneview_client: an instance of the OneView client
:param task: A TaskManager object
:raises InstanceDeployFailure: If node has no uri of applied Server
Profile, or if some error occur while deleting Server Profile.
"""
try:
deallocate_server_hardware_from_ironic(oneview_client, task.node)
except exception.OneViewError as e:
raise exception.InstanceDeployFailure(node=task.node.uuid, reason=e)
def prepare_cleaning(oneview_client, task):
"""Applies Server Profile and update the node when preparing cleaning.
This method is responsible for applying a Server Profile to the Server
Hardware and add the uri of the applied Server Profile in the node's
'applied_server_profile_uri' field on properties/capabilities.
:param oneview_client: an instance of the OneView client
:param task: A TaskManager object
:raises NodeCleaningFailure: If the node doesn't have the needed OneView
informations, if Server Hardware is in use by an OneView user, or
if the Server Profile can't be applied.
"""
try:
server_profile_name = "Ironic Cleaning [%s]" % task.node.uuid
allocate_server_hardware_to_ironic(oneview_client, task.node,
server_profile_name)
except exception.OneViewError as e:
oneview_error = common.SERVER_HARDWARE_ALLOCATION_ERROR
driver_internal_info = task.node.driver_internal_info
driver_internal_info['oneview_error'] = oneview_error
task.node.driver_internal_info = driver_internal_info
task.node.save()
raise exception.NodeCleaningFailure(node=task.node.uuid,
reason=e)
def tear_down_cleaning(oneview_client, task):
"""Remove Server profile and update the node when tear down cleaning.
This method is responsible for power a Server Hardware off, remove a Server
Profile from the Server Hardware and remove the uri of the applied Server
Profile from the node's 'applied_server_profile_uri' in
properties/capabilities.
:param oneview_client: an instance of the OneView client
:param task: A TaskManager object
:raises NodeCleaningFailure: If node has no uri of applied Server Profile,
or if some error occur while deleting Server Profile.
"""
try:
deallocate_server_hardware_from_ironic(oneview_client, task.node)
except exception.OneViewError as e:
raise exception.NodeCleaningFailure(node=task.node.uuid, reason=e)
def _is_node_in_use(server_hardware, applied_sp_uri, by_oneview=False):
"""Check if node is in use by ironic or by OneView.
:param by_oneview: Boolean value. True when want to verify if node is in
use by OneView. False to verify if node is in use by
ironic.
:param node: an ironic node object
:returns: Boolean value. True if by_oneview param is also True and node is
in use by OneView, False otherwise. True if by_oneview param is
False and node is in use by ironic, False otherwise.
"""
operation = operator.ne if by_oneview else operator.eq
return (server_hardware.server_profile_uri not in (None, '') and
operation(applied_sp_uri, server_hardware.server_profile_uri))
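# An illustrative sketch of the operator trick above: with, say,
# applied_sp_uri = '/rest/server-profiles/123' (an example URI),
#   _is_node_in_use(sh, applied_sp_uri) is True only when the hardware's
#   server_profile_uri equals that URI (in use by ironic), while
#   _is_node_in_use(sh, applied_sp_uri, by_oneview=True) is True only when
#   a different, non-empty profile is applied (in use by a OneView user).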
def is_node_in_use_by_oneview(oneview_client, node):
"""Check if node is in use by OneView user.
:param oneview_client: an instance of the OneView client
:param node: an ironic node object
:returns: Boolean value. True if node is in use by OneView,
False otherwise.
    :raises OneViewError: if it is not possible to get OneView's information
        for the given node, or to retrieve the Server Hardware from OneView.
"""
positive = _("Node '%s' is in use by OneView.") % node.uuid
negative = _("Node '%s' is not in use by OneView.") % node.uuid
def predicate(server_hardware, applied_sp_uri):
        # Check if a Profile exists in OneView and it is different from the
        # one applied by ironic
return _is_node_in_use(server_hardware, applied_sp_uri,
by_oneview=True)
return _check_applied_server_profile(oneview_client, node,
predicate, positive, negative)
def is_node_in_use_by_ironic(oneview_client, node):
"""Check if node is in use by ironic in OneView.
:param oneview_client: an instance of the OneView client
:param node: an ironic node object
:returns: Boolean value. True if node is in use by ironic,
False otherwise.
    :raises OneViewError: if it is not possible to get OneView's information
        for the given node, or to retrieve the Server Hardware from OneView.
"""
positive = _("Node '%s' is in use by Ironic.") % node.uuid
negative = _("Node '%s' is not in use by Ironic.") % node.uuid
def predicate(server_hardware, applied_sp_uri):
        # Check if a Profile exists in OneView and it is equal to the one
        # applied by ironic
return _is_node_in_use(server_hardware, applied_sp_uri,
by_oneview=False)
return _check_applied_server_profile(oneview_client, node,
predicate, positive, negative)
def _check_applied_server_profile(oneview_client, node,
predicate, positive, negative):
"""Check if node is in use by ironic in OneView.
:param oneview_client: an instance of the OneView client
:param node: an ironic node object
:returns: Boolean value. True if node is in use by ironic,
False otherwise.
:raises OneViewError: if not possible to get OneView's information
for the given node, if not possible to retrieve Server Hardware
from OneView.
"""
oneview_info = common.get_oneview_info(node)
sh_uuid = oneview_utils.get_uuid_from_uri(
oneview_info.get("server_hardware_uri")
)
try:
server_hardware = oneview_client.get_server_hardware_by_uuid(
sh_uuid
)
except oneview_exception.OneViewResourceNotFoundError as e:
msg = (_("Error while obtaining Server Hardware from node "
"%(node_uuid)s. Error: %(error)s") %
{'node_uuid': node.uuid, 'error': e})
raise exception.OneViewError(error=msg)
applied_sp_uri = (
node.driver_info.get('applied_server_profile_uri')
)
result = predicate(server_hardware, applied_sp_uri)
if result:
LOG.debug(positive)
else:
LOG.debug(negative)
return result
def _add_applied_server_profile_uri_field(node, applied_profile):
"""Adds the applied Server Profile uri to a node.
:param node: an ironic node object
"""
driver_info = node.driver_info
driver_info['applied_server_profile_uri'] = applied_profile.uri
node.driver_info = driver_info
node.save()
def _del_applied_server_profile_uri_field(node):
"""Delete the applied Server Profile uri from a node if it exists.
:param node: an ironic node object
"""
driver_info = node.driver_info
driver_info.pop('applied_server_profile_uri', None)
node.driver_info = driver_info
node.save()
def allocate_server_hardware_to_ironic(oneview_client, node,
server_profile_name):
"""Allocate Server Hardware to ironic.
:param oneview_client: an instance of the OneView client
:param node: an ironic node object
:param server_profile_name: a formatted string with the Server Profile
name
:raises OneViewError: if an error occurs while allocating the Server
Hardware to ironic
"""
node_in_use_by_oneview = is_node_in_use_by_oneview(oneview_client, node)
if not node_in_use_by_oneview:
oneview_info = common.get_oneview_info(node)
applied_sp_uri = node.driver_info.get('applied_server_profile_uri')
sh_uuid = oneview_utils.get_uuid_from_uri(
oneview_info.get("server_hardware_uri")
)
spt_uuid = oneview_utils.get_uuid_from_uri(
oneview_info.get("server_profile_template_uri")
)
server_hardware = oneview_client.get_server_hardware_by_uuid(sh_uuid)
        # The Server Hardware has no Server Profile applied in OneView, but
        # the node still carries an `applied_server_profile_uri` in
        # driver_info
        if (server_hardware.server_profile_uri in (None, '') and
                applied_sp_uri not in (None, '')):
_del_applied_server_profile_uri_field(node)
LOG.info(_LI(
"Inconsistent 'applied_server_profile_uri' parameter "
"value in driver_info. There is no Server Profile "
"applied to node %(node_uuid)s. Value deleted."),
{"node_uuid": node.uuid}
)
        # applied_server_profile_uri exists and is equal to the Server
        # Profile applied on the Hardware. Do not apply again.
if (applied_sp_uri and server_hardware.server_profile_uri and
server_hardware.server_profile_uri == applied_sp_uri):
LOG.info(_LI(
"The Server Profile %(applied_sp_uri)s was already applied "
"by ironic on node %(node_uuid)s. Reusing."),
{"node_uuid": node.uuid, "applied_sp_uri": applied_sp_uri}
)
return
try:
applied_profile = oneview_client.clone_template_and_apply(
server_profile_name, sh_uuid, spt_uuid
)
_add_applied_server_profile_uri_field(node, applied_profile)
LOG.info(
_LI("Server Profile %(server_profile_uuid)s was successfully"
" applied to node %(node_uuid)s."),
{"node_uuid": node.uuid,
"server_profile_uuid": applied_profile.uri}
)
except oneview_exception.OneViewServerProfileAssignmentError as e:
LOG.error(_LE("An error occurred during allocating server "
"hardware to ironic during prepare: %s"), e)
raise exception.OneViewError(error=e)
else:
msg = (_("Node %s is already in use by OneView.") %
node.uuid)
raise exception.OneViewError(error=msg)
def deallocate_server_hardware_from_ironic(oneview_client, node):
"""Deallocate Server Hardware from ironic.
:param oneview_client: an instance of the OneView client
:param node: an ironic node object
:raises OneViewError: if an error occurs while deallocating the Server
        Hardware from ironic
"""
if is_node_in_use_by_ironic(oneview_client, node):
oneview_info = common.get_oneview_info(node)
server_profile_uuid = oneview_utils.get_uuid_from_uri(
oneview_info.get('applied_server_profile_uri')
)
try:
oneview_client.power_off(oneview_info)
oneview_client.delete_server_profile(server_profile_uuid)
_del_applied_server_profile_uri_field(node)
LOG.info(_LI("Server Profile %(server_profile_uuid)s was deleted "
"from node %(node_uuid)s in OneView."),
{'server_profile_uuid': server_profile_uuid,
'node_uuid': node.uuid})
except (ValueError, oneview_exception.OneViewException) as e:
msg = (_("Error while deleting applied Server Profile from node "
"%(node_uuid)s. Error: %(error)s") %
{'node_uuid': node.uuid, 'error': e})
raise exception.OneViewError(error=msg)
else:
LOG.warning(_LW("Cannot deallocate node %(node_uuid)s "
"in OneView because it is not in use by "
"ironic."), {'node_uuid': node.uuid})
|
|
"""
INLINE PATTERNS
=============================================================================
Inline patterns such as *emphasis* are handled by means of auxiliary
objects, one per pattern. Pattern objects must be instances of classes
that extend markdown.Pattern. Each pattern object uses a single regular
expression and must support the following methods:
pattern.getCompiledRegExp() # returns a regular expression
pattern.handleMatch(m) # takes a match object and returns
# an ElementTree element or just plain text
All of python markdown's built-in patterns subclass from Pattern,
but you can add additional patterns that don't.
Also note that all the regular expressions used by inline patterns must
capture the whole block. For this reason, they are all wrapped to start
with '^(.*?)' and end with '(.*?)$'. For the built-in expressions, the
Pattern base class takes care of adding these wrappers.
Finally, the order in which regular expressions are applied is very
important - e.g. if we first replace http://.../ links with <a> tags
and _then_ try to replace inline html, we would end up with a mess.
So, we apply the expressions in the following order:
* escape and backticks have to go before everything else, so
that we can preempt any markdown patterns by escaping them.
* then we handle auto-links (must be done before inline html)
* then we handle inline HTML. At this point we will simply
replace all inline HTML strings with a placeholder and add
the actual HTML to a hash.
* then inline images (must be done before links)
* then bracketed links, first regular then reference-style
* finally we apply strong and emphasis
"""
import __init__ as markdown
import re
from urlparse import urlparse, urlunparse
import sys
if sys.version >= "3.0":
from html import entities as htmlentitydefs
else:
import htmlentitydefs
"""
The actual regular expressions for patterns
-----------------------------------------------------------------------------
"""
NOBRACKET = r'[^\]\[]*'
BRK = ( r'\[('
+ (NOBRACKET + r'(\[')*6
+ (NOBRACKET+ r'\])*')*6
+ NOBRACKET + r')\]' )
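# BRK matches a bracketed span such as '[text]' while tolerating square
# brackets nested up to six levels deep; the repeated groups above unroll
# that bounded nesting by hand, since a single regular expression cannot
# express arbitrary nesting.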
NOIMG = r'(?<!\!)'
BACKTICK_RE = r'(?<!\\)(`+)(.+?)(?<!`)\2(?!`)' # `e=f()` or ``e=f("`")``
ESCAPE_RE = r'\\(.)' # \<
EMPHASIS_RE = r'(\*)([^\*]*)\2' # *emphasis*
STRONG_RE = r'(\*{2}|_{2})(.*?)\2' # **strong**
STRONG_EM_RE = r'(\*{3}|_{3})(.*?)\2' # ***strong***
if markdown.SMART_EMPHASIS:
EMPHASIS_2_RE = r'(?<!\S)(_)(\S.*?)\2' # _emphasis_
else:
EMPHASIS_2_RE = r'(_)(.*?)\2' # _emphasis_
LINK_RE = NOIMG + BRK + \
r'''\(\s*(<.*?>|((?:(?:\(.*?\))|[^\(\)]))*?)\s*((['"])(.*)\12)?\)'''
# [text](url) or [text](<url>)
IMAGE_LINK_RE = r'\!' + BRK + r'\s*\((<.*?>|([^\)]*))\)'
#  or 
REFERENCE_RE = NOIMG + BRK+ r'\s*\[([^\]]*)\]' # [Google][3]
IMAGE_REFERENCE_RE = r'\!' + BRK + r'\s*\[([^\]]*)\]' # ![alt text][2]
NOT_STRONG_RE = r'( \* )' # stand-alone * or _
AUTOLINK_RE = r'<((?:f|ht)tps?://[^>]*)>' # <http://www.123.com>
AUTOMAIL_RE = r'<([^> \!]*@[^> ]*)>' # <me@example.com>
HTML_RE = r'(\<([a-zA-Z/][^\>]*?|\!--.*?--)\>)' # <...>
ENTITY_RE = r'(&[\#a-zA-Z0-9]*;)' # &amp;
LINE_BREAK_RE = r' \n' # two spaces at end of line
LINE_BREAK_2_RE = r' $' # two spaces at end of text
def dequote(string):
"""Remove quotes from around a string."""
if ( ( string.startswith('"') and string.endswith('"'))
or (string.startswith("'") and string.endswith("'")) ):
return string[1:-1]
else:
return string
ATTR_RE = re.compile(r"\{@([^\}]*)=([^\}]*)}") # {@id=123}
def handleAttributes(text, parent):
    """Set values of an element based on attribute definitions ({@id=123})."""
    def attributeCallback(match):
        parent.set(match.group(1), match.group(2).replace('\n', ' '))
        # the sub() callback must return the replacement text; return an
        # empty string so the attribute definition is stripped from the output
        return ""
    return ATTR_RE.sub(attributeCallback, text)
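# For example (a sketch): given an ElementTree element `el`,
# handleAttributes('Some text {@id=123}', el) returns 'Some text ' and
# sets el's "id" attribute to "123".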
"""
The pattern classes
-----------------------------------------------------------------------------
"""
class Pattern:
"""Base class that inline patterns subclass. """
def __init__ (self, pattern, markdown_instance=None):
"""
        Create an instance of an inline pattern.
Keyword arguments:
* pattern: A regular expression that matches a pattern
"""
self.pattern = pattern
self.compiled_re = re.compile("^(.*?)%s(.*?)$" % pattern, re.DOTALL)
# Api for Markdown to pass safe_mode into instance
self.safe_mode = False
if markdown_instance:
self.markdown = markdown_instance
def getCompiledRegExp (self):
""" Return a compiled regular expression. """
return self.compiled_re
def handleMatch(self, m):
"""Return a ElementTree element from the given match.
Subclasses should override this method.
Keyword arguments:
* m: A re match object containing a match of the pattern.
"""
pass
def type(self):
""" Return class name, to define pattern type """
return self.__class__.__name__
BasePattern = Pattern # for backward compatibility
class SimpleTextPattern (Pattern):
""" Return a simple text of group(2) of a Pattern. """
def handleMatch(self, m):
text = m.group(2)
if text == markdown.INLINE_PLACEHOLDER_PREFIX:
return None
return text
class SimpleTagPattern (Pattern):
"""
    Return an element of type `tag` with a text attribute of group(3)
of a Pattern.
"""
def __init__ (self, pattern, tag):
Pattern.__init__(self, pattern)
self.tag = tag
def handleMatch(self, m):
el = markdown.etree.Element(self.tag)
el.text = m.group(3)
return el
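# A minimal sketch of wiring up a custom inline pattern with
# SimpleTagPattern; the '--strikethrough--' syntax, the DEL_RE name, and
# the choice of the 'del' tag are illustrative assumptions, not part of
# the built-in pattern set:
#
#     DEL_RE = r'(-{2})(.+?)\2'   # --strikethrough--
#     md = markdown.Markdown()
#     md.inlinePatterns.add('del', SimpleTagPattern(DEL_RE, 'del'),
#                           '>emphasis')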
class SubstituteTagPattern (SimpleTagPattern):
""" Return a eLement of type `tag` with no children. """
def handleMatch (self, m):
return markdown.etree.Element(self.tag)
class BacktickPattern (Pattern):
""" Return a `<code>` element containing the matching text. """
def __init__ (self, pattern):
Pattern.__init__(self, pattern)
self.tag = "code"
def handleMatch(self, m):
el = markdown.etree.Element(self.tag)
el.text = markdown.AtomicString(m.group(3).strip())
return el
class DoubleTagPattern (SimpleTagPattern):
"""Return a ElementTree element nested in tag2 nested in tag1.
Useful for strong emphasis etc.
"""
def handleMatch(self, m):
tag1, tag2 = self.tag.split(",")
el1 = markdown.etree.Element(tag1)
el2 = markdown.etree.SubElement(el1, tag2)
el2.text = m.group(3)
return el1
class HtmlPattern (Pattern):
""" Store raw inline html and return a placeholder. """
def handleMatch (self, m):
rawhtml = m.group(2)
inline = True
place_holder = self.markdown.htmlStash.store(rawhtml)
return place_holder
class LinkPattern (Pattern):
""" Return a link element from the given match. """
def handleMatch(self, m):
el = markdown.etree.Element("a")
el.text = m.group(2)
title = m.group(11)
href = m.group(9)
if href:
if href[0] == "<":
href = href[1:-1]
el.set("href", self.sanitize_url(href.strip()))
else:
el.set("href", "")
if title:
            title = dequote(title) #.replace('"', "&quot;")
el.set("title", title)
return el
def sanitize_url(self, url):
"""
Sanitize a url against xss attacks in "safe_mode".
Rather than specifically blacklisting `javascript:alert("XSS")` and all
its aliases (see <http://ha.ckers.org/xss.html>), we whitelist known
safe url formats. Most urls contain a network location, however some
are known not to (i.e.: mailto links). Script urls do not contain a
location. Additionally, for `javascript:...`, the scheme would be
"javascript" but some aliases will appear to `urlparse()` to have no
scheme. On top of that relative links (i.e.: "foo/bar.html") have no
scheme. Therefore we must check "path", "parameters", "query" and
"fragment" for any literal colons. We don't check "scheme" for colons
because it *should* never have any and "netloc" must allow the form:
`username:password@host:port`.
"""
locless_schemes = ['', 'mailto', 'news']
scheme, netloc, path, params, query, fragment = url = urlparse(url)
safe_url = False
if netloc != '' or scheme in locless_schemes:
safe_url = True
for part in url[2:]:
if ":" in part:
safe_url = False
if self.markdown.safeMode and not safe_url:
return ''
else:
return urlunparse(url)
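# For example, in safe_mode (a sketch of the rules above):
#     'http://example.com/a'  -> kept (has a network location)
#     'mailto:me@example.com' -> kept (locless scheme, no stray colons)
#     'javascript:alert(1)'   -> ''   (no netloc and not a locless scheme)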
class ImagePattern(LinkPattern):
""" Return a img element from the given match. """
def handleMatch(self, m):
el = markdown.etree.Element("img")
src_parts = m.group(9).split()
if src_parts:
src = src_parts[0]
if src[0] == "<" and src[-1] == ">":
src = src[1:-1]
el.set('src', self.sanitize_url(src))
else:
el.set('src', "")
if len(src_parts) > 1:
el.set('title', dequote(" ".join(src_parts[1:])))
if markdown.ENABLE_ATTRIBUTES:
truealt = handleAttributes(m.group(2), el)
else:
truealt = m.group(2)
el.set('alt', truealt)
return el
class ReferencePattern(LinkPattern):
""" Match to a stored reference and return link element. """
def handleMatch(self, m):
if m.group(9):
id = m.group(9).lower()
else:
# if we got something like "[Google][]"
# we'll use "google" as the id
id = m.group(2).lower()
        if id not in self.markdown.references: # ignore undefined refs
return None
href, title = self.markdown.references[id]
text = m.group(2)
return self.makeTag(href, title, text)
def makeTag(self, href, title, text):
el = markdown.etree.Element('a')
el.set('href', self.sanitize_url(href))
if title:
el.set('title', title)
el.text = text
return el
class ImageReferencePattern (ReferencePattern):
""" Match to a stored reference and return img element. """
def makeTag(self, href, title, text):
el = markdown.etree.Element("img")
el.set("src", self.sanitize_url(href))
if title:
el.set("title", title)
el.set("alt", text)
return el
class AutolinkPattern (Pattern):
""" Return a link Element given an autolink (`<http://example/com>`). """
def handleMatch(self, m):
el = markdown.etree.Element("a")
el.set('href', m.group(2))
el.text = markdown.AtomicString(m.group(2))
return el
class AutomailPattern (Pattern):
"""
Return a mailto link Element given an automail link (`<foo@example.com>`).
"""
def handleMatch(self, m):
el = markdown.etree.Element('a')
email = m.group(2)
if email.startswith("mailto:"):
email = email[len("mailto:"):]
def codepoint2name(code):
"""Return entity definition by code, or the code if not defined."""
entity = htmlentitydefs.codepoint2name.get(code)
if entity:
return "%s%s;" % (markdown.AMP_SUBSTITUTE, entity)
else:
return "%s#%d;" % (markdown.AMP_SUBSTITUTE, code)
letters = [codepoint2name(ord(letter)) for letter in email]
el.text = markdown.AtomicString(''.join(letters))
mailto = "mailto:" + email
mailto = "".join([markdown.AMP_SUBSTITUTE + '#%d;' %
ord(letter) for letter in mailto])
el.set('href', mailto)
return el
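# The per-character entity encoding above (e.g. "m" -> "&#109;", written
# through the AMP_SUBSTITUTE placeholder) lightly obfuscates the address
# against harvesting bots while still rendering normally in a browser.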
|
|
##########################################################################
#
# Copyright (c) 2013, John Haddon. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import unittest
import imath
import random
import shutil
import IECore
import IECoreScene
import Gaffer
import GafferTest
import GafferScene
import GafferOSL
import GafferOSLTest
import GafferImage
class OSLShaderTest( GafferOSLTest.OSLTestCase ) :
def test( self ) :
s = self.compileShader( os.path.dirname( __file__ ) + "/shaders/types.osl" )
n = GafferOSL.OSLShader()
n.loadShader( s )
self.assertEqual( n["name"].getValue(), s )
self.assertEqual( n["type"].getValue(), "osl:surface" )
self.assertEqual( n["parameters"].keys(), [ "i", "f", "c", "s", "m" ] )
self.assertTrue( isinstance( n["parameters"]["i"], Gaffer.IntPlug ) )
self.assertTrue( isinstance( n["parameters"]["f"], Gaffer.FloatPlug ) )
self.assertTrue( isinstance( n["parameters"]["c"], Gaffer.Color3fPlug ) )
self.assertTrue( isinstance( n["parameters"]["s"], Gaffer.StringPlug ) )
self.assertTrue( isinstance( n["parameters"]["m"], Gaffer.M44fPlug ) )
self.assertEqual( n["parameters"]["i"].defaultValue(), 10 )
self.assertEqual( n["parameters"]["f"].defaultValue(), 1 )
self.assertEqual( n["parameters"]["c"].defaultValue(), imath.Color3f( 1, 2, 3 ) )
self.assertEqual( n["parameters"]["s"].defaultValue(), "s" )
self.assertEqual( n["parameters"]["m"].defaultValue(), imath.M44f() )
self.assertEqual( n["out"].typeId(), Gaffer.Plug.staticTypeId() )
network = n.attributes()["osl:surface"]
self.assertEqual( len( network ), 1 )
self.assertEqual( network.outputShader().name, s )
self.assertEqual( network.outputShader().type, "osl:surface" )
self.assertEqual( network.outputShader().parameters["i"], IECore.IntData( 10 ) )
self.assertEqual( network.outputShader().parameters["f"], IECore.FloatData( 1 ) )
self.assertEqual( network.outputShader().parameters["c"], IECore.Color3fData( imath.Color3f( 1, 2, 3 ) ) )
self.assertEqual( network.outputShader().parameters["s"], IECore.StringData( "s" ) )
self.assertEqual( network.outputShader().parameters["m"], IECore.M44fData( imath.M44f() ) )
def testOutputTypes( self ) :
s = self.compileShader( os.path.dirname( __file__ ) + "/shaders/outputTypes.osl" )
n = GafferOSL.OSLShader()
n.loadShader( s )
self.assertEqual( n["name"].getValue(), s )
self.assertEqual( n["type"].getValue(), "osl:shader" )
self.assertEqual( len( n["parameters"] ), 1 )
self.assertEqual( n["parameters"].keys(), [ "input" ] )
self.assertEqual( n["out"].typeId(), Gaffer.Plug.staticTypeId() )
self.assertEqual( n["out"].keys(), [ "i", "f", "c", "s", "m" ] )
self.assertTrue( isinstance( n["out"]["i"], Gaffer.IntPlug ) )
self.assertTrue( isinstance( n["out"]["f"], Gaffer.FloatPlug ) )
self.assertTrue( isinstance( n["out"]["c"], Gaffer.Color3fPlug ) )
self.assertTrue( isinstance( n["out"]["s"], Gaffer.StringPlug ) )
self.assertTrue( isinstance( n["out"]["m"], Gaffer.M44fPlug ) )
def testNetwork( self ) :
typesShader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/types.osl" )
outputTypesShader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/outputTypes.osl" )
typesNode = GafferOSL.OSLShader( "types" )
outputTypesNode = GafferOSL.OSLShader( "outputTypes" )
typesNode.loadShader( typesShader )
outputTypesNode.loadShader( outputTypesShader )
typesNode["parameters"]["i"].setInput( outputTypesNode["out"]["i"] )
self.assertEqual( typesNode["parameters"]["i"].getValue(), 10 )
network = typesNode.attributes()["osl:surface"]
self.assertEqual( len( network ), 2 )
self.assertEqual( network.getOutput(), ( "types", "" ) )
types = network.getShader( "types" )
outputTypes = network.getShader( "outputTypes" )
self.assertEqual( types.name, typesShader )
self.assertEqual( types.type, "osl:surface" )
self.assertEqual( types.parameters["f"], IECore.FloatData( 1 ) )
self.assertEqual( types.parameters["c"], IECore.Color3fData( imath.Color3f( 1, 2, 3 ) ) )
self.assertEqual( types.parameters["s"], IECore.StringData( "s" ) )
self.assertEqual( outputTypes.name, outputTypesShader )
self.assertEqual( outputTypes.type, "osl:shader" )
self.assertEqual( outputTypes.parameters["input"], IECore.FloatData( 1 ) )
self.assertEqual(
network.inputConnections( "types" ),
[ ( ( "outputTypes", "i" ), ( "types", "i" ) ) ]
)
	def testSerialisation( self ) :
s = self.compileShader( os.path.dirname( __file__ ) + "/shaders/outputTypes.osl" )
script = Gaffer.ScriptNode()
script["n"] = GafferOSL.OSLShader()
script["n"].loadShader( s )
script2 = Gaffer.ScriptNode()
script2.execute( script.serialise() )
self.assertEqual( script["n"]["name"].getValue(), script2["n"]["name"].getValue() )
self.assertEqual( script["n"]["type"].getValue(), script2["n"]["type"].getValue() )
self.assertEqual( script["n"]["parameters"].keys(), script2["n"]["parameters"].keys() )
self.assertEqual( script["n"]["out"].keys(), script2["n"]["out"].keys() )
def testLoadNonexistentShader( self ) :
n = GafferOSL.OSLShader()
self.assertRaises( RuntimeError, n.loadShader, "nonexistent" )
def testSearchPaths( self ) :
standardShaderPaths = os.environ["OSL_SHADER_PATHS"]
try:
s = self.compileShader( os.path.dirname( __file__ ) + "/shaders/types.osl" )
os.environ["OSL_SHADER_PATHS"] = os.path.dirname( s )
n = GafferOSL.OSLShader()
n.loadShader( os.path.basename( s ) )
self.assertEqual( n["parameters"].keys(), [ "i", "f", "c", "s", "m" ] )
finally:
os.environ["OSL_SHADER_PATHS"] = standardShaderPaths
def testNoConnectionToParametersPlug( self ) :
vectorToFloat = GafferOSL.OSLShader()
vectorToFloat.loadShader( "Conversion/VectorToFloat" )
globals = GafferOSL.OSLShader()
globals.loadShader( "Utility/Globals" )
vectorToFloat["parameters"]["p"].setInput( globals["out"]["globalP"] )
self.assertTrue( vectorToFloat["parameters"]["p"].getInput().isSame( globals["out"]["globalP"] ) )
self.assertTrue( vectorToFloat["parameters"]["p"][0].getInput().isSame( globals["out"]["globalP"][0] ) )
self.assertTrue( vectorToFloat["parameters"]["p"][1].getInput().isSame( globals["out"]["globalP"][1] ) )
self.assertTrue( vectorToFloat["parameters"]["p"][2].getInput().isSame( globals["out"]["globalP"][2] ) )
self.assertTrue( vectorToFloat["parameters"].getInput() is None )
def testStructs( self ) :
s = self.compileShader( os.path.dirname( __file__ ) + "/shaders/structs.osl" )
n = GafferOSL.OSLShader()
n.loadShader( s )
self.assertEqual( n["parameters"].keys(), [ "i", "f", "s", "ss" ] )
self.assertEqual( n["parameters"]["i"].defaultValue(), 2 )
self.assertEqual( n["parameters"]["f"].defaultValue(), 3 )
self.assertEqual( n["parameters"]["ss"].defaultValue(), "ss" )
self.assertEqual( n["parameters"]["s"].keys(), [ "i", "f", "c", "s" ] )
self.assertEqual( n["parameters"]["s"]["i"].defaultValue(), 1 )
self.assertEqual( n["parameters"]["s"]["f"].defaultValue(), 2 )
self.assertEqual( n["parameters"]["s"]["c"].defaultValue(), imath.Color3f( 1, 2, 3 ) )
self.assertEqual( n["parameters"]["s"]["s"].defaultValue(), "s" )
n["parameters"]["s"]["i"].setValue( 10 )
n["parameters"]["s"]["f"].setValue( 21 )
n["parameters"]["s"]["c"].setValue( imath.Color3f( 3, 4, 5 ) )
n["parameters"]["s"]["s"].setValue( "ttt" )
network = n.attributes()["osl:shader"]
shader = network.outputShader()
self.assertEqual( len( shader.parameters ), 7 )
self.assertTrue( shader.parameters["i"], IECore.IntData( 2 ) )
self.assertTrue( shader.parameters["f"], IECore.FloatData( 3 ) )
self.assertTrue( shader.parameters["s.i"], IECore.IntData( 10 ) )
self.assertTrue( shader.parameters["s.f"], IECore.FloatData( 21 ) )
self.assertTrue( shader.parameters["s.c"], IECore.Color3fData( imath.Color3f( 3, 4, 5 ) ) )
self.assertTrue( shader.parameters["s.s"], IECore.StringData( "ttt" ) )
self.assertTrue( shader.parameters["ss"], IECore.StringData( "ss" ) )
h1 = n.attributesHash()
n["parameters"]["s"]["i"].setValue( 100 )
h2 = n.attributesHash()
self.assertNotEqual( h1, h2 )
s2 = self.compileShader( os.path.dirname( __file__ ) + "/shaders/outputTypes.osl" )
g = GafferOSL.OSLShader()
g.loadShader( s2 )
n["parameters"]["s"]["i"].setInput( g["out"]["i"] )
h3 = n.attributesHash()
self.assertNotEqual( h1, h3 )
self.assertNotEqual( h2, h3 )
def testOutputPlugAffectsHash( self ) :
globals = GafferOSL.OSLShader()
globals.loadShader( "Utility/Globals" )
floatToColor = GafferOSL.OSLShader()
floatToColor.loadShader( "Conversion/FloatToColor" )
floatToColor["parameters"]["r"].setInput( globals["out"]["globalU"] )
h1 = floatToColor.attributesHash()
floatToColor["parameters"]["r"].setInput( globals["out"]["globalV"] )
h2 = floatToColor.attributesHash()
self.assertNotEqual( h1, h2 )
def testCanConnectVectorToColor( self ) :
globals = GafferOSL.OSLShader()
globals.loadShader( "Utility/Globals" )
constant = GafferOSL.OSLShader()
constant.loadShader( "Surface/Constant" )
self.assertTrue( constant["parameters"]["Cs"].acceptsInput( globals["out"]["globalP"] ) )
constant["parameters"]["Cs"].setInput( globals["out"]["globalP"] )
self.assertTrue( constant["parameters"]["Cs"].getInput().isSame( globals["out"]["globalP"] ) )
def testClosureParameters( self ) :
outputClosureShader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/outputClosure.osl" )
outputClosure = GafferOSL.OSLShader( "outputClosure" )
outputClosure.loadShader( outputClosureShader )
inputClosureShader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/inputClosure.osl" )
inputClosure = GafferOSL.OSLShader( "inputClosure" )
inputClosure.loadShader( inputClosureShader )
self.assertEqual( outputClosure["out"]["c"].typeId(), GafferOSL.ClosurePlug.staticTypeId() )
self.assertEqual( inputClosure["parameters"]["i"].typeId(), GafferOSL.ClosurePlug.staticTypeId() )
inputClosure["parameters"]["i"].setInput( outputClosure["out"]["c"] )
network = inputClosure.attributes()["osl:surface"]
self.assertEqual( len( network ), 2 )
self.assertNotIn( "i", network.outputShader().parameters )
self.assertEqual(
network.inputConnections( "inputClosure" ),
[ network.Connection( ( "outputClosure", "c" ), ( "inputClosure", "i" ) ) ]
)
def testClosureParametersInputAcceptance( self ) :
outputClosureShader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/outputClosure.osl" )
outputClosure = GafferOSL.OSLShader()
outputClosure.loadShader( outputClosureShader )
inputClosureShader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/inputClosure.osl" )
inputClosure = GafferOSL.OSLShader()
inputClosure.loadShader( inputClosureShader )
outputColor = GafferOSL.OSLShader()
outputColor.loadShader( "Conversion/VectorToColor" )
self.assertTrue( inputClosure["parameters"]["i"].acceptsInput( outputClosure["out"]["c"] ) )
self.assertFalse( inputClosure["parameters"]["i"].acceptsInput( outputColor["out"]["c"] ) )
def testOutputClosureDirtying( self ) :
outputClosureShader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/outputClosure.osl" )
outputClosure = GafferOSL.OSLShader()
outputClosure.loadShader( outputClosureShader )
cs = GafferTest.CapturingSlot( outputClosure.plugDirtiedSignal() )
outputClosure["parameters"]["e"]["r"].setValue( 10 )
self.assertTrue( outputClosure["out"] in [ x[0] for x in cs ] )
self.assertTrue( outputClosure["out"]["c"] in [ x[0] for x in cs ] )
def testRepeatability( self ) :
s1 = self.compileShader( os.path.dirname( __file__ ) + "/shaders/outputTypes.osl" )
s2 = self.compileShader( os.path.dirname( __file__ ) + "/shaders/types.osl" )
sn1 = GafferOSL.OSLShader()
sn1.loadShader( s1 )
sn2 = GafferOSL.OSLShader()
sn2.loadShader( s2 )
sn2["parameters"]["i"].setInput( sn1["out"]["i"] )
self.assertEqual( sn2.attributesHash(), sn2.attributesHash() )
self.assertEqual( sn2.attributes(), sn2.attributes() )
def testHandlesAreHumanReadable( self ) :
s1 = self.compileShader( os.path.dirname( __file__ ) + "/shaders/outputTypes.osl" )
s2 = self.compileShader( os.path.dirname( __file__ ) + "/shaders/types.osl" )
sn1 = GafferOSL.OSLShader( "Shader1" )
sn1.loadShader( s1 )
sn2 = GafferOSL.OSLShader( "Shader2" )
sn2.loadShader( s2 )
sn2["parameters"]["i"].setInput( sn1["out"]["i"] )
network = sn2.attributes()["osl:surface"]
self.assertEqual( set( network.shaders().keys() ), { "Shader1", "Shader2" } )
def testHandlesAreUniqueEvenIfNodeNamesArent( self ) :
s1 = self.compileShader( os.path.dirname( __file__ ) + "/shaders/outputTypes.osl" )
s2 = self.compileShader( os.path.dirname( __file__ ) + "/shaders/types.osl" )
script = Gaffer.ScriptNode()
script["in1"] = GafferOSL.OSLShader()
script["in1"].loadShader( s1 )
script["in2"] = GafferOSL.OSLShader()
script["in2"].loadShader( s1 )
script["shader"] = GafferOSL.OSLShader()
script["shader"].loadShader( s2 )
script["shader"]["parameters"]["i"].setInput( script["in1"]["out"]["i"] )
script["shader"]["parameters"]["f"].setInput( script["in2"]["out"]["f"] )
box = Gaffer.Box.create( script, Gaffer.StandardSet( [ script["in1"] ] ) )
# because the nodes have different parents, we can give them the same name.
box["in1"].setName( "notUnique" )
script["in2"].setName( "notUnique" )
network = script["shader"].attributes()["osl:surface"]
self.assertEqual( len( network.shaders() ), 3 )
def testShaderMetadata( self ) :
s = self.compileShader( os.path.dirname( __file__ ) + "/shaders/metadata.osl" )
n = GafferOSL.OSLShader()
n.loadShader( s )
self.assertEqual( n.shaderMetadata( "stringValue" ), "s" )
self.assertEqual( n.shaderMetadata( "intValue" ), 1 )
self.assertEqual( n.shaderMetadata( "floatValue" ), 0.5 )
def testParameterMetadata( self ) :
s = self.compileShader( os.path.dirname( __file__ ) + "/shaders/metadata.osl" )
n = GafferOSL.OSLShader()
n.loadShader( s )
self.assertEqual( n.parameterMetadata( n["parameters"]["a"], "aStringValue" ), "s" )
self.assertEqual( n.parameterMetadata( n["parameters"]["a"], "aIntValue" ), 1 )
self.assertEqual( n.parameterMetadata( n["parameters"]["a"], "aFloatValue" ), 0.5 )
self.assertEqual( n.parameterMetadata( n["parameters"]["b"], "bStringValue" ), "st" )
self.assertEqual( n.parameterMetadata( n["parameters"]["b"], "bIntValue" ), 2 )
self.assertEqual( n.parameterMetadata( n["parameters"]["b"], "bFloatValue" ), 0.75 )
def testParameterArrayMetadata( self ) :
s = self.compileShader( os.path.dirname( __file__ ) + "/shaders/arrayMetadata.osl" )
n = GafferOSL.OSLShader()
n.loadShader( s )
self.assertEqual( n.parameterMetadata( n["parameters"]["a"], "aStringValues" ), IECore.StringVectorData( [ "one","two" ] ) )
self.assertEqual( n.parameterMetadata( n["parameters"]["a"], "aIntValues" ), IECore.IntVectorData( [ 1, 2 ] ) )
self.assertEqual( n.parameterMetadata( n["parameters"]["a"], "aFloatValues" ), IECore.FloatVectorData( [ 0.25, 0.5 ] ) )
def testParameterMinMaxMetadata( self ) :
s = self.compileShader( os.path.dirname( __file__ ) + "/shaders/metadataMinMax.osl" )
n = GafferOSL.OSLShader()
n.loadShader( s )
self.assertAlmostEqual( n["parameters"]["b"].minValue(), 2.3, delta = 0.00001 )
self.assertAlmostEqual( n["parameters"]["b"].maxValue(), 4.7, delta = 0.00001 )
self.assertEqual( n["parameters"]["c"].minValue(), 23 )
self.assertEqual( n["parameters"]["c"].maxValue(), 47 )
self.assertEqual( n["parameters"]["d"].minValue(), imath.Color3f( 1, 2, 3 ) )
self.assertEqual( n["parameters"]["d"].maxValue(), imath.Color3f( 4, 5, 6 ) )
self.assertEqual( n["parameters"]["e"].minValue(), imath.V3f( 1, 2, 3 ) )
self.assertEqual( n["parameters"]["e"].maxValue(), imath.V3f( 4, 5, 6 ) )
self.assertEqual( n["parameters"]["f"].minValue(), imath.V3f( 1, 2, 3 ) )
self.assertEqual( n["parameters"]["f"].maxValue(), imath.V3f( 4, 5, 6 ) )
self.assertEqual( n["parameters"]["g"].minValue(), imath.V3f( 1, 2, 3 ) )
self.assertEqual( n["parameters"]["g"].maxValue(), imath.V3f( 4, 5, 6 ) )
# Check default min/max if not specified
self.assertFalse( n["parameters"]["h"].hasMinValue() )
self.assertFalse( n["parameters"]["h"].hasMaxValue() )
def testParameterSplineMetadata( self ) :
s = self.compileShader( os.path.dirname( __file__ ) + "/shaders/splineMetadata.osl" )
n = GafferOSL.OSLShader()
n.loadShader( s )
# If the components of the spline all match, the metadata is registered to the spline plug
self.assertEqual( n.parameterMetadata( n["parameters"]["correctSpline"], "a" ), 1 )
self.assertEqual( n.parameterMetadata( n["parameters"]["correctSpline"], "b" ), 2 )
self.assertEqual( n.parameterMetadata( n["parameters"]["correctSpline"], "c" ), 3 )
# If the components don't match, the metadata is registered to the individual plugs
# Note that array plugs are not supported, so we can't test Values and Positions
self.assertEqual( n.parameterMetadata( n["parameters"]["incompleteSplineBasis"], "c" ), 3 )
def testMetadataReuse( self ) :
s = self.compileShader( os.path.dirname( __file__ ) + "/shaders/arrayMetadata.osl" )
n1 = GafferOSL.OSLShader()
n1.loadShader( s )
n2 = GafferOSL.OSLShader()
n2.loadShader( s )
# we don't want every shader to have its own copy of metadata when it could be shared
self.assertTrue(
n1.parameterMetadata( n1["parameters"]["a"], "aStringValues", _copy = False ).isSame(
n2.parameterMetadata( n2["parameters"]["a"], "aStringValues", _copy = False )
)
)
# But because Python has no notion of const, we want to make sure that a casual
# caller doesn't have the opportunity to really break things, so copies are
# returned from the query unless explicitly requested otherwise.
n1.parameterMetadata( n1["parameters"]["a"], "aStringValues" ).value = "editingSharedConstDataIsABadIdea"
self.assertEqual( n1.parameterMetadata( n1["parameters"]["a"], "aStringValues" ), IECore.StringVectorData( [ "one", "two" ] ) )
def testAcceptsNoneInput( self ) :
s = self.compileShader( os.path.dirname( __file__ ) + "/shaders/types.osl" )
n = GafferOSL.OSLShader()
n.loadShader( s )
self.assertTrue( n["parameters"]["i"].acceptsInput( None ) )
def testOverzealousCycleDetection( self ) :
globals = GafferOSL.OSLShader( "Globals" )
globals.loadShader( "Utility/Globals" )
point = GafferOSL.OSLShader( "Point" )
point.loadShader( "Conversion/FloatToVector" )
noise = GafferOSL.OSLShader( "Noise" )
noise.loadShader( "Pattern/Noise" )
color = GafferOSL.OSLShader( "Color" )
color.loadShader( "Conversion/FloatToColor" )
point["parameters"]["x"].setInput( globals["out"]["globalU"] )
point["parameters"]["y"].setInput( globals["out"]["globalV"] )
noise["parameters"]["p"].setInput( point["out"]["p"] )
color["parameters"]["r"].setInput( globals["out"]["globalU"] )
color["parameters"]["g"].setInput( noise["out"]["n"] )
# Should not throw - there are no cycles above.
color.attributesHash()
color.attributes()
def testLoadNetworkFromVersion0_23( self ) :
s = Gaffer.ScriptNode()
s["fileName"].setValue( os.path.dirname( __file__ ) + "/scripts/networkVersion-0.23.2.1.gfr" )
s.load()
for plug, expectedValue, expectedInput in [
( "InFloat.parameters.name", "s", None ),
( "InFloat.parameters.defaultValue", 1, None ),
( "InFloat1.parameters.name", "t", None ),
( "InFloat1.parameters.defaultValue", 0.5, None ),
( "InFloat2.parameters.name", "u", None ),
( "InFloat2.parameters.defaultValue", 0.25, None ),
( "OutPoint.parameters.name", "stu", None ),
( "BuildPoint.parameters.x", None, "InFloat.out.value" ),
( "BuildPoint.parameters.y", None, "InFloat1.out.value" ),
( "BuildPoint.parameters.z", None, "InFloat2.out.value" ),
( "OutPoint.parameters.value", None, "BuildPoint.out.p" ),
( "OutObject.parameters.in0", None, "OutPoint.out.primitiveVariable" ),
] :
if expectedInput is not None :
self.assertTrue( s.descendant( plug ).getInput().isSame( s.descendant( expectedInput ) ) )
else :
self.assertTrue( s.descendant( plug ).getInput() is None )
if expectedValue is not None :
self.assertEqual( s.descendant( plug ).getValue(), expectedValue )
def testReload( self ) :
s1 = self.compileShader( os.path.dirname( __file__ ) + "/shaders/version1.osl" )
s2 = self.compileShader( os.path.dirname( __file__ ) + "/shaders/version2.osl" )
n = GafferOSL.OSLShader()
n.loadShader( s1 )
s1Parameters = n["parameters"].keys()
self.assertEqual(
s1Parameters,
[
"commonI",
"commonF",
"commonColor",
"commonString",
"commonStruct",
"commonArray",
"removedI",
"removedF",
"removedColor",
"removedString",
"removedStruct",
"typeChanged1",
"typeChanged2",
"typeChanged3",
"typeChanged4",
"typeChanged5",
"defaultChangedArray",
]
)
self.assertEqual(
n["parameters"]["commonStruct"].keys(),
[
"commonI",
"commonF",
"commonColor",
"commonString",
"removedI",
"removedF",
"removedColor",
"removedString",
"typeChanged1",
"typeChanged2",
"typeChanged3",
"typeChanged4",
]
)
values = {
"commonI" : 10,
"commonF" : 25,
"commonColor" : imath.Color3f( 1 ),
"commonString" : "test",
"commonStruct.commonI" : 11,
"commonStruct.commonF" : 2.5,
"commonStruct.commonColor" : imath.Color3f( 0.5 ),
"commonStruct.commonString" : "test2",
"commonArray" : IECore.FloatVectorData( [ 0, 1, 2 ] )
}
for key, value in values.items() :
n["parameters"].descendant( key ).setValue( value )
arrayToNotGetReloaded = n["parameters"]["commonArray"]
arrayToGetReloaded = n["parameters"]["defaultChangedArray"]
self.assertTrue( isinstance( n["parameters"]["typeChanged1"], Gaffer.IntPlug ) )
self.assertTrue( isinstance( n["parameters"]["typeChanged2"], Gaffer.FloatPlug ) )
self.assertTrue( isinstance( n["parameters"]["typeChanged3"], Gaffer.Color3fPlug ) )
self.assertTrue( isinstance( n["parameters"]["typeChanged4"], Gaffer.StringPlug ) )
self.assertTrue( isinstance( n["parameters"]["typeChanged5"], Gaffer.V3fPlug ) )
self.assertTrue( n["parameters"]["typeChanged5"].interpretation(), IECore.GeometricData.Interpretation.Vector)
n.loadShader( s2, keepExistingValues = True )
self.assertEqual(
n["parameters"].keys(),
[
"commonI",
"commonF",
"commonColor",
"commonString",
"commonStruct",
"commonArray",
"typeChanged1",
"typeChanged2",
"typeChanged3",
"typeChanged4",
"typeChanged5",
"addedI",
"addedF",
"addedColor",
"addedString",
"addedStruct",
"defaultChangedArray",
]
)
self.assertEqual(
n["parameters"]["commonStruct"].keys(),
[
"commonI",
"commonF",
"commonColor",
"commonString",
"typeChanged1",
"typeChanged2",
"typeChanged3",
"typeChanged4",
"addedI",
"addedF",
"addedColor",
"addedString",
]
)
self.assertEqual( arrayToNotGetReloaded, n["parameters"]["commonArray"] )
self.assertNotEqual( arrayToGetReloaded, n["parameters"]["defaultChangedArray"] )
for key, value in values.items() :
self.assertEqual( n["parameters"].descendant( key ).getValue(), value )
self.assertTrue( isinstance( n["parameters"]["typeChanged1"], Gaffer.StringPlug ) )
self.assertTrue( isinstance( n["parameters"]["typeChanged2"], Gaffer.Color3fPlug ) )
self.assertTrue( isinstance( n["parameters"]["typeChanged3"], Gaffer.FloatPlug ) )
self.assertTrue( isinstance( n["parameters"]["typeChanged4"], Gaffer.IntPlug ) )
self.assertTrue( isinstance( n["parameters"]["typeChanged5"], Gaffer.V3fPlug ) )
self.assertEqual( n["parameters"]["typeChanged5"].interpretation(), IECore.GeometricData.Interpretation.Normal)
n.loadShader( s2, keepExistingValues = False )
for plug in n["parameters"] :
if isinstance( plug, Gaffer.ValuePlug ) :
self.assertTrue( plug.isSetToDefault() )
shutil.copyfile( s1 + ".oso", s2 + ".oso" )
n.reloadShader()
self.assertEqual(
n["parameters"].keys(),
s1Parameters
)
def testSplineParameters( self ) :
s = self.compileShader( os.path.dirname( __file__ ) + "/shaders/splineParameters.osl" )
n = GafferOSL.OSLShader()
n.loadShader( s )
self.assertEqual( n["parameters"].keys(), [ "floatSpline", "colorSpline" ] )
self.assertTrue( isinstance( n["parameters"]["floatSpline"], Gaffer.SplineffPlug ) )
self.assertEqual(
n["parameters"]["floatSpline"].getValue().spline(),
IECore.Splineff(
IECore.CubicBasisf.catmullRom(),
[
( 0, 0 ),
( 0, 0 ),
( 1, 1 ),
( 1, 1 ),
]
)
)
self.assertTrue( isinstance( n["parameters"]["colorSpline"], Gaffer.SplinefColor3fPlug ) )
self.assertEqual(
n["parameters"]["colorSpline"].getValue().spline(),
IECore.SplinefColor3f(
IECore.CubicBasisf.bSpline(),
[
( 0, imath.Color3f( 0 ) ),
( 0, imath.Color3f( 0 ) ),
( 0, imath.Color3f( 0 ) ),
( 1, imath.Color3f( 1 ) ),
( 1, imath.Color3f( 1 ) ),
( 1, imath.Color3f( 1 ) ),
]
)
)
shader = n.attributes()["osl:shader"].outputShader()
self.assertEqual(
shader.parameters["floatSpline"].value,
IECore.Splineff(
IECore.CubicBasisf.catmullRom(),
[
( 0, 0 ),
( 0, 0 ),
( 1, 1 ),
( 1, 1 ),
]
)
)
self.assertEqual(
shader.parameters["colorSpline"].value,
IECore.SplinefColor3f(
IECore.CubicBasisf.bSpline(),
[
( 0, imath.Color3f( 0 ) ),
( 0, imath.Color3f( 0 ) ),
( 0, imath.Color3f( 0 ) ),
( 1, imath.Color3f( 1 ) ),
( 1, imath.Color3f( 1 ) ),
( 1, imath.Color3f( 1 ) ),
]
)
)
def testSplineParameterEvaluation( self ) :
numSamples = 100
s = self.compileShader( os.path.dirname( __file__ ) + "/shaders/splineParameters.osl" )
n = GafferOSL.OSLShader()
n.loadShader( s )
points = [
( 0, imath.Color3f( 0.5 ) ),
( 0.3, imath.Color3f( 0.2 ) ),
( 0.6, imath.Color3f( 1 ) ),
( 0.65, imath.Color3f( 0.5 ) ),
( 0.9, imath.Color3f( 0.7 ) ),
( 1, imath.Color3f( 1 ) )
]
constant = GafferImage.Constant( "Constant" )
constant["format"].setValue( GafferImage.Format( 1, numSamples, 1.000 ) )
image = GafferOSL.OSLImage()
image["in"].setInput( constant["out"] )
image["shader"].setInput( n["out"]["out"] )
for interpolation in [
Gaffer.SplineDefinitionInterpolation.Linear,
Gaffer.SplineDefinitionInterpolation.CatmullRom,
Gaffer.SplineDefinitionInterpolation.BSpline,
Gaffer.SplineDefinitionInterpolation.MonotoneCubic
]:
n["parameters"]["colorSpline"].setValue( Gaffer.SplineDefinitionfColor3f( points, interpolation ) )
oslSamples = list( reversed( GafferImage.ImageAlgo.image( image['out'] )["R"] ) )
s = n['parameters']['colorSpline'].getValue().spline()
cortexSamples = [ s( ( i + 0.5 ) / numSamples )[0] for i in range( numSamples ) ]
for a, b in zip( oslSamples, cortexSamples ):
self.assertAlmostEqual( a, b, places = 4 )
def testArrays( self ) :
s = self.compileShader( os.path.dirname( __file__ ) + "/shaders/arrays.osl" )
n = GafferOSL.OSLShader()
n.loadShader( s )
self.assertEqual( n["parameters"].keys(), [ "i", "f", "c", "p", "q", "s", "m" ] )
self.assertTrue( isinstance( n["parameters"]["i"], Gaffer.IntVectorDataPlug ) )
self.assertTrue( isinstance( n["parameters"]["f"], Gaffer.FloatVectorDataPlug ) )
self.assertTrue( isinstance( n["parameters"]["c"], Gaffer.Color3fVectorDataPlug ) )
self.assertTrue( isinstance( n["parameters"]["p"], Gaffer.V3fVectorDataPlug ) )
self.assertTrue( isinstance( n["parameters"]["q"], Gaffer.V3fVectorDataPlug ) )
self.assertTrue( isinstance( n["parameters"]["s"], Gaffer.StringVectorDataPlug ) )
self.assertTrue( isinstance( n["parameters"]["m"], Gaffer.M44fVectorDataPlug ) )
self.assertEqual( n["parameters"]["i"].defaultValue(), IECore.IntVectorData( [ 10, 11, 12 ] ) )
self.assertEqual( n["parameters"]["f"].defaultValue(), IECore.FloatVectorData( [ 1, 2 ] ) )
self.assertEqual( n["parameters"]["c"].defaultValue(), IECore.Color3fVectorData(
[ imath.Color3f( 1, 2, 3 ), imath.Color3f( 4, 5, 6 ) ] ) )
self.assertEqual( n["parameters"]["p"].defaultValue(), IECore.V3fVectorData(
[ imath.V3f( 1, 2, 3 ), imath.V3f( 4, 5, 6 ) ] ) )
self.assertEqual( n["parameters"]["q"].defaultValue(), IECore.V3fVectorData(
[ imath.V3f( 1, 2, 3 ), imath.V3f( 4, 5, 6 ) ] ) )
self.assertEqual( n["parameters"]["s"].defaultValue(), IECore.StringVectorData( [ "s", "t", "u", "v", "word" ] ) )
self.assertEqual( n["parameters"]["m"].defaultValue(), IECore.M44fVectorData(
[ imath.M44f() * 1, imath.M44f() * 0, imath.M44f() * 1 ] ) )
self.assertEqual( n["out"].typeId(), Gaffer.Plug.staticTypeId() )
network = n.attributes()["osl:surface"]
self.assertEqual( len( network ), 1 )
self.assertEqual( network.outputShader().name, s )
self.assertEqual( network.outputShader().type, "osl:surface" )
self.assertEqual( network.outputShader().parameters["i"], IECore.IntVectorData( [ 10, 11, 12 ] ) )
self.assertEqual( network.outputShader().parameters["f"], IECore.FloatVectorData( [ 1, 2 ] ) )
self.assertEqual( network.outputShader().parameters["c"], IECore.Color3fVectorData(
[ imath.Color3f( 1, 2, 3 ), imath.Color3f( 4, 5, 6 ) ] ) )
self.assertEqual( network.outputShader().parameters["p"], IECore.V3fVectorData(
[ imath.V3f( 1, 2, 3 ), imath.V3f( 4, 5, 6 ) ] ) )
self.assertEqual( network.outputShader().parameters["q"], IECore.V3fVectorData(
[ imath.V3f( 1, 2, 3 ), imath.V3f( 4, 5, 6 ) ] ) )
self.assertEqual( network.outputShader().parameters["s"], IECore.StringVectorData( [ "s", "t", "u", "v", "word" ] ) )
self.assertEqual( network.outputShader().parameters["m"], IECore.M44fVectorData(
[ imath.M44f() * 1, imath.M44f() * 0, imath.M44f() * 1 ] ) )
def testUnload( self ) :
n = GafferOSL.OSLShader()
n.loadShader( self.compileShader( os.path.dirname( __file__ ) + "/shaders/types.osl" ) )
self.assertTrue( "osl:surface" in n.attributes() )
n.loadShader( "" )
self.assertEqual( len( n["parameters"] ), 0 )
self.assertEqual( n["type"].getValue(), "" )
self.assertEqual( n["name"].getValue(), "" )
self.assertFalse( "osl:surface" in n.attributes() )
def testLoadSurfaceAfterShader( self ) :
n = GafferOSL.OSLShader()
n.loadShader( self.compileShader( os.path.dirname( __file__ ) + "/shaders/outputTypes.osl" ) )
self.assertEqual( len( n["out"] ), 5 )
n.loadShader( self.compileShader( os.path.dirname( __file__ ) + "/shaders/constant.osl" ) )
self.assertEqual( len( n["out"] ), 0 )
def testReconnectionOfChildPlugShader( self ) :
s = Gaffer.ScriptNode()
s["n1"] = GafferOSL.OSLShader()
s["n1"].loadShader( "Maths/AddVector" )
s["n2"] = GafferOSL.OSLShader()
s["n2"].loadShader( "Maths/AddVector" )
s["n3"] = GafferOSL.OSLShader()
s["n3"].loadShader( "Maths/AddVector" )
s["n2"]["parameters"]["a"].setInput( s["n1"]["out"]["out"] )
s["n3"]["parameters"]["a"].setInput( s["n2"]["out"]["out"] )
s.deleteNodes( filter = Gaffer.StandardSet( [ s["n2"] ] ) )
self.assertTrue( s["n3"]["parameters"]["a"].getInput().isSame( s["n1"]["out"]["out"] ) )
def testDisablingShader( self ) :
n1 = GafferOSL.OSLShader( "n1" )
n1.loadShader( "Maths/AddVector" )
n1["parameters"]["a"].setValue( imath.V3f( 5, 7, 6 ) )
n2 = GafferOSL.OSLShader( "n2" )
n2.loadShader( "Maths/AddVector" )
n3 = GafferOSL.OSLShader( "n3" )
n3.loadShader( "Maths/AddVector" )
n2["parameters"]["a"].setInput( n1["out"]["out"] )
n3["parameters"]["a"].setInput( n2["out"]["out"] )
n2["enabled"].setValue( False )
network = n3.attributes()["osl:shader"]
self.assertEqual( len( network ), 2 )
self.assertEqual( network.inputConnections( "n3" ), [ network.Connection( ( "n1", "out" ), ( "n3", "a" ) ) ] )
self.assertEqual( network.getShader( "n1" ).parameters["a"].value, imath.V3f( 5, 7, 6 ) )
def testDisabledShaderPassesThroughExternalValue( self ) :
n1 = Gaffer.Node()
n1["user"]["v"] = Gaffer.V3fPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
n1["user"]["v"].setValue( imath.V3f( 12, 11, 10 ) )
n2 = GafferOSL.OSLShader( "n2" )
n2.loadShader( "Maths/AddVector" )
n2["parameters"]["a"].setInput( n1["user"]["v"] )
n3 = GafferOSL.OSLShader( "n3" )
n3.loadShader( "Maths/AddVector" )
n3["parameters"]["a"].setInput( n2["parameters"]["a"] )
n2["enabled"].setValue( False )
network = n3.attributes()["osl:shader"]
self.assertEqual( len( network ), 1 )
self.assertEqual( network.getShader( "n3" ).parameters["a"].value, imath.V3f( 12, 11, 10 ) )
def testDisabledShaderEvaluatesStateCorrectly( self ) :
redShader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/red.osl" )
greenShader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/green.osl" )
n2 = GafferOSL.OSLShader( "red1" )
n2.loadShader( redShader )
n3 = GafferOSL.OSLShader( "green1" )
n3.loadShader( greenShader )
n1 = GafferOSL.OSLShader( "add" )
n1.loadShader( self.compileShader( os.path.dirname( __file__ ) + "/shaders/add.osl" ) )
n1['parameters']['a'].setInput(n2["out"]["out"])
n1['parameters']['b'].setInput(n3["out"]["out"])
sphere = GafferScene.Sphere()
shaderAssignment = GafferScene.ShaderAssignment()
shaderAssignment["in"].setInput(sphere["out"])
pathFilter = GafferScene.PathFilter()
pathFilter["paths"].setValue( IECore.StringVectorData( [ "/sphere" ] ) )
shaderAssignment["filter"].setInput(pathFilter["out"])
shaderAssignment["shader"].setInput(n1["out"]["out"])
network = shaderAssignment["out"].attributes( "/sphere" )["osl:surface"]
self.assertEqual( len( network ), 3 )
self.assertEqual( network.getShader( "red1" ).name.split( "/" )[-1], "red" )
self.assertEqual( network.getShader( "green1" ).name.split( "/" )[-1], "green")
self.assertEqual( network.getShader( "add" ).name.split( "/" )[-1], "add")
# When we disable the add shader, we should get the shader connected to its pass-through parameter ("a"), i.e. n2.
n1["enabled"].setValue( False )
network = shaderAssignment["out"].attributes( "/sphere" )["osl:surface"]
self.assertEqual( len( network ), 1 )
self.assertEqual( network.getShader( "red1" ).name.split( "/" )[-1], "red" )
def testShaderSerialisation( self ) :
s = Gaffer.ScriptNode()
s['n2'] = GafferOSL.OSLShader()
s['n2'].loadShader( "Pattern/Noise" )
s['n'] = GafferOSL.OSLShader()
s['n'].loadShader( "Pattern/Noise" )
s['n2']['parameters']['scale'].setInput( s['n']['out']['n'] )
self.assertEqual( s['n2']['parameters']['scale'].getInput(), s['n']['out']['n'] )
s2 = Gaffer.ScriptNode()
s2.execute( s.serialise() )
self.assertEqual( s2['n2']['parameters']['scale'].getInput(), s2['n']['out']['n'] )
def testSplineParameterSerialisation( self ) :
s = Gaffer.ScriptNode()
shad = self.compileShader( os.path.dirname( __file__ ) + "/shaders/splineParameters.osl" )
s['n'] = GafferOSL.OSLShader()
s['n'].loadShader( shad )
splineValue = Gaffer.SplineDefinitionfColor3f( [ ( random.random(), imath.Color3f( random.random(), random.random(), random.random() ) ) for i in range( 10 ) ], Gaffer.SplineDefinitionInterpolation.Linear )
s['n']["parameters"]["colorSpline"].setValue( splineValue )
serialised = s.serialise()
colorSplineLines = [ i for i in serialised.split( "\n" ) if "colorSpline" in i ]
# Expect a single clearPoints call to get serialised
self.assertEqual( 1, sum( "clearPoints" in i for i in colorSplineLines ) )
# Expect 3 addChild calls per point ( the parent plug, plus its x and y children )
self.assertEqual( 30, sum( "addChild" in i for i in colorSplineLines ) )
s2 = Gaffer.ScriptNode()
s2.execute( serialised )
self.assertEqual( s2['n']["parameters"]["colorSpline"].getValue(), splineValue )
def testComponentToComponentConnections( self ) :
n1 = GafferOSL.OSLShader( "n1" )
n1.loadShader( "Maths/MixColor" )
n2 = GafferOSL.OSLShader( "n2" )
n2.loadShader( "Maths/MixColor" )
n2["parameters"]["a"]["r"].setInput( n1["out"]["out"]["g"] )
n2["parameters"]["a"]["g"].setInput( n1["out"]["out"]["b"] )
n2["parameters"]["a"]["b"].setInput( n1["out"]["out"]["r"] )
network = n2.attributes()["osl:shader"]
self.assertEqual(
network.inputConnections( "n2" ),
[
( ( "n1", "out.r" ), ( "n2", "a.b" ) ),
( ( "n1", "out.b" ), ( "n2", "a.g" ) ),
( ( "n1", "out.g" ), ( "n2", "a.r" ) ),
]
)
def testGetConnectedParameterValueInsideSceneNode( self ) :
n = GafferScene.SceneNode()
n["n1"] = GafferOSL.OSLShader()
n["n1"].loadShader( "Maths/AddColor" )
n["n2"] = GafferOSL.OSLShader()
n["n2"].loadShader( "Maths/AddColor" )
n["n2"]["parameters"]["a"].setInput( n["n1"]["out"]["out"] )
self.assertEqual( n["n2"]["parameters"]["a"].getValue(), imath.Color3f( 0 ) )
def testOutputNameIncludedInNetwork( self ) :
shader = GafferOSL.OSLShader( "globals" )
shader.loadShader( "Utility/Globals" )
shaderPlug = GafferScene.ShaderPlug()
shaderPlug.setInput( shader["out"] )
network1 = shaderPlug.attributes()["osl:shader"]
hash1 = shaderPlug.attributesHash()
shaderPlug.setInput( shader["out"]["globalP"] )
network2 = shaderPlug.attributes()["osl:shader"]
hash2 = shaderPlug.attributesHash()
shaderPlug.setInput( shader["out"]["globalN"] )
network3 = shaderPlug.attributes()["osl:shader"]
hash3 = shaderPlug.attributesHash()
self.assertEqual( network1.getOutput(), IECoreScene.ShaderNetwork.Parameter( "globals" ) )
self.assertEqual( network2.getOutput(), IECoreScene.ShaderNetwork.Parameter( "globals", "globalP" ) )
self.assertEqual( network3.getOutput(), IECoreScene.ShaderNetwork.Parameter( "globals", "globalN" ) )
self.assertEqual( network1.getShader( "global" ), network2.getShader( "global" ) )
self.assertEqual( network1.getShader( "global" ), network3.getShader( "global" ) )
self.assertNotEqual( hash1, hash2 )
self.assertNotEqual( hash2, hash3 )
def testShaderTypeAssignsAsSurfaceType( self ) :
plane = GafferScene.Plane()
planeFilter = GafferScene.PathFilter()
planeFilter["paths"].setValue( IECore.StringVectorData( [ "/plane" ] ) )
shader = GafferOSL.OSLShader( "globals" )
shader.loadShader( "Maths/AddColor" )
shaderAssignment = GafferScene.ShaderAssignment()
shaderAssignment["in"].setInput( plane["out"] )
shaderAssignment["shader"].setInput( shader["out"]["out"] )
shaderAssignment["filter"].setInput( planeFilter["out"] )
self.assertEqual( shaderAssignment["out"].attributes( "/plane" ).keys(), [ "osl:surface" ] )
def testConstantOutPlug( self ) :
# For compatibility with Arnold, we hack an output closure
# parameter onto our Constant shader, but we don't want that
# to affect the way we represent the output plug in Gaffer.
shader = GafferOSL.OSLShader()
shader.loadShader( "Surface/Constant" )
self.assertEqual( len( shader["out"].children() ), 0 )
def testLoadMxInvertFloat( self ) :
s = Gaffer.ScriptNode()
s["fileName"].setValue( os.path.join( os.path.dirname( __file__ ), "scripts", "mxInvert-0.59.8.0.gfr" ) )
s.load()
self.assertEqual( s["mx_invert_float"]["parameters"]["in"].getValue(), 1 )
self.assertEqual( s["mx_invert_float"]["parameters"]["amount"].getValue(), 2 )
if __name__ == "__main__":
unittest.main()
|
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class spilloverpolicy_gslbvserver_binding(base_resource) :
""" Binding class showing the gslbvserver that can be bound to spilloverpolicy.
"""
def __init__(self) :
self._boundto = ""
self._priority = 0
self._activepolicy = 0
self._gotopriorityexpression = ""
self._labeltype = ""
self._labelname = ""
self._name = ""
self.___count = 0
@property
def boundto(self) :
"""The name of the entity to which the policy is bound.
"""
try :
return self._boundto
except Exception as e:
raise e
@boundto.setter
def boundto(self, boundto) :
"""The name of the entity to which the policy is bound.
"""
try :
self._boundto = boundto
except Exception as e:
raise e
@property
def name(self) :
"""Name of the spillover policy.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
"""Name of the spillover policy.
"""
try :
self._name = name
except Exception as e:
raise e
@property
def priority(self) :
"""Specifies the priority of the policy.
"""
try :
return self._priority
except Exception as e:
raise e
@property
def labelname(self) :
"""Name of the label to invoke if the current policy rule evaluates to TRUE.
"""
try :
return self._labelname
except Exception as e:
raise e
@property
def gotopriorityexpression(self) :
"""Expression specifying the priority of the next policy which will get evaluated if the current policy rule evaluates to TRUE.
"""
try :
return self._gotopriorityexpression
except Exception as e:
raise e
@property
def labeltype(self) :
"""Type of policy label invocation.<br/>Possible values = reqvserver, resvserver, policylabel.
"""
try :
return self._labeltype
except Exception as e:
raise e
@property
def activepolicy(self) :
"""Indicates whether policy is bound or not.
"""
try :
return self._activepolicy
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(spilloverpolicy_gslbvserver_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.spilloverpolicy_gslbvserver_binding
except Exception as e :
raise e
def _get_object_name(self) :
""" Returns the value of object identifier argument
"""
try :
if (self.name) :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def get(cls, service, name) :
""" Use this API to fetch spilloverpolicy_gslbvserver_binding resources.
"""
try :
obj = spilloverpolicy_gslbvserver_binding()
obj.name = name
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, name, filter_) :
""" Use this API to fetch filtered set of spilloverpolicy_gslbvserver_binding resources.
The filter string should be in JSON format, e.g. "port:80,servicetype:HTTP".
"""
try :
obj = spilloverpolicy_gslbvserver_binding()
obj.name = name
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service, name) :
""" Use this API to count spilloverpolicy_gslbvserver_binding resources configued on NetScaler.
"""
try :
obj = spilloverpolicy_gslbvserver_binding()
obj.name = name
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, name, filter_) :
""" Use this API to count the filtered set of spilloverpolicy_gslbvserver_binding resources.
The filter string should be in JSON format, e.g. "port:80,servicetype:HTTP".
"""
try :
obj = spilloverpolicy_gslbvserver_binding()
obj.name = name
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class Labeltype:
reqvserver = "reqvserver"
resvserver = "resvserver"
policylabel = "policylabel"
class spilloverpolicy_gslbvserver_binding_response(base_response) :
def __init__(self, length=1) :
self.spilloverpolicy_gslbvserver_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.spilloverpolicy_gslbvserver_binding = [spilloverpolicy_gslbvserver_binding() for _ in range(length)]
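# A minimal usage sketch for the binding class above. The session object,
# address, credentials and policy name below are hypothetical placeholders;
# nitro_service is the SDK's standard session class.
#
# from nssrc.com.citrix.netscaler.nitro.service.nitro_service import nitro_service
# ns_session = nitro_service("10.0.0.1", "http")
# ns_session.login("nsroot", "nsroot", 3600)
# bindings = spilloverpolicy_gslbvserver_binding.get(ns_session, "mypolicy")
# count = spilloverpolicy_gslbvserver_binding.count(ns_session, "mypolicy")
# ns_session.logout()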
|
|
#!/usr/bin/env python2.7
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for downloading and reading MNIST data."""
from __future__ import print_function
import gzip
import os
import numpy
from six.moves import urllib
SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/'
TRAIN_IMAGES = 'train-images-idx3-ubyte.gz'
TRAIN_LABELS = 'train-labels-idx1-ubyte.gz'
TEST_IMAGES = 't10k-images-idx3-ubyte.gz'
TEST_LABELS = 't10k-labels-idx1-ubyte.gz'
VALIDATION_SIZE = 5000
def maybe_download(filename, work_directory):
"""Download the data from Yann's website, unless it's already here."""
if not os.path.exists(work_directory):
os.mkdir(work_directory)
filepath = os.path.join(work_directory, filename)
if not os.path.exists(filepath):
filepath, _ = urllib.request.urlretrieve(SOURCE_URL + filename, filepath)
statinfo = os.stat(filepath)
print('Successfully downloaded %s %d bytes.' % (filename, statinfo.st_size))
return filepath
def _read32(bytestream):
dt = numpy.dtype(numpy.uint32).newbyteorder('>')
return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
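# For example, the first four bytes of an MNIST image file are
# b'\x00\x00\x08\x03'; read big-endian, that is 0x00000803 == 2051, the magic
# number checked in extract_images() below.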
def extract_images(filename):
"""Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
print('Extracting %s' % filename)
with gzip.open(filename) as bytestream:
magic = _read32(bytestream)
if magic != 2051:
raise ValueError(
'Invalid magic number %d in MNIST image file: %s' %
(magic, filename))
num_images = _read32(bytestream)
rows = _read32(bytestream)
cols = _read32(bytestream)
buf = bytestream.read(rows * cols * num_images)
data = numpy.frombuffer(buf, dtype=numpy.uint8)
data = data.reshape(num_images, rows, cols, 1)
return data
def dense_to_one_hot(labels_dense, num_classes=10):
"""Convert class labels from scalars to one-hot vectors."""
num_labels = labels_dense.shape[0]
index_offset = numpy.arange(num_labels) * num_classes
labels_one_hot = numpy.zeros((num_labels, num_classes))
labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
return labels_one_hot
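# For example, dense_to_one_hot(numpy.array([0, 2]), num_classes=3) returns
# [[1., 0., 0.],
#  [0., 0., 1.]]
# since each row gets a 1 at the index given by its label.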
def extract_labels(filename, one_hot=False):
"""Extract the labels into a 1D uint8 numpy array [index]."""
print('Extracting %s' % filename)
with gzip.open(filename) as bytestream:
magic = _read32(bytestream)
if magic != 2049:
raise ValueError(
'Invalid magic number %d in MNIST label file: %s' %
(magic, filename))
num_items = _read32(bytestream)
buf = bytestream.read(num_items)
labels = numpy.frombuffer(buf, dtype=numpy.uint8)
if one_hot:
return dense_to_one_hot(labels)
return labels
class DataSet(object):
"""Class encompassing test, validation and training MNIST data set."""
def __init__(self, images, labels, fake_data=False, one_hot=False):
"""Construct a DataSet. one_hot arg is used only if fake_data is true."""
if fake_data:
self._num_examples = 10000
self.one_hot = one_hot
else:
assert images.shape[0] == labels.shape[0], (
'images.shape: %s labels.shape: %s' % (images.shape,
labels.shape))
self._num_examples = images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
assert images.shape[3] == 1
images = images.reshape(images.shape[0],
images.shape[1] * images.shape[2])
# Convert from [0, 255] -> [0.0, 1.0].
images = images.astype(numpy.float32)
images = numpy.multiply(images, 1.0 / 255.0)
self._images = images
self._labels = labels
self._epochs_completed = 0
self._index_in_epoch = 0
@property
def images(self):
return self._images
@property
def labels(self):
return self._labels
@property
def num_examples(self):
return self._num_examples
@property
def epochs_completed(self):
return self._epochs_completed
def next_batch(self, batch_size, fake_data=False):
"""Return the next `batch_size` examples from this data set."""
if fake_data:
fake_image = [1] * 784
if self.one_hot:
fake_label = [1] + [0] * 9
else:
fake_label = 0
return [fake_image for _ in range(batch_size)], [
fake_label for _ in range(batch_size)
]
start = self._index_in_epoch
self._index_in_epoch += batch_size
if self._index_in_epoch > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Shuffle the data
perm = numpy.arange(self._num_examples)
numpy.random.shuffle(perm)
self._images = self._images[perm]
self._labels = self._labels[perm]
# Start next epoch
start = 0
self._index_in_epoch = batch_size
assert batch_size <= self._num_examples
end = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
def read_data_sets(train_dir, fake_data=False, one_hot=False):
"""Return training, validation and testing data sets."""
class DataSets(object):
pass
data_sets = DataSets()
if fake_data:
data_sets.train = DataSet([], [], fake_data=True, one_hot=one_hot)
data_sets.validation = DataSet([], [], fake_data=True, one_hot=one_hot)
data_sets.test = DataSet([], [], fake_data=True, one_hot=one_hot)
return data_sets
local_file = maybe_download(TRAIN_IMAGES, train_dir)
train_images = extract_images(local_file)
local_file = maybe_download(TRAIN_LABELS, train_dir)
train_labels = extract_labels(local_file, one_hot=one_hot)
local_file = maybe_download(TEST_IMAGES, train_dir)
test_images = extract_images(local_file)
local_file = maybe_download(TEST_LABELS, train_dir)
test_labels = extract_labels(local_file, one_hot=one_hot)
validation_images = train_images[:VALIDATION_SIZE]
validation_labels = train_labels[:VALIDATION_SIZE]
train_images = train_images[VALIDATION_SIZE:]
train_labels = train_labels[VALIDATION_SIZE:]
data_sets.train = DataSet(train_images, train_labels)
data_sets.validation = DataSet(validation_images, validation_labels)
data_sets.test = DataSet(test_images, test_labels)
return data_sets
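# A typical usage sketch (the directory name is an arbitrary placeholder):
#
# data_sets = read_data_sets('MNIST_data', one_hot=True)
# images, labels = data_sets.train.next_batch(100)
# # With the standard MNIST files this yields 55000 training, 5000 validation
# # and 10000 test examples (VALIDATION_SIZE is carved off the training set).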
|
|
"""Support for Google travel time sensors."""
from __future__ import annotations
from datetime import datetime, timedelta
import logging
from googlemaps import Client
from googlemaps.distance_matrix import distance_matrix
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA, SensorEntity
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import (
ATTR_ATTRIBUTION,
CONF_API_KEY,
CONF_ENTITY_NAMESPACE,
CONF_MODE,
CONF_NAME,
CONF_SCAN_INTERVAL,
EVENT_HOMEASSISTANT_STARTED,
TIME_MINUTES,
)
from homeassistant.core import CoreState, HomeAssistant
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
import homeassistant.util.dt as dt_util
from .const import (
ALL_LANGUAGES,
ATTRIBUTION,
AVOID,
CONF_ARRIVAL_TIME,
CONF_AVOID,
CONF_DEPARTURE_TIME,
CONF_DESTINATION,
CONF_LANGUAGE,
CONF_OPTIONS,
CONF_ORIGIN,
CONF_TRAFFIC_MODEL,
CONF_TRANSIT_MODE,
CONF_TRANSIT_ROUTING_PREFERENCE,
CONF_TRAVEL_MODE,
CONF_UNITS,
DEFAULT_NAME,
DOMAIN,
TRACKABLE_DOMAINS,
TRANSIT_PREFS,
TRANSPORT_TYPE,
TRAVEL_MODE,
TRAVEL_MODEL,
UNITS,
)
from .helpers import get_location_from_entity, resolve_zone
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL = timedelta(minutes=5)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_API_KEY): cv.string,
vol.Required(CONF_DESTINATION): cv.string,
vol.Required(CONF_ORIGIN): cv.string,
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_TRAVEL_MODE): vol.In(TRAVEL_MODE),
vol.Optional(CONF_OPTIONS, default={CONF_MODE: "driving"}): vol.All(
dict,
vol.Schema(
{
vol.Optional(CONF_MODE, default="driving"): vol.In(TRAVEL_MODE),
vol.Optional(CONF_LANGUAGE): vol.In(ALL_LANGUAGES),
vol.Optional(CONF_AVOID): vol.In(AVOID),
vol.Optional(CONF_UNITS): vol.In(UNITS),
vol.Exclusive(CONF_ARRIVAL_TIME, "time"): cv.string,
vol.Exclusive(CONF_DEPARTURE_TIME, "time"): cv.string,
vol.Optional(CONF_TRAFFIC_MODEL): vol.In(TRAVEL_MODEL),
vol.Optional(CONF_TRANSIT_MODE): vol.In(TRANSPORT_TYPE),
vol.Optional(CONF_TRANSIT_ROUTING_PREFERENCE): vol.In(
TRANSIT_PREFS
),
}
),
),
# Remove options to exclude from import
vol.Remove(CONF_ENTITY_NAMESPACE): cv.string,
vol.Remove(CONF_SCAN_INTERVAL): cv.time_period,
},
extra=vol.REMOVE_EXTRA,
)
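# A configuration.yaml sketch matching the schema above, as a hedged example
# only (the key values are placeholders, and the platform key is assumed to
# match this integration's domain):
#
# sensor:
#   - platform: google_travel_time
#     api_key: YOUR_API_KEY
#     origin: 51.123,3.456
#     destination: device_tracker.some_phone
#     options:
#       mode: driving
#       departure_time: "08:00:00"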
def convert_time_to_utc(timestr):
"""Take a string like 08:00:00 and convert it to a unix timestamp."""
combined = datetime.combine(
dt_util.start_of_local_day(), dt_util.parse_time(timestr)
)
if combined < datetime.now():
combined = combined + timedelta(days=1)
return dt_util.as_timestamp(combined)
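# For example, with a local clock reading 10:00, convert_time_to_utc("08:00:00")
# returns the timestamp of 08:00 tomorrow (a past time rolls forward one day),
# while convert_time_to_utc("14:00:00") returns today's 14:00.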
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up a Google travel time sensor entry."""
if not config_entry.options:
new_data = config_entry.data.copy()
options = new_data.pop(CONF_OPTIONS, {})
if CONF_UNITS not in options:
options[CONF_UNITS] = hass.config.units.name
if CONF_TRAVEL_MODE in new_data:
wstr = (
"Google Travel Time: travel_mode is deprecated, please "
"add mode to the options dictionary instead!"
)
_LOGGER.warning(wstr)
travel_mode = new_data.pop(CONF_TRAVEL_MODE)
if CONF_MODE not in options:
options[CONF_MODE] = travel_mode
if CONF_MODE not in options:
options[CONF_MODE] = "driving"
hass.config_entries.async_update_entry(
config_entry, data=new_data, options=options
)
api_key = config_entry.data[CONF_API_KEY]
origin = config_entry.data[CONF_ORIGIN]
destination = config_entry.data[CONF_DESTINATION]
name = config_entry.data.get(CONF_NAME, DEFAULT_NAME)
client = Client(api_key, timeout=10)
sensor = GoogleTravelTimeSensor(
config_entry, name, api_key, origin, destination, client
)
async_add_entities([sensor], False)
async def async_setup_platform(
hass: HomeAssistant, config, add_entities_callback, discovery_info=None
):
"""Set up the Google travel time platform."""
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data=config,
)
)
_LOGGER.warning(
"Your Google travel time configuration has been imported into the UI; "
"please remove it from configuration.yaml as support for it will be "
"removed in a future release"
)
class GoogleTravelTimeSensor(SensorEntity):
"""Representation of a Google travel time sensor."""
def __init__(self, config_entry, name, api_key, origin, destination, client):
"""Initialize the sensor."""
self._name = name
self._config_entry = config_entry
self._unit_of_measurement = TIME_MINUTES
self._matrix = None
self._api_key = api_key
self._unique_id = config_entry.entry_id
self._client = client
# Check if location is a trackable entity
if origin.split(".", 1)[0] in TRACKABLE_DOMAINS:
self._origin_entity_id = origin
else:
self._origin = origin
if destination.split(".", 1)[0] in TRACKABLE_DOMAINS:
self._destination_entity_id = destination
else:
self._destination = destination
async def async_added_to_hass(self) -> None:
"""Handle when entity is added."""
if self.hass.state != CoreState.running:
self.hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_STARTED, self.first_update
)
else:
await self.first_update()
@property
def native_value(self):
"""Return the state of the sensor."""
if self._matrix is None:
return None
_data = self._matrix["rows"][0]["elements"][0]
if "duration_in_traffic" in _data:
return round(_data["duration_in_traffic"]["value"] / 60)
if "duration" in _data:
return round(_data["duration"]["value"] / 60)
return None
@property
def device_info(self) -> DeviceInfo:
"""Return device specific attributes."""
return DeviceInfo(
entry_type="service",
identifiers={(DOMAIN, self._api_key)},
name=DOMAIN,
)
@property
def unique_id(self) -> str:
"""Return unique ID of entity."""
return self._unique_id
@property
def name(self):
"""Get the name of the sensor."""
return self._name
@property
def extra_state_attributes(self):
"""Return the state attributes."""
if self._matrix is None:
return None
res = self._matrix.copy()
options = self._config_entry.options.copy()
res.update(options)
del res["rows"]
_data = self._matrix["rows"][0]["elements"][0]
if "duration_in_traffic" in _data:
res["duration_in_traffic"] = _data["duration_in_traffic"]["text"]
if "duration" in _data:
res["duration"] = _data["duration"]["text"]
if "distance" in _data:
res["distance"] = _data["distance"]["text"]
res["origin"] = self._origin
res["destination"] = self._destination
res[ATTR_ATTRIBUTION] = ATTRIBUTION
return res
@property
def native_unit_of_measurement(self):
"""Return the unit this state is expressed in."""
return self._unit_of_measurement
async def first_update(self, _=None):
"""Run the first update and write the state."""
await self.hass.async_add_executor_job(self.update)
self.async_write_ha_state()
def update(self):
"""Get the latest data from Google."""
options_copy = self._config_entry.options.copy()
dtime = options_copy.get(CONF_DEPARTURE_TIME)
atime = options_copy.get(CONF_ARRIVAL_TIME)
if dtime is not None and ":" in dtime:
options_copy[CONF_DEPARTURE_TIME] = convert_time_to_utc(dtime)
elif dtime is not None:
options_copy[CONF_DEPARTURE_TIME] = dtime
elif atime is None:
options_copy[CONF_DEPARTURE_TIME] = "now"
if atime is not None and ":" in atime:
options_copy[CONF_ARRIVAL_TIME] = convert_time_to_utc(atime)
elif atime is not None:
options_copy[CONF_ARRIVAL_TIME] = atime
# Convert device_trackers to google friendly location
if hasattr(self, "_origin_entity_id"):
self._origin = get_location_from_entity(
self.hass, _LOGGER, self._origin_entity_id
)
if hasattr(self, "_destination_entity_id"):
self._destination = get_location_from_entity(
self.hass, _LOGGER, self._destination_entity_id
)
self._destination = resolve_zone(self.hass, self._destination)
self._origin = resolve_zone(self.hass, self._origin)
if self._destination is not None and self._origin is not None:
self._matrix = distance_matrix(
self._client, self._origin, self._destination, **options_copy
)
|
|
#!/usr/bin/env python
#
# Copyright (c) 2013 Intel Corporation. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Prepares an Eclipse project directory with Crosswalk added as a library.
It creates a new directory with a base, empty template, fills it with the
appropriate resources from Chromium and Crosswalk and optionally adds native
libraries such as libxwalkcore.so.
The output is used by other tools such as generate_app_packaging_tool.py and
generate_xwalk_core_library_aar.py.
"""
import argparse
import collections
import os
import shutil
import sys
GYP_ANDROID_DIR = os.path.join(os.path.dirname(__file__),
os.pardir, os.pardir, os.pardir,
'build',
'android',
'gyp')
sys.path.append(GYP_ANDROID_DIR)
from util import build_utils
import package_resources
Resource = collections.namedtuple('Resource', ['filename', 'src'])
def CopyJSBindingFiles(js_files, output_dir):
res_raw_dir = os.path.join(output_dir, 'res', 'raw')
build_utils.MakeDirectory(res_raw_dir)
for js_file in js_files:
shutil.copy2(js_file, res_raw_dir)
def CopyMainJar(output_dir, jar_path):
libs_dir = os.path.join(output_dir, 'libs')
build_utils.MakeDirectory(libs_dir)
shutil.copy2(jar_path, libs_dir)
def CopyBinaryData(output_dir, binary_files):
res_raw_dir = os.path.join(output_dir, 'res', 'raw')
res_value_dir = os.path.join(output_dir, 'res', 'values')
build_utils.MakeDirectory(res_raw_dir)
build_utils.MakeDirectory(res_value_dir)
# Poor man's XML writer. It is safe to assume there are only ASCII characters
# in the entries we are going to write.
resource_file = os.path.join(res_value_dir, 'xwalk_resources_list.xml')
resource_top = """<?xml version="1.0" encoding="utf-8"?>
<resources>
<string-array name="xwalk_resources_list">
"""
resource_bottom = """</string-array>
</resources>
"""
with open(resource_file, 'w') as out_f:
out_f.write(resource_top)
for binary_file in binary_files:
shutil.copy2(binary_file, res_raw_dir)
out_f.write("<item>%s</item>\n" % os.path.basename(binary_file))
out_f.write(resource_bottom)
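# For two hypothetical inputs "icudtl.dat" and "natives_blob.bin", the
# generated res/values/xwalk_resources_list.xml would read:
#
# <?xml version="1.0" encoding="utf-8"?>
# <resources>
# <string-array name="xwalk_resources_list">
# <item>icudtl.dat</item>
# <item>natives_blob.bin</item>
# </string-array>
# </resources>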
def CopyNativeLibraries(output_dir, abi_name, native_libraries):
destination_path = os.path.join(output_dir, 'libs', abi_name)
build_utils.MakeDirectory(destination_path)
for native_lib in native_libraries:
shutil.copy2(native_lib, destination_path)
def CopyResources(output_dir, resources, resource_strings):
res_dir = os.path.join(output_dir, 'res')
build_utils.MakeDirectory(res_dir)
def _resource_predicate(name):
"""Predicate for the ExtractAll() call below. Makes sure only the files we
want are extracted."""
if name == 'OWNERS':
return False
_, ext = os.path.splitext(name)
if ext not in ('.png', '.wav', '.xml'):
# We raise an exception here because if a new file type starts being
# packaged we need to check what changed compared to the previous
# behaviour.
raise ValueError("Unexpected file type: %s" % name)
return True
# Part 1: extract the partly-processed resource zip files (which do not
# include the .grd string zips), making sure we replace crunched 9-patch
# images with the original ones and avoiding file name collisions.
for index, resource in enumerate(resources):
with build_utils.TempDir() as temp_dir:
temp_res_dir = os.path.join(temp_dir, 'res')
build_utils.ExtractAll(resource.filename, path=temp_res_dir,
predicate=_resource_predicate)
for dirpath, _, filenames in os.walk(temp_res_dir):
if dirpath == temp_res_dir: # Do not create res/res/.
continue
res_dir_subpath = os.path.join(res_dir, os.path.basename(dirpath))
build_utils.MakeDirectory(res_dir_subpath)
for filename in filenames:
if filename.endswith('.9.png'):
# 9-patch files need to be handled specially. We need the original,
# uncrunched versions to avoid crunching them twice and failing
# (once when building the resources, and then when the user is
# building their project with Crosswalk).
original_9p = os.path.join(resource.src,
os.path.basename(dirpath),
filename)
if not os.path.isfile(original_9p):
raise IOError("Expected to find %s." % original_9p)
shutil.copy2(original_9p, os.path.join(dirpath, filename))
# Avoid overwriting existing files.
root, ext = os.path.splitext(filename)
if os.path.isfile(os.path.join(res_dir_subpath, filename)):
destname = '%s_%02d%s' % (root, index, ext)
else:
destname = filename
shutil.copy2(os.path.join(dirpath, filename),
os.path.join(res_dir_subpath, destname))
package_resources.MoveImagesToNonMdpiFolders(res_dir)
# Part 2: extract .xml strings files (made from .grd files).
for zip_file in resource_strings:
build_utils.ExtractAll(zip_file, path=res_dir)
def main(argv):
parser = argparse.ArgumentParser()
parser.add_argument('--abi',
help='Android ABI being used in the build.')
parser.add_argument('--binary-files',
help='Binary files to store in res/raw.')
parser.add_argument('--js-bindings', required=True,
help='.js files to copy to res/raw.')
parser.add_argument('--main-jar', required=True,
help='Path to the main JAR to copy to libs/.')
parser.add_argument('--native-libraries',
help='List of libraries to copy to libs/<abi>.')
parser.add_argument('--output-dir', required=True,
help='Directory where the project will be created.')
parser.add_argument('--resource-strings', default='',
help='List of zipped .grd files.')
parser.add_argument('--resource-zip-sources', default='',
help='Source directories corresponding to each zipped '
'resource file from --resource-zips.')
parser.add_argument('--resource-zips', default='',
help='Zipped, processed resource files.')
parser.add_argument('--stamp', required=True,
help='Path to touch on success.')
parser.add_argument('--template-dir', required=True,
help='Directory with an empty app template.')
options = parser.parse_args()
options.resource_strings = build_utils.ParseGypList(options.resource_strings)
options.resource_zips = build_utils.ParseGypList(options.resource_zips)
options.resource_zip_sources = build_utils.ParseGypList(
options.resource_zip_sources)
if len(options.resource_zips) != len(options.resource_zip_sources):
print('--resource-zips and --resource-zip-sources must have the same '
'number of arguments.')
return 1
resources = []
for resource_zip, resource_src in zip(options.resource_zips,
options.resource_zip_sources):
resources.append(Resource(filename=resource_zip, src=resource_src))
# Copy Eclipse project files of library project.
build_utils.DeleteDirectory(options.output_dir)
shutil.copytree(options.template_dir, options.output_dir)
# Copy binaries and resources.
CopyResources(options.output_dir, resources, options.resource_strings)
CopyMainJar(options.output_dir, options.main_jar)
if options.binary_files:
CopyBinaryData(options.output_dir,
build_utils.ParseGypList(options.binary_files))
if options.native_libraries:
CopyNativeLibraries(options.output_dir, options.abi,
build_utils.ParseGypList(options.native_libraries))
# Copy JS API binding files.
CopyJSBindingFiles(build_utils.ParseGypList(options.js_bindings),
options.output_dir)
# Create an empty src/.
build_utils.MakeDirectory(os.path.join(options.output_dir, 'src'))
build_utils.Touch(options.stamp)
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
|
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
import platform
import socket
import sys
import mock
from oslo_config import cfg
from six.moves import range
from nova.compute import flavors
import nova.context
import nova.db
from nova import exception
from nova.image import glance
from nova.network import minidns
from nova.network import model as network_model
from nova import objects
import nova.utils
CONF = cfg.CONF
CONF.import_opt('use_ipv6', 'nova.netconf')
def get_test_admin_context():
return nova.context.get_admin_context()
def get_test_image_info(context, instance_ref):
if not context:
context = get_test_admin_context()
image_ref = instance_ref['image_ref']
image_service, image_id = glance.get_remote_image_service(context,
image_ref)
return image_service.show(context, image_id)
def get_test_flavor(context=None, options=None):
options = options or {}
if not context:
context = get_test_admin_context()
test_flavor = {'name': 'kinda.big',
'flavorid': 'someid',
'memory_mb': 2048,
'vcpus': 4,
'root_gb': 40,
'ephemeral_gb': 80,
'swap': 1024}
test_flavor.update(options)
try:
flavor_ref = nova.db.flavor_create(context, test_flavor)
except (exception.FlavorExists, exception.FlavorIdExists):
flavor_ref = nova.db.flavor_get_by_name(context, 'kinda.big')
return flavor_ref
def get_test_instance(context=None, flavor=None, obj=False):
if not context:
context = get_test_admin_context()
if not flavor:
flavor = get_test_flavor(context)
test_instance = {'memory_kb': '2048000',
'basepath': '/some/path',
'bridge_name': 'br100',
'vcpus': 4,
'root_gb': 40,
'bridge': 'br101',
'image_ref': 'cedef40a-ed67-4d10-800e-17455edce175',
'instance_type_id': flavor['id'],
'system_metadata': {},
'extra_specs': {},
'user_id': context.user_id,
'project_id': context.project_id,
}
if obj:
instance = objects.Instance(context, **test_instance)
with mock.patch.object(instance, 'save'):
instance.set_flavor(objects.Flavor.get_by_id(context,
flavor['id']))
instance.create()
else:
flavors.save_flavor_info(test_instance['system_metadata'], flavor, '')
instance = nova.db.instance_create(context, test_instance)
return instance
def get_test_network_info(count=1):
ipv6 = CONF.use_ipv6
fake = 'fake'
fake_ip = '0.0.0.0'
fake_vlan = 100
fake_bridge_interface = 'eth0'
def current():
subnet_4 = network_model.Subnet(cidr=fake_ip,
dns=[network_model.IP(fake_ip),
network_model.IP(fake_ip)],
gateway=network_model.IP(fake_ip),
ips=[network_model.IP(fake_ip),
network_model.IP(fake_ip)],
routes=None,
dhcp_server=fake_ip)
subnet_6 = network_model.Subnet(cidr=fake_ip,
gateway=network_model.IP(fake_ip),
ips=[network_model.IP(fake_ip),
network_model.IP(fake_ip),
network_model.IP(fake_ip)],
routes=None,
version=6)
subnets = [subnet_4]
if ipv6:
subnets.append(subnet_6)
network = network_model.Network(id=None,
bridge=fake,
label=None,
subnets=subnets,
vlan=fake_vlan,
bridge_interface=fake_bridge_interface,
injected=False)
vif = network_model.VIF(id='vif-xxx-yyy-zzz',
address=fake,
network=network,
type=network_model.VIF_TYPE_BRIDGE,
devname=None,
ovs_interfaceid=None)
return vif
return network_model.NetworkInfo([current() for x in range(0, count)])
def is_osx():
return platform.mac_ver()[0] != ''
def is_linux():
return platform.system() == 'Linux'
def coreutils_readlink_available():
_out, err = nova.utils.trycmd('readlink', '-nm', '/')
return err == ''
test_dns_managers = []
def dns_manager():
global test_dns_managers
manager = minidns.MiniDNS()
test_dns_managers.append(manager)
return manager
def cleanup_dns_managers():
global test_dns_managers
for manager in test_dns_managers:
manager.delete_dns_file()
test_dns_managers = []
def killer_xml_body():
return (("""<!DOCTYPE x [
<!ENTITY a "%(a)s">
<!ENTITY b "%(b)s">
<!ENTITY c "%(c)s">]>
<foo>
<bar>
<v1>%(d)s</v1>
</bar>
</foo>""") % {
'a': 'A' * 10,
'b': '&a;' * 10,
'c': '&b;' * 10,
'd': '&c;' * 9999,
}).strip()
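# Expanding the entities above: &a; yields 10 'A's, &b; yields 10 &a; (100
# 'A's), &c; yields 10 &b; (1000 'A's), and the body holds 9999 &c;
# references, so a naive parser materialises roughly 9999 * 1000, i.e. about
# 10 million characters from this tiny document - the classic "billion laughs"
# style expansion this helper exists to test against.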
def is_ipv6_supported():
has_ipv6_support = socket.has_ipv6
try:
s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
s.close()
except socket.error as e:
if e.errno == errno.EAFNOSUPPORT:
has_ipv6_support = False
else:
raise
# check if there is at least one interface with ipv6
if has_ipv6_support and sys.platform.startswith('linux'):
try:
with open('/proc/net/if_inet6') as f:
if not f.read():
has_ipv6_support = False
except IOError:
has_ipv6_support = False
return has_ipv6_support
def get_api_version(request):
if request.path[2:3].isdigit():
return int(request.path[2:3])
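# For example, a request whose path is '/v2/servers' yields 2 (path[2:3] is
# '2'), while a path like '/' or '/latest' fails the digit check and the
# function implicitly returns None.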
|
|
# Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
#
# Author: Sylvain Afchain <sylvain.afchain@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy import sql
from neutron.api.rpc.agentnotifiers import metering_rpc_agent_api
from neutron.common import constants
from neutron.db import api as dbapi
from neutron.db import common_db_mixin as base_db
from neutron.db import l3_db
from neutron.db import model_base
from neutron.db import models_v2
from neutron.extensions import metering
from neutron.openstack.common import log as logging
from neutron.openstack.common import uuidutils
LOG = logging.getLogger(__name__)
class MeteringLabelRule(model_base.BASEV2, models_v2.HasId):
direction = sa.Column(sa.Enum('ingress', 'egress',
name='meteringlabels_direction'))
remote_ip_prefix = sa.Column(sa.String(64))
metering_label_id = sa.Column(sa.String(36),
sa.ForeignKey("meteringlabels.id",
ondelete="CASCADE"),
nullable=False)
excluded = sa.Column(sa.Boolean, default=False, server_default=sql.false())
class MeteringLabel(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
name = sa.Column(sa.String(255))
description = sa.Column(sa.String(1024))
rules = orm.relationship(MeteringLabelRule, backref="label",
cascade="delete", lazy="joined")
routers = orm.relationship(
l3_db.Router,
primaryjoin="MeteringLabel.tenant_id==Router.tenant_id",
foreign_keys='MeteringLabel.tenant_id',
uselist=True)
class MeteringDbMixin(metering.MeteringPluginBase,
base_db.CommonDbMixin):
def __init__(self):
dbapi.register_models()
self.meter_rpc = metering_rpc_agent_api.MeteringAgentNotifyAPI()
def _make_metering_label_dict(self, metering_label, fields=None):
res = {'id': metering_label['id'],
'name': metering_label['name'],
'description': metering_label['description'],
'tenant_id': metering_label['tenant_id']}
return self._fields(res, fields)
def create_metering_label(self, context, metering_label):
m = metering_label['metering_label']
tenant_id = self._get_tenant_id_for_create(context, m)
with context.session.begin(subtransactions=True):
metering_db = MeteringLabel(id=uuidutils.generate_uuid(),
description=m['description'],
tenant_id=tenant_id,
name=m['name'])
context.session.add(metering_db)
return self._make_metering_label_dict(metering_db)
def delete_metering_label(self, context, label_id):
with context.session.begin(subtransactions=True):
try:
label = self._get_by_id(context, MeteringLabel, label_id)
except orm.exc.NoResultFound:
raise metering.MeteringLabelNotFound(label_id=label_id)
context.session.delete(label)
def get_metering_label(self, context, label_id, fields=None):
try:
metering_label = self._get_by_id(context, MeteringLabel, label_id)
except orm.exc.NoResultFound:
raise metering.MeteringLabelNotFound(label_id=label_id)
return self._make_metering_label_dict(metering_label, fields)
def get_metering_labels(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
marker_obj = self._get_marker_obj(context, 'metering_labels', limit,
marker)
return self._get_collection(context, MeteringLabel,
self._make_metering_label_dict,
filters=filters, fields=fields,
sorts=sorts,
limit=limit,
marker_obj=marker_obj,
page_reverse=page_reverse)
def _make_metering_label_rule_dict(self, metering_label_rule, fields=None):
res = {'id': metering_label_rule['id'],
'metering_label_id': metering_label_rule['metering_label_id'],
'direction': metering_label_rule['direction'],
'remote_ip_prefix': metering_label_rule['remote_ip_prefix'],
'excluded': metering_label_rule['excluded']}
return self._fields(res, fields)
def get_metering_label_rules(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
marker_obj = self._get_marker_obj(context, 'metering_label_rules',
limit, marker)
return self._get_collection(context, MeteringLabelRule,
self._make_metering_label_rule_dict,
filters=filters, fields=fields,
sorts=sorts,
limit=limit,
marker_obj=marker_obj,
page_reverse=page_reverse)
def get_metering_label_rule(self, context, rule_id, fields=None):
try:
metering_label_rule = self._get_by_id(context,
MeteringLabelRule, rule_id)
except orm.exc.NoResultFound:
raise metering.MeteringLabelRuleNotFound(rule_id=rule_id)
return self._make_metering_label_rule_dict(metering_label_rule, fields)
def _validate_cidr(self, context, label_id, remote_ip_prefix,
direction, excluded):
        r_ips = self.get_metering_label_rules(
            context,
            filters={'metering_label_id': [label_id],
                     'direction': [direction],
                     'excluded': [excluded]},
            fields=['remote_ip_prefix'])
cidrs = [r['remote_ip_prefix'] for r in r_ips]
new_cidr_ipset = netaddr.IPSet([remote_ip_prefix])
if (netaddr.IPSet(cidrs) & new_cidr_ipset):
            raise metering.MeteringLabelRuleOverlaps(
                remote_ip_prefix=remote_ip_prefix)
def create_metering_label_rule(self, context, metering_label_rule):
m = metering_label_rule['metering_label_rule']
with context.session.begin(subtransactions=True):
label_id = m['metering_label_id']
ip_prefix = m['remote_ip_prefix']
direction = m['direction']
excluded = m['excluded']
self._validate_cidr(context, label_id, ip_prefix, direction,
excluded)
metering_db = MeteringLabelRule(id=uuidutils.generate_uuid(),
metering_label_id=label_id,
direction=direction,
excluded=m['excluded'],
remote_ip_prefix=ip_prefix)
context.session.add(metering_db)
return self._make_metering_label_rule_dict(metering_db)
def delete_metering_label_rule(self, context, rule_id):
with context.session.begin(subtransactions=True):
try:
rule = self._get_by_id(context, MeteringLabelRule, rule_id)
except orm.exc.NoResultFound:
raise metering.MeteringLabelRuleNotFound(rule_id=rule_id)
context.session.delete(rule)
def _get_metering_rules_dict(self, metering_label):
rules = []
for rule in metering_label.rules:
rule_dict = self._make_metering_label_rule_dict(rule)
rules.append(rule_dict)
return rules
def _make_router_dict(self, router):
res = {'id': router['id'],
'name': router['name'],
'tenant_id': router['tenant_id'],
'admin_state_up': router['admin_state_up'],
'status': router['status'],
'gw_port_id': router['gw_port_id'],
constants.METERING_LABEL_KEY: []}
return res
def _process_sync_metering_data(self, labels):
routers_dict = {}
for label in labels:
routers = label.routers
for router in routers:
router_dict = routers_dict.get(
router['id'],
self._make_router_dict(router))
rules = self._get_metering_rules_dict(label)
data = {'id': label['id'], 'rules': rules}
router_dict[constants.METERING_LABEL_KEY].append(data)
routers_dict[router['id']] = router_dict
return routers_dict.values()
def get_sync_data_metering(self, context, label_id=None, router_ids=None):
labels = context.session.query(MeteringLabel)
if label_id:
labels = labels.filter(MeteringLabel.id == label_id)
elif router_ids:
labels = (labels.join(MeteringLabel.routers).
filter(l3_db.Router.id.in_(router_ids)))
return self._process_sync_metering_data(labels)
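# The overlap check in _validate_cidr() above boils down to a netaddr IPSet
# intersection. A minimal standalone sketch of the same idea (only the
# third-party ``netaddr`` package is assumed; the values are illustrative):
#
#   import netaddr
#
#   existing = netaddr.IPSet(['10.0.0.0/24', '10.0.1.0/24'])
#   candidate = netaddr.IPSet(['10.0.0.128/25'])
#   assert bool(existing & candidate)  # 10.0.0.128/25 lies inside 10.0.0.0/24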
|
|
# single-file pure-python ed25519 digital signatures, rearranged to minimize
# the namespace pollution so this can be embedded in another file. Adapted
# from https://bitbucket.org/dholth/ed25519ll
# Ed25519 digital signatures
# Based on http://ed25519.cr.yp.to/python/ed25519.py
# See also http://ed25519.cr.yp.to/software.html
# Adapted by Ron Garret
# Sped up considerably using coordinate transforms found on:
# http://www.hyperelliptic.org/EFD/g1p/auto-twisted-extended-1.html
# Specifically add-2008-hwcd-4 and dbl-2008-hwcd
def Ed25519():
# don't add many names to the file we're copied into
try: # pragma nocover
unicode
PY3 = False
def asbytes(b):
"""Convert array of integers to byte string"""
return ''.join(chr(x) for x in b)
def joinbytes(b):
"""Convert array of bytes to byte string"""
return ''.join(b)
def bit(h, i):
"""Return i'th bit of bytestring h"""
return (ord(h[i//8]) >> (i%8)) & 1
except NameError: # pragma nocover
PY3 = True
asbytes = bytes
joinbytes = bytes
def bit(h, i):
return (h[i//8] >> (i%8)) & 1
import hashlib
b = 256
q = 2**255 - 19
l = 2**252 + 27742317777372353535851937790883648493
def H(m):
return hashlib.sha512(m).digest()
def expmod(b, e, m):
if e == 0: return 1
t = expmod(b, e // 2, m) ** 2 % m
if e & 1: t = (t * b) % m
return t
# Can probably get some extra speedup here by replacing this with
# an extended-euclidean, but performance seems OK without that
def inv(x):
return expmod(x, q-2, q)
d = -121665 * inv(121666)
I = expmod(2,(q-1)//4,q)
def xrecover(y):
xx = (y*y-1) * inv(d*y*y+1)
x = expmod(xx,(q+3)//8,q)
if (x*x - xx) % q != 0: x = (x*I) % q
if x % 2 != 0: x = q-x
return x
By = 4 * inv(5)
Bx = xrecover(By)
B = [Bx % q,By % q]
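    # B is the standard Ed25519 base point, whose y-coordinate is 4/5 (mod q).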
#def edwards(P,Q):
# x1 = P[0]
# y1 = P[1]
# x2 = Q[0]
# y2 = Q[1]
# x3 = (x1*y2+x2*y1) * inv(1+d*x1*x2*y1*y2)
# y3 = (y1*y2+x1*x2) * inv(1-d*x1*x2*y1*y2)
# return (x3 % q,y3 % q)
#def scalarmult(P,e):
# if e == 0: return [0,1]
# Q = scalarmult(P,e/2)
# Q = edwards(Q,Q)
# if e & 1: Q = edwards(Q,P)
# return Q
# Faster (!) version based on:
# http://www.hyperelliptic.org/EFD/g1p/auto-twisted-extended-1.html
def xpt_add(pt1, pt2):
(X1, Y1, Z1, T1) = pt1
(X2, Y2, Z2, T2) = pt2
A = ((Y1-X1)*(Y2+X2)) % q
B = ((Y1+X1)*(Y2-X2)) % q
C = (Z1*2*T2) % q
D = (T1*2*Z2) % q
E = (D+C) % q
F = (B-A) % q
G = (B+A) % q
H = (D-C) % q
X3 = (E*F) % q
Y3 = (G*H) % q
Z3 = (F*G) % q
T3 = (E*H) % q
return (X3, Y3, Z3, T3)
def xpt_double (pt):
(X1, Y1, Z1, _) = pt
A = (X1*X1)
B = (Y1*Y1)
C = (2*Z1*Z1)
D = (-A) % q
J = (X1+Y1) % q
E = (J*J-A-B) % q
G = (D+B) % q
F = (G-C) % q
H = (D-B) % q
X3 = (E*F) % q
Y3 = (G*H) % q
Z3 = (F*G) % q
T3 = (E*H) % q
return (X3, Y3, Z3, T3)
def pt_xform (pt):
(x, y) = pt
return (x, y, 1, (x*y)%q)
def pt_unxform (pt):
(x, y, z, _) = pt
return ((x*inv(z))%q, (y*inv(z))%q)
def xpt_mult (pt, n):
if n==0: return pt_xform((0,1))
_ = xpt_double(xpt_mult(pt, n>>1))
return xpt_add(_, pt) if n&1 else _
def scalarmult(pt, e):
return pt_unxform(xpt_mult(pt_xform(pt), e))
def encodeint(y):
bits = [(y >> i) & 1 for i in range(b)]
e = [(sum([bits[i * 8 + j] << j for j in range(8)]))
for i in range(b//8)]
return asbytes(e)
def encodepoint(P):
x = P[0]
y = P[1]
bits = [(y >> i) & 1 for i in range(b - 1)] + [x & 1]
e = [(sum([bits[i * 8 + j] << j for j in range(8)]))
for i in range(b//8)]
return asbytes(e)
def publickey(sk):
h = H(sk)
a = 2**(b-2) + sum(2**i * bit(h,i) for i in range(3,b-2))
A = scalarmult(B,a)
return encodepoint(A)
def Hint(m):
h = H(m)
return sum(2**i * bit(h,i) for i in range(2*b))
def signature(m,sk,pk):
sk = sk[:32]
h = H(sk)
a = 2**(b-2) + sum(2**i * bit(h,i) for i in range(3,b-2))
inter = joinbytes([h[i] for i in range(b//8,b//4)])
r = Hint(inter + m)
R = scalarmult(B,r)
S = (r + Hint(encodepoint(R) + pk + m) * a) % l
return encodepoint(R) + encodeint(S)
def isoncurve(P):
x = P[0]
y = P[1]
return (-x*x + y*y - 1 - d*x*x*y*y) % q == 0
def decodeint(s):
return sum(2**i * bit(s,i) for i in range(0,b))
def decodepoint(s):
y = sum(2**i * bit(s,i) for i in range(0,b-1))
x = xrecover(y)
if x & 1 != bit(s,b-1): x = q-x
P = [x,y]
if not isoncurve(P): raise Exception("decoding point that is not on curve")
return P
def checkvalid(s, m, pk):
if len(s) != b//4: raise Exception("signature length is wrong")
if len(pk) != b//8: raise Exception("public-key length is wrong")
R = decodepoint(s[0:b//8])
A = decodepoint(pk)
S = decodeint(s[b//8:b//4])
h = Hint(encodepoint(R) + pk + m)
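        # Verification checks the (cofactorless) equation S*B == R + H(R||pk||m)*A.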
v1 = scalarmult(B,S)
# v2 = edwards(R,scalarmult(A,h))
v2 = pt_unxform(xpt_add(pt_xform(R), pt_xform(scalarmult(A, h))))
return v1==v2
import os
def create_signing_key():
seed = os.urandom(32)
return seed
def create_verifying_key(signing_key):
return publickey(signing_key)
def sign(skbytes, msg):
"""Return just the signature, given the message and just the secret
key."""
if len(skbytes) != 32:
raise ValueError("Bad signing key length %d" % len(skbytes))
vkbytes = create_verifying_key(skbytes)
sig = signature(msg, skbytes, vkbytes)
return sig
def verify(vkbytes, sig, msg):
if len(vkbytes) != 32:
raise ValueError("Bad verifying key length %d" % len(vkbytes))
if len(sig) != 64:
raise ValueError("Bad signature length %d" % len(sig))
rc = checkvalid(sig, msg, vkbytes)
if not rc:
raise ValueError("rc != 0", rc)
return True
return (create_signing_key, create_verifying_key, sign, verify)
(ed25519_create_signing_key, ed25519_create_verifying_key,
ed25519_sign, ed25519_verify) = Ed25519()
## sk = ed25519_create_signing_key()
## msg = b"hello world"
## sig = ed25519_sign(sk, msg)
## assert len(sig) == 64
## vk = ed25519_create_verifying_key(sk)
## ed25519_verify(vk, sig, msg)
## print("ok")
|
|
import asyncio
import struct
import dictionary
import datetime
import tests
from collections import OrderedDict
from os import listdir
from os.path import isfile, join
from enum import Enum
from dicotomix import Dicotomix, Direction, NotFoundException, OrderException
import unidecode
import sys
import numpy as np
ENABLE_TESTS = False
ENABLE_NGRAMS_LETTER = True
ENABLE_ELAG = False
grams = {}
spelling_buffer = []
default_letters = []
def _boundPrefix(left, right):
k = 0
for i in range(min(len(left),len(right))):
if left[i] != right[i]:
break
k += 1
return k
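# e.g. _boundPrefix('abcde', 'abcxy') == 3, the length of the common prefix.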
class _StateID(Enum):
HEADER = 0
LEN = 1
STR = 2
class _NetworkState:
def __init__(self):
self.header = None
self.len = None
self.str = None
def state(self):
        if self.header is None:
            return _StateID.HEADER
        elif self.len is None:
return _StateID.LEN
else:
return _StateID.STR
DATA_PATH = "data/"
class Server(asyncio.Protocol):
def __init__(self):
self.dicotomix = None
self.words = None
self.buffer = []
self.state = _NetworkState()
self.spelling = False
self.users = []
self.login = None
self.logFile = None
def _log(self, header, message):
        if self.logFile is None:
return
self.logFile.write('{:%Y-%m-%d %H:%M:%S}|{}|{}\n'.format(
datetime.datetime.now(),
header,
message
))
def connection_made(self, transport):
self.transport = transport
self.address = transport.get_extra_info('peername')
print('Connection accepted: {}'.format(*self.address))
def data_received(self, data):
self.buffer += data
while self.consume_buffer():
pass
def consume_buffer(self):
if self.state.state() == _StateID.HEADER and len(self.buffer) >= 1:
self.state.header = self.buffer[0]
self._log('NET', 'header:{}'.format(self.state.header))
return True
elif self.state.state() == _StateID.LEN and len(self.buffer) >= 3:
self.state.len = struct.unpack('>h', bytes(self.buffer[1 : 3]))[0]
self._log('NET', 'len:{}'.format(self.state.len))
return True
elif self.state.state() == _StateID.STR and len(self.buffer) >= 3 + self.state.len:
self.state.str = bytes(self.buffer[3 : 3 + self.state.len]).decode('utf-8')
self._log('NET', 'str:{}'.format(self.state.str))
self.process()
self.buffer = self.buffer[3 + self.state.len : ]
self.state = _NetworkState()
return True
return False
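    # Wire format, as parsed above: a 1-byte header, a 2-byte big-endian
    # length, then `length` bytes of UTF-8 payload. A client frames a message
    # like this (illustrative sketch only):
    #
    #   import struct
    #   payload = 'hello'.encode('utf8')
    #   frame = bytes([header]) + struct.pack('>h', len(payload)) + payload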
def process(self):
global spelling_buffer, grams, default_letters
left = None
word = None
right = None
try:
if self.state.header == 1:
self._log('DIC', 'restart')
left, word, right = self.dicotomix.nextWord(Direction.START, self.spelling)
print("ICI: ",len(self.dicotomix._words))
elif self.state.header == 2:
self._log('DIC', 'go_left')
left, word, right = self.dicotomix.nextWord(Direction.LEFT, self.spelling)
elif self.state.header == 3:
self._log('DIC', 'go_right')
left, word, right = self.dicotomix.nextWord(Direction.RIGHT, self.spelling)
elif self.state.header == 4:
self._log('DIC', 'discard')
left, word, right = self.dicotomix.discard()
elif self.state.header == 5: # spelling mode
self.dicotomix.toggleSpelling()
self.spelling = not self.spelling
spelling_buffer = []
if self.spelling:
default_letters = self.dicotomix._words
self._log('DIC', 'start_spelling')
else:
self.dicotomix._letters = default_letters[:]
                    # _FIRST_EPSILON2/_EPSILON2 are assumed to live on the
                    # Dicotomix object (the Server never defines them).
                    self.dicotomix._EPSILON2 = self.dicotomix._FIRST_EPSILON2
                    self._log('DIC', 'stop_spelling')
return
elif self.state.header == 6: # send users list
onlyfiles = [f for f in listdir(DATA_PATH) if isfile(join(DATA_PATH, f))]
for f in onlyfiles:
name, ext = f.split('.')
if ext == 'data':
self.users.append(name)
self.users.append("[new]")
data = '\n'.join(self.users).encode('utf8')
self.transport.write(struct.pack('>h', len(data)))
self.transport.write(struct.pack('>h', 0))
self.transport.write(data)
return
elif self.state.header == 7: # get user name
                if self.login is not None:
return
if self.state.str not in self.users:
print('Create user ' + self.state.str)
open(DATA_PATH + self.state.str + '.data', 'a').close()
addenda = ''
                if ENABLE_ELAG:
addenda = '_elag'
self.login = self.state.str
words, letters = dictionary.loadDictionary2(
DATA_PATH + 'new_lexique'+addenda+'.csv',
DATA_PATH + self.login + '.data'
)
self.words = words
self.logFile = open(DATA_PATH + self.login + '.log', 'a')
self._log('DIC', 'connected:{}'.format(self.login))
# extract (cumulative frequency, word) from the whole dictionary
feed_words = dictionary.computeFeed(words)
feed_letters = dictionary.computeFeed(letters)
#for w in feed_words[:100]:
#print(w)
self.dicotomix = Dicotomix(feed_words, feed_letters)
if ENABLE_TESTS:
tests.testAll(Dicotomix(feed_words), feed_words, self.words)
if ENABLE_NGRAMS_LETTER:
grams = tests.ngram_letter(Dicotomix(feed_words), feed_words, self.words)
return
elif self.state.header == 8: # custom word
if self.spelling or len(self.state.str) == 0:
return
self._log('DIC', 'add_word:{}'.format(self.state.str))
freq = 1000.
normalized = dictionary.normalize(self.state.str)
add = False
if normalized not in self.words:
self.words[normalized] = [freq, [self.state.str]]
add = True
elif self.state.str not in self.words[normalized][1]:
self.words[normalized][0] += freq
self.words[normalized][1].append(self.state.str)
add = True
if add:
file = open(DATA_PATH + self.login + '.data', 'a')
file.write('{}|{}|{}\n'.format(
self.state.str,
normalized,
freq
))
file.close()
self.words = OrderedDict(sorted(
self.words.items(),
key = lambda x: x[0]
))
feed_words = dictionary.computeFeed(self.words)
self.dicotomix.reinit(feed_words)
else:
self._log('DIC', 'already_exists')
return
            elif self.state.header == 9:  # validate letter in spelling mode
spelling_buffer.append(self.state.str)
print(spelling_buffer)
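                # Normalized Shannon entropy of the current letter
                # distribution: interval lengths act as probabilities, and the
                # -log(26) scaling makes a uniform 26-letter spread give H == 1.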
H = 0.0
for (i,w) in enumerate(self.dicotomix._words[1:]):
print(w[1],self.dicotomix._wordLength(i))
H += self.dicotomix._wordLength(i)*np.log(self.dicotomix._wordLength(i))
H /= -np.log(26)
print("Old H: ", H)
the_end = ''.join(spelling_buffer[-4:])
if the_end in grams:
our_distro = grams[the_end]
default_val = 1
print(our_distro)
print(default_val)
new_letters = [[0.0,'a']]
for f,l in self.dicotomix._words[1:]:
if l in our_distro:
new_letters.append([our_distro[l]*1000,l])
else:
new_letters.append([default_val,l])
to_print = new_letters[:]
to_print.sort(reverse=True, key=lambda x: x[0])
for a in to_print:
print(a[1], a[0])
the_sum = 0.0
for i in range(len(new_letters)):
the_sum += new_letters[i][0]
new_letters[i][0] = the_sum
for i in range(len(new_letters)):
new_letters[i][0] /= the_sum
for i in range(len(new_letters)):
new_letters[i] = (new_letters[i][0],new_letters[i][1])
#for f,l in new_letters:
#print(f,l)
self.dicotomix._words = new_letters[:]
H = 0.0
for (i,w) in enumerate(self.dicotomix._words[1:]):
print(w[1],self.dicotomix._wordLength(i))
H += self.dicotomix._wordLength(i)*np.log(self.dicotomix._wordLength(i))
H /= -np.log(26)
self.dicotomix._EPSILON2 = 1-H
print("New H: ", H)
else:
self.dicotomix._words = default_letters[:]
return
except NotFoundException:
self._log('DIC', 'not_found_exception')
if self.spelling:
self._log('DIC', 'auto_restart')
left, word, right = self.dicotomix.nextWord(Direction.START)
else:
self._log('DIC', 'auto_spelling')
dummy = 'a'.encode('utf8')
self.transport.write(struct.pack('>h', len(dummy)))
self.transport.write(struct.pack('>h', -1)) # ask UI to start spelling mode
self.transport.write(dummy)
return
except OrderException:
self._log('NET', 'order_exception')
return
except AttributeError:
self._log('NET', 'attribute_error')
return
self._log('DIC', 'words:{}:{}:{}'.format(left, word, right))
prefix = _boundPrefix(left, right)
self._log('DIC', 'prefix:{}'.format(prefix))
if not self.spelling:
if word != 'a' and word != '.':
words = filter(lambda x: len(x) > 1, self.words[word][1])
else:
words = self.words[word][1]
else:
words = filter(lambda x: x[0] != '[', self.words[word][1])
if self.spelling:
print(spelling_buffer)
to_send = list(words)
canonique = ''
for k in to_send:
if len(k) != 1:
continue
canonique = unidecode.unidecode(k)
break
i_can = 0
for (i,k) in enumerate(to_send):
if k == canonique:
i_can = i
to_send[0],to_send[i_can] = to_send[i_can],to_send[0]
data = '\n'.join(to_send)
data = data.encode('utf8')
self.transport.write(struct.pack('>h', len(data)))
self.transport.write(struct.pack('>h', prefix))
self.transport.write(data)
def connection_lost(self, error):
        if self.logFile is not None:
self._log('NET', 'disconnected:{}'.format(self.login))
self.logFile.close()
if error:
print('ERROR: {}'.format(error))
else:
print('Closing connection')
super().connection_lost(error)
        sys.exit(0)
|
|
import bson.json_util as json
from bson import ObjectId
from flask import session, request, Response
from flask_login import current_user
from mongoengine.errors import DoesNotExist
from eat.models.application import (
    Application, Applicant, Income, Child, Ethnicity, Person,
    Program)  # Program is assumed to live with the other models; used by the persons/programs route below
from ..forms.applicant import ApplicantForm
from ..forms.income import IncomeForm
from ..forms.person import ChildForm, PersonForm, ChildStatusForm
from ..forms.ethnicity import EthnicityForm
def inject_application(f):
def decorator(**kwargs):
application = None
application_id = session.get('application_id')
if application_id:
application = Application.objects(id=application_id).first()
if application:
response = f(application, **kwargs)
return response
application = None
if current_user.is_authenticated:
# User may have applications they previously created
# load the latest one into the session
if current_user.applications:
for a in current_user.applications:
if not application:
application = a
else:
if a.created_at > application.created_at:
application = a
session['application_id'] = application.id
else:
# Need to create a new application
application = Application()
application.save()
session['application_id'] = application.id
else:
# Need to create a new application
application = Application()
application.applicant = Applicant()
application.save()
session['application_id'] = application.id
response = f(application, **kwargs)
return response
return decorator
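# Note: inject_application does not apply functools.wraps, so every wrapped
# view function is named 'decorator'; the routes below therefore pass an
# explicit ``endpoint=`` to app.route to avoid Flask endpoint-name collisions.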
def register_routes(app):
@app.route('/svc/eat/v1/application/applicant', methods=['GET', 'POST'],
endpoint='svc_eat_v1_application_applicant')
@inject_application
def svc_eat_v1_application_applicant(application):
applicant_form = ApplicantForm(csrf_enabled=False)
if request.method == 'GET':
if application.applicant:
return json.dumps(application.applicant.dict)
else:
return Response(
response=json.dumps({'errors': 'Applicant does not exist.', 'form': applicant_form.data}),
status=404, headers=None,
content_type='application/json; charset=utf-8')
application.applicant = application.applicant or Applicant()
if not applicant_form.validate_on_submit():
return Response(
response=json.dumps({'errors': applicant_form.errors, 'form': applicant_form.data}),
status=400, headers=None,
content_type='application/json; charset=utf-8')
for field in ['last_name', 'middle_initial', 'first_name', 'address_1',
'address_2', 'apt', 'city', 'state', 'postal', 'ssn', 'snap_case', 'tanf_case', 'fdipr_case']:
application.applicant[field] = applicant_form.data[field]
application.save()
return Response(response=json.dumps(application.applicant.dict),
status=201, headers=None,
content_type='application/json; charset=utf-8')
@app.route('/svc/eat/v1/application', methods=['GET', 'POST'], endpoint='svc_eat_v1_application')
@inject_application
def svc_eat_v1_application(application):
return json.dumps(application.dict)
@app.route('/svc/eat/v1/application/applicant/incomes', methods=['GET', 'POST'],
endpoint='svc_eat_v1_application_applicant_incomes')
@inject_application
def svc_eat_v1_application_applicant_incomes(application):
income_form = IncomeForm(csrf_enabled=False)
if request.method == 'GET':
if not application.applicant:
return Response(
response=json.dumps({'errors': 'Applicant does not exist.'}),
status=404, headers=None,
content_type='application/json; charset=utf-8')
if application.applicant and application.applicant.incomes:
return json.dumps([i.dict for i in application.applicant.incomes])
else:
return Response(
response=json.dumps({'errors': 'Applicant income does not exist.', 'form': income_form.data}),
status=404, headers=None,
content_type='application/json; charset=utf-8')
if not income_form.validate_on_submit():
return Response(
response=json.dumps({'errors': income_form.errors, 'form': income_form.data}),
status=400, headers=None,
content_type='application/json; charset=utf-8')
income = Income()
for field in ['source', 'amount', 'frequency']:
if income_form.data[field]:
income[field] = income_form.data[field]
application.applicant.incomes.append(income)
application.save()
return Response(response=json.dumps(income.dict),
status=201, headers=None,
content_type='application/json; charset=utf-8')
@app.route('/svc/eat/v1/application/applicant/incomes/<income_id>', methods=['GET', 'DELETE'],
endpoint='svc_eat_v1_application_applicant_incomes_income_id')
@inject_application
def svc_eat_v1_application_applicant_incomes_income_id(application, income_id):
application.applicant = application.applicant or Applicant()
try:
income = application.applicant.incomes.get(_id=ObjectId(income_id))
if request.method == 'GET':
return json.dumps(income.dict)
else:
application.applicant.incomes.remove(income)
application.save()
return Response(response=None,
status=204, headers=None)
except DoesNotExist:
return Response(
response=json.dumps({'errors': 'Income does not exist.'}),
status=404, headers=None,
content_type='application/json; charset=utf-8')
except Exception:
return Response(
response=json.dumps({'errors': 'The income could not be queried'}),
status=404, headers=None,
content_type='application/json; charset=utf-8')
@app.route('/svc/eat/v1/application/children', methods=['GET', 'POST'],
endpoint='svc_eat_v1_application_children')
@inject_application
def svc_eat_v1_application_children(application):
child_form = ChildForm(csrf_enabled=False)
if request.method == 'GET':
if application.children:
return json.dumps([c.dict for c in application.children])
else:
return Response(
response=json.dumps({'errors': 'Children do not exist.', 'form': child_form.data}),
status=404, headers=None,
content_type='application/json; charset=utf-8')
if not child_form.validate_on_submit():
return Response(
response=json.dumps({'errors': child_form.errors, 'form': child_form.data}),
status=400, headers=None,
content_type='application/json; charset=utf-8')
child = Child()
for field in ['first_name', 'middle_initial', 'last_name', 'school_postal', 'school_city', 'school_state',
'school_name']:
if child_form.data[field]:
child[field] = child_form.data[field]
application.children.append(child)
application.save()
return Response(response=json.dumps(child.dict),
status=201, headers=None,
content_type='application/json; charset=utf-8')
@app.route('/svc/eat/v1/application/children/<child_id>', methods=['GET', 'POST', 'DELETE'],
endpoint='svc_eat_v1_application_children_child_id')
@inject_application
def svc_eat_v1_application_children_child_id(application, child_id):
child_form = ChildForm(csrf_enabled=False)
try:
child = application.children.get(_id=ObjectId(child_id))
if request.method == 'GET':
return json.dumps(child.dict)
elif request.method == 'POST':
if not child_form.validate_on_submit():
return Response(
response=json.dumps({'errors': child_form.errors, 'form': child_form.data}),
status=400, headers=None,
content_type='application/json; charset=utf-8')
for field in ['first_name', 'middle_initial', 'last_name', 'school_postal', 'school_city',
'school_state', 'school_name']:
child[field] = child_form.data[field]
application.save()
else:
application.children.remove(child)
application.save()
return Response(response=json.dumps(application.dict),
status=201, headers=None,
content_type='application/json; charset=utf-8')
except DoesNotExist:
return Response(
response=json.dumps({'errors': 'Child does not exist.'}),
status=404, headers=None,
content_type='application/json; charset=utf-8')
except Exception:
return Response(
response=json.dumps({'errors': 'The child could not be queried'}),
status=404, headers=None,
content_type='application/json; charset=utf-8')
@app.route('/svc/eat/v1/application/children/<child_id>/incomes', methods=['GET', 'POST'],
endpoint='svc_eat_v1_application_children_child_id_incomes')
@inject_application
def svc_eat_v1_application_children_child_id_incomes(application, child_id):
income_form = IncomeForm(csrf_enabled=False)
try:
child = application.children.get(_id=ObjectId(child_id))
if request.method == 'GET':
return json.dumps([i.dict for i in child.incomes])
else:
if not income_form.validate_on_submit():
return Response(
response=json.dumps({'errors': income_form.errors, 'form': income_form.data}),
status=400, headers=None,
content_type='application/json; charset=utf-8')
income = Income()
for field in ['source', 'amount', 'frequency']:
if income_form.data[field]:
income[field] = income_form.data[field]
child.incomes.append(income)
application.save()
return Response(response=json.dumps(income.dict),
status=201, headers=None,
content_type='application/json; charset=utf-8')
except DoesNotExist:
return Response(
response=json.dumps({'errors': 'Child does not exist.'}),
status=404, headers=None,
content_type='application/json; charset=utf-8')
except Exception:
return Response(
response=json.dumps({'errors': 'The child could not be queried'}),
status=404, headers=None,
content_type='application/json; charset=utf-8')
@app.route('/svc/eat/v1/application/children/<child_id>/incomes/<income_id>', methods=['GET', 'DELETE'],
endpoint='svc_eat_v1_application_children_child_id_incomes_income_id')
@inject_application
def svc_eat_v1_application_children_child_id_incomes_income_id(application, child_id, income_id):
try:
child = application.children.get(_id=ObjectId(child_id))
income = child.incomes.get(_id=ObjectId(income_id))
if request.method == 'GET':
return json.dumps(income.dict)
else:
child.incomes.remove(income)
application.save()
return Response(response=json.dumps(application.dict),
status=201, headers=None,
content_type='application/json; charset=utf-8')
except DoesNotExist:
return Response(
response=json.dumps({'errors': 'Income does not exist.'}),
status=404, headers=None,
content_type='application/json; charset=utf-8')
except Exception:
return Response(
response=json.dumps({'errors': 'The income could not be queried'}),
status=404, headers=None,
content_type='application/json; charset=utf-8')
@app.route('/svc/eat/v1/application/children/<child_id>/programs', methods=['GET', 'POST'],
endpoint='svc_eat_v1_application_children_child_id_programs')
@inject_application
def svc_eat_v1_application_children_child_id_programs(application, child_id):
programs_form = ChildStatusForm(csrf_enabled=False)
try:
child = application.children.get(_id=ObjectId(child_id))
if request.method == 'GET':
return json.dumps([p.dict for p in child.programs])
else:
if not programs_form.validate_on_submit():
return Response(
response=json.dumps({'errors': programs_form.errors, 'form': programs_form.programs.data}),
status=400, headers=None,
content_type='application/json; charset=utf-8')
# programs = [Program(program_name=p) for (p, k) in programs_form.data.items() if k]
# child.programs = programs
application.save()
return Response(response=json.dumps(application.dict),
status=201, headers=None,
content_type='application/json; charset=utf-8')
except DoesNotExist:
return Response(
response=json.dumps({'errors': 'Child does not exist.'}),
status=404, headers=None,
content_type='application/json; charset=utf-8')
except Exception:
return Response(
response=json.dumps({'errors': 'The child could not be queried'}),
status=404, headers=None,
content_type='application/json; charset=utf-8')
@app.route('/svc/eat/v1/application/children/<child_id>/ethnicities', methods=['GET', 'POST'],
endpoint='svc_eat_v1_application_children_child_id_ethnicities')
@inject_application
def svc_eat_v1_application_children_child_id_ethnicities(application, child_id):
ethnicities_form = EthnicityForm(csrf_enabled=False)
try:
child = application.children.get(_id=ObjectId(child_id))
if request.method == 'GET':
return json.dumps([p.dict for p in child.ethnicities])
else:
if not ethnicities_form.validate_on_submit():
return Response(
response=json.dumps(
{'errors': ethnicities_form.errors, 'form': ethnicities_form.ethnicities.data}),
status=400, headers=None,
content_type='application/json; charset=utf-8')
ethnicities = [Ethnicity(ethnicity_name=p) for (p, k) in ethnicities_form.data.items() if k]
child.ethnicities = ethnicities
application.save()
return Response(response=json.dumps(application.dict),
status=201, headers=None,
content_type='application/json; charset=utf-8')
except DoesNotExist:
return Response(
response=json.dumps({'errors': 'Child does not exist.'}),
status=404, headers=None,
content_type='application/json; charset=utf-8')
except Exception:
return Response(
response=json.dumps({'errors': 'The child could not be queried'}),
status=404, headers=None,
content_type='application/json; charset=utf-8')
@app.route('/svc/eat/v1/application/persons', methods=['GET', 'POST'],
endpoint='svc_eat_v1_application_persons')
@inject_application
def svc_eat_v1_application_persons(application):
person_form = PersonForm(csrf_enabled=False)
if request.method == 'GET':
if application.persons:
return json.dumps([c.dict for c in application.persons])
else:
return Response(
response=json.dumps({'errors': 'Persons do not exist.', 'form': person_form.data}),
status=404, headers=None,
content_type='application/json; charset=utf-8')
if not person_form.validate_on_submit():
return Response(
response=json.dumps({'errors': person_form.errors, 'form': person_form.data}),
status=400, headers=None,
content_type='application/json; charset=utf-8')
person = Person()
for field in ['first_name', 'middle_initial', 'last_name']:
if person_form.data[field]:
person[field] = person_form.data[field]
application.persons.append(person)
application.save()
return Response(response=json.dumps(person.dict),
status=201, headers=None,
content_type='application/json; charset=utf-8')
@app.route('/svc/eat/v1/application/persons/<person_id>', methods=['GET', 'POST', 'DELETE'],
endpoint='svc_eat_v1_application_persons_person_id')
@inject_application
def svc_eat_v1_application_persons_person_id(application, person_id):
person_form = PersonForm(csrf_enabled=False)
try:
person = application.persons.get(_id=ObjectId(person_id))
if request.method == 'GET':
return json.dumps(person.dict)
elif request.method == 'POST':
if not person_form.validate_on_submit():
return Response(
response=json.dumps({'errors': person_form.errors, 'form': person_form.data}),
status=400, headers=None,
content_type='application/json; charset=utf-8')
for field in ['first_name', 'middle_initial', 'last_name']:
person[field] = person_form.data[field]
application.save()
else:
application.persons.remove(person)
application.save()
return Response(response=json.dumps(person.dict),
status=201, headers=None,
content_type='application/json; charset=utf-8')
except DoesNotExist:
return Response(
response=json.dumps({'errors': 'Person does not exist.'}),
status=404, headers=None,
content_type='application/json; charset=utf-8')
except Exception:
return Response(
response=json.dumps({'errors': 'The person could not be queried'}),
status=404, headers=None,
content_type='application/json; charset=utf-8')
@app.route('/svc/eat/v1/application/persons/<person_id>/incomes', methods=['GET', 'POST'],
endpoint='svc_eat_v1_application_persons_person_id_incomes')
@inject_application
def svc_eat_v1_application_persons_person_id_incomes(application, person_id):
income_form = IncomeForm(csrf_enabled=False)
try:
person = application.persons.get(_id=ObjectId(person_id))
if request.method == 'GET':
return json.dumps([i.dict for i in person.incomes])
else:
if not income_form.validate_on_submit():
return Response(
response=json.dumps({'errors': income_form.errors, 'form': income_form.data}),
status=400, headers=None,
content_type='application/json; charset=utf-8')
income = Income()
for field in ['source', 'amount', 'frequency']:
if income_form.data[field]:
income[field] = income_form.data[field]
person.incomes.append(income)
application.save()
return Response(response=json.dumps(income.dict),
status=201, headers=None,
content_type='application/json; charset=utf-8')
except DoesNotExist:
return Response(
response=json.dumps({'errors': 'Person does not exist.'}),
status=404, headers=None,
content_type='application/json; charset=utf-8')
except Exception:
return Response(
response=json.dumps({'errors': 'The person could not be queried'}),
status=404, headers=None,
content_type='application/json; charset=utf-8')
@app.route('/svc/eat/v1/application/persons/<person_id>/incomes/<income_id>', methods=['GET', 'DELETE'],
endpoint='svc_eat_v1_application_persons_person_id_incomes_income_id')
@inject_application
def svc_eat_v1_application_persons_person_id_incomes_income_id(application, person_id, income_id):
try:
person = application.persons.get(_id=ObjectId(person_id))
income = person.incomes.get(_id=ObjectId(income_id))
if request.method == 'GET':
return json.dumps(income.dict)
else:
person.incomes.remove(income)
application.save()
return Response(response=json.dumps(application.dict),
status=201, headers=None,
content_type='application/json; charset=utf-8')
except DoesNotExist:
return Response(
response=json.dumps({'errors': 'Income does not exist.'}),
status=404, headers=None,
content_type='application/json; charset=utf-8')
except Exception:
return Response(
response=json.dumps({'errors': 'The income could not be queried'}),
status=404, headers=None,
content_type='application/json; charset=utf-8')
@app.route('/svc/eat/v1/application/persons/<person_id>/programs', methods=['GET', 'POST'],
endpoint='svc_eat_v1_application_persons_person_id_programs')
@inject_application
def svc_eat_v1_application_persons_person_id_programs(application, person_id):
        programs_form = ChildStatusForm(csrf_enabled=False)  # same status form as the children/programs route; exposes the .programs field used below
try:
person = application.persons.get(_id=ObjectId(person_id))
if request.method == 'GET':
return json.dumps([p.dict for p in person.programs])
else:
if not programs_form.validate_on_submit():
return Response(
response=json.dumps({'errors': programs_form.errors, 'form': programs_form.programs.data}),
status=400, headers=None,
content_type='application/json; charset=utf-8')
programs = [Program(program_name=p) for (p, k) in programs_form.data.items() if k]
person.programs = programs
application.save()
return Response(response=json.dumps(application.dict),
status=201, headers=None,
content_type='application/json; charset=utf-8')
except DoesNotExist:
return Response(
response=json.dumps({'errors': 'Person does not exist.'}),
status=404, headers=None,
content_type='application/json; charset=utf-8')
except Exception:
return Response(
response=json.dumps({'errors': 'The person could not be queried'}),
status=404, headers=None,
content_type='application/json; charset=utf-8')
@app.route('/svc/eat/v1/application/persons/<person_id>/ethnicities', methods=['GET', 'POST'],
endpoint='svc_eat_v1_application_persons_person_id_ethnicities')
@inject_application
def svc_eat_v1_application_persons_person_id_ethnicities(application, person_id):
ethnicities_form = EthnicityForm(csrf_enabled=False)
try:
person = application.persons.get(_id=ObjectId(person_id))
if request.method == 'GET':
return json.dumps([p.dict for p in person.ethnicities])
else:
if not ethnicities_form.validate_on_submit():
return Response(
response=json.dumps(
{'errors': ethnicities_form.errors, 'form': ethnicities_form.ethnicities.data}),
status=400, headers=None,
content_type='application/json; charset=utf-8')
ethnicities = [Ethnicity(ethnicity_name=p) for (p, k) in ethnicities_form.data.items() if k]
person.ethnicities = ethnicities
application.save()
return Response(response=json.dumps(application.dict),
status=201, headers=None,
content_type='application/json; charset=utf-8')
except DoesNotExist:
return Response(
response=json.dumps({'errors': 'Person does not exist.'}),
status=404, headers=None,
content_type='application/json; charset=utf-8')
except Exception:
return Response(
response=json.dumps({'errors': 'The person could not be queried'}),
status=404, headers=None,
content_type='application/json; charset=utf-8')
|
|
import commands
import json
import os
from zipfile import ZipFile, BadZipfile
from django.core.files.storage import default_storage
from django.core.files.base import ContentFile
from django.conf import settings
from django.template.defaultfilters import slugify
from ..models import SapelliProject
from .project_mapper import create_project
from .sapelli_exceptions import (
SapelliException,
SapelliSAPException,
SapelliDuplicateException
)
import geokey_sapelli
def get_sapelli_dir_path(user=None):
"""
Creates the Sapelli working directory.
Parameters
----------
user : geokey.users.models.User
User who uploaded the project (optional).
Returns
-------
str:
Absolute path to the Sapelli working directory.
Raises
------
SapelliException:
When the working directory could not be created.
"""
sapelli_dir_path = os.path.join(default_storage.path('sapelli'), '') # joining with '' adds the trailing / or \
if user:
sapelli_dir_path = os.path.join(sapelli_dir_path, slugify(str(user.id) + '_' + user.display_name), '')
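        # e.g. <MEDIA_ROOT>/sapelli/42_jane-doe/ for user id 42 named "Jane Doe"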
if not os.path.exists(sapelli_dir_path):
# Create the directory if it doesn't exist:
try:
os.makedirs(sapelli_dir_path)
except BaseException, e:
raise SapelliException('Failed to create Sapelli working directory (%s): %s' % (sapelli_dir_path, str(e)))
return sapelli_dir_path
def load_from_sap(sap_file, user):
"""
Loads & saves a SapelliProject from the given SAP file.
Parameters
----------
sap_file : django.core.files.File
Uploaded (suspected) SAP file.
user : geokey.users.models.User
User who uploaded the project.
Returns
-------
SapelliProject:
SapelliProject instance for the parsed project.
Raises
------
SapelliException:
In case of a configuration problem.
SapelliSAPException:
When project loading fails.
SapelliDuplicateException:
When the project has already been uploaded.
"""
# Check if we got a file at all:
if sap_file is None:
raise SapelliSAPException('No file provided.')
# Store copy of file on disk (as it probably is an "in memory" file uploaded in an HTTP request):
try:
filename, extension = os.path.splitext(os.path.basename(sap_file.name))
relative_sap_file_path = default_storage.save(os.path.join(get_sapelli_dir_path(user), 'SAPs', '') + filename + extension, ContentFile(sap_file.read()))
sap_file_path = default_storage.path(relative_sap_file_path)
except BaseException, e:
raise SapelliSAPException('Failed to store uploaded file: ' + str(e))
# The file will be deleted if an exception is raised in this block:
try:
# Check if it is a valid SAP file:
check_sap_file(sap_file_path)
# Load Sapelli project (extract+parse) using SapColCmdLn Java program:
sapelli_project_info = get_sapelli_project_info(sap_file_path, user)
# Check for duplicates:
if SapelliProject.objects.exists_for_contribution_by_sapelli_info(
sapelli_project_info['sapelli_id'],
sapelli_project_info['sapelli_fingerprint']):
raise SapelliDuplicateException
# Create GeoKey and SapelliProject:
try:
geokey_project = create_project(sapelli_project_info, user, sap_file_path)
except BaseException, e:
raise SapelliSAPException(str(e))
except BaseException, e:
try: # Remove file:
os.remove(sap_file_path)
except BaseException:
pass
raise e
# When successful return the SapelliProject object:
return geokey_project.sapelli_project
def check_sap_file(sap_file_path):
"""
Checks if the file at the given path is a valid Sapelli project file.
Parameters
----------
sap_file_path : str
Path to (suspected) Sapelli project file.
Raises
------
SapelliSAPException:
When the given file does not exist, is not a ZIP archive, or does not contain PROJECT.xml.
"""
try:
if not os.path.isfile(sap_file_path):
raise SapelliSAPException('The file does not exist.')
# Check if it is a ZIP file:
        zip_file = ZipFile(sap_file_path)  # throws BadZipfile
        # Check if it contains PROJECT.xml:
        zip_file.getinfo('PROJECT.xml')  # throws KeyError
except BadZipfile:
raise SapelliSAPException('The file is not a valid Sapelli project file (*.sap, *.excites or *.zip).')
except KeyError:
raise SapelliSAPException('The file is not a valid Sapelli project file (ZIP archive does not contain PROJECT.xml file).')
finally:
try:
            zip_file.close()
except BaseException:
pass
def get_sapelli_jar_path():
"""
Determines where the Sapelli jar file is.
Returns
-------
str:
Absolute path to the Sapelli jar file.
Raises
------
SapelliException:
When the Sapelli jar file cannot be found.
"""
# Determine where the Sapelli jar file is expected to be:
if getattr(settings, 'SAPELLI_JAR', None) is not None:
# Use path configured in GeoKey settings.py:
sapelli_jar_path = settings.SAPELLI_JAR
else:
# Use default path (as advised in README.rst):
sapelli_jar_path = os.path.abspath(
os.path.join(
os.path.dirname(os.path.abspath(geokey_sapelli.__path__[0])),
'lib',
'sapelli-collector-cmdln-with-dependencies.jar'))
# Check if the Sapelli jar is actually there:
if not os.path.isfile(sapelli_jar_path):
raise SapelliException('Cannot find Sapelli jar file at path: %s' % sapelli_jar_path)
# Return path:
return sapelli_jar_path
def get_sapelli_project_info(sap_file_path, user):
"""
Uses the Sapelli Collector cmdlnd client (Java) to extract the SAP file and parse the PROJECT.xml.
Parameters
----------
sap_file_path : str
Path to Sapelli project file.
user : geokey.users.models.User
User who uploaded the project (optional).
Returns
-------
dict:
the "sapelli_project_info" dictionary describing the loaded project.
Raises
------
SapelliException:
When the Sapelli jar file cannot be found,
the Sapelli working directory cannot be created,
or when the java command could not be run.
SapelliSAPException:
When an error occurs during running of SapColCmdLn, will contain java_stacktrace.
"""
# Run SapColCmdLn class from the Sapelli jar:
std_output = None
try:
command = 'java -cp %s uk.ac.ucl.excites.sapelli.collector.SapColCmdLn -p %s -load "%s" -geokey' % (
get_sapelli_jar_path(),
get_sapelli_dir_path(user),
sap_file_path
)
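        # Resulting command has the form:
        #   java -cp /path/to/sapelli-collector-cmdln-with-dependencies.jar \
        #     uk.ac.ucl.excites.sapelli.collector.SapColCmdLn \
        #     -p /path/to/working_dir/ -load "/path/to/file.sap" -geokey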
std_output = commands.getstatusoutput(command)[1] # may fail if we somehow can't run java at all(?)
        return json.loads(std_output)  # fails if java/SapColCmdLn output is not valid JSON
except SapelliException, se: # coming from get_sapelli_jar_path or get_sapelli_dir_path
raise se
except BaseException, e:
if std_output is not None:
raise SapelliSAPException('SapColCmdLn error', java_stacktrace=std_output)
else:
raise SapelliException('Command error: ' + str(e))
|
|
import logging
import json
from django.contrib.auth.mixins import LoginRequiredMixin
from django.http import HttpResponse, QueryDict, HttpResponseRedirect
from django.core.urlresolvers import reverse, reverse_lazy
from django.views.generic import (
CreateView,
DetailView,
ListView,
UpdateView,
DeleteView)
from django.shortcuts import get_object_or_404
from project.users.models import User
from .form_importers import get_form_impoter_plugin_urls
from .forms import (
EventForm,
EmailForm,
LocationForm,
FormEntryForm,
FormElementEntryFormSet,
)
from .models import (
Event,
EmailApp,
Location,
FormEntry,
FormElementEntry,
FormHandlerEntry)
from django.http import JsonResponse
# ********* New ****************
from django.contrib.auth.decorators import login_required, permission_required
# from .decorators import permissions_required, SATISFY_ALL, SATISFY_ANY
from django.contrib import messages
from django.shortcuts import redirect, render
from django.db import models, IntegrityError
from django.core.exceptions import ObjectDoesNotExist
from django.http import Http404
from .base import (
fire_form_callbacks,
run_form_handlers,
form_element_plugin_registry,
form_handler_plugin_registry,
submit_plugin_form_data,
get_theme,
)
from .constants import (
CALLBACK_BEFORE_FORM_VALIDATION,
CALLBACK_FORM_VALID_BEFORE_SUBMIT_PLUGIN_FORM_DATA,
CALLBACK_FORM_VALID,
CALLBACK_FORM_VALID_AFTER_FORM_HANDLERS,
CALLBACK_FORM_INVALID
)
from .dynamic import assemble_form_class
from .settings import GET_PARAM_INITIAL_DATA, DEBUG
from .utils import (
append_edit_and_delete_links_to_field,
get_user_form_element_plugins_grouped,
get_user_form_field_plugin_uids,
get_user_form_handler_plugins,
get_user_form_handler_plugin_uids,
)
# class EventList(LoginRequiredMixin, ListView):
class EventList(ListView):
"""Organiser can view a list of his events"""
model = Event
def get_context_data(self, **kwargs):
context = super(EventList, self).get_context_data(**kwargs)
context['events'] = Event.objects.filter(user=self.request.user)
# context['events'] = Event.objects.all()
# context['quota'] = self.request.user.event_value
return context
class EventDetail(DetailView):
"""Organiser can view a list of his events"""
model = Event
template_name = 'events/event_detail.html'
def get_context_data(self, **kwargs):
context = super(EventDetail, self).get_context_data(**kwargs)
context['locations'] = Location.objects.filter(
event__title=self.object.title) # .filter(
# event__user=self.request.user)
# context['collection'] = FormElementEntry.objects.filter(
# form_entry_id=self.object.pk)
# context['options'] = assemble_form_class(
# self.object,
# )
# context['collection_quota'] = self.request.user.collection_value
return context
class EventDelete(LoginRequiredMixin, DeleteView):
"""Organiser can delete the Event"""
model = Event
template_name = 'events/event_delete.html'
form_class = EventForm
def get_success_url(self):
return reverse('events:list')
class EventCreate(LoginRequiredMixin, CreateView):
"""Organiser can create Event in frontend"""
model = Event
form_class = EventForm
def get_template_names(self):
check_object = Event.objects.filter(
user=self.request.user
).order_by('-title').exists()
check_events_number = Event.objects.filter(
user=self.request.user).count()
        if not check_object:
            return ['events/event_form.html']
        elif check_events_number < self.request.user.event_value:
            return ['events/event_form.html']
        else:
            return ['events/event_list.html']
def get_context_data(self, **kwargs):
context = super(EventCreate, self).get_context_data(**kwargs)
context['events'] = Event.objects.filter(user=self.request.user)
return context
def form_valid(self, form):
check_events_number = Event.objects.filter(
user=self.request.user).count()
organiser = form.save(commit=False)
if check_events_number < self.request.user.event_value:
organiser.user = User.objects.get(username=self.request.user)
organiser.save()
return HttpResponseRedirect(
reverse('events:l-create', args=(organiser.pk,)))
else:
return HttpResponseRedirect(
reverse('events:list'))
class EventUpdate(LoginRequiredMixin, UpdateView):
model = Event
form_class = EventForm
template_name = 'events/event_form.html'
success_url = reverse_lazy('events:list')
class LocationCreate(LoginRequiredMixin, CreateView):
"""Organiser can create Location"""
template_name = 'events/location_form.html'
form_class = LocationForm
model = Location
def form_valid(self, form):
# Pass the Foreign Key to the form
form.instance.event = get_object_or_404(
Event, pk=self.kwargs.get('pk'))
# Verify the user quota against default quota
event_location_quota = Event.objects.filter(
pk=self.kwargs['pk']).values_list(
'location_quota', flat=True)[0]
user_locations_count = Location.objects.filter(
event__pk=self.kwargs['pk']).filter(
event__user=self.request.user).count()
location = form.save(commit=False)
# Save form only if user passes condition
if user_locations_count < event_location_quota:
location.save()
return super(LocationCreate, self).form_valid(form)
# Else redirect him to the Events list
else:
return HttpResponseRedirect(
reverse('events:list'))
# Pass the Event pk to the collection
def get_success_url(self, **kwargs):
return reverse_lazy('events:fobi.edit_form_entry',
kwargs={'form_entry_id': self.kwargs['pk']})
class LocationDelete(LoginRequiredMixin, DeleteView):
"""Organiser can delete the Location"""
model = Location
template_name = 'events/location_delete.html'
form_class = LocationForm
# After delete go the event
def get_success_url(self, **kwargs):
pk = Location.objects.filter(
pk=self.kwargs['pk']).values_list(
'event__pk', flat=True)[0]
return reverse_lazy('events:detail',
kwargs={'pk': pk})
class LocationUpdate(LoginRequiredMixin, UpdateView):
model = Location
form_class = LocationForm
# After update go the event
def get_success_url(self, **kwargs):
pk = Location.objects.filter(
pk=self.kwargs['pk']).values_list('event__pk', flat=True)[0]
return reverse_lazy('events:detail',
kwargs={'pk': pk})
class AjaxableResponseMixin(object):
"""
Mixin to add AJAX support to a form.
Must be used with an object-based FormView (e.g. CreateView)
"""
def form_invalid(self, form):
response = super(AjaxableResponseMixin, self).form_invalid(form)
if self.request.is_ajax():
return JsonResponse(form.errors, status=400)
else:
return response
def form_valid(self, form):
# We make sure to call the parent's form_valid() method because
# it might do some processing (in the case of CreateView, it will
# call form.save() for example).
response = super(AjaxableResponseMixin, self).form_valid(form)
if self.request.is_ajax():
data = {
'pk': self.object.pk,
'name': self.object.name,
'email': self.object.email,
}
return JsonResponse(data)
else:
return response
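# Note: request.is_ajax() tests for the X-Requested-With: XMLHttpRequest
# header; it was deprecated in Django 3.1 in favour of inspecting
# request.headers directly.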
# from tablib import Dataset
class EmailCreate(LoginRequiredMixin, AjaxableResponseMixin, CreateView):
"""Organiser can create Location"""
template_name = 'events/email_form.html'
form_class = EmailForm
model = EmailApp
def form_valid(self, form):
# Pass the Foreign Key to the form
form.instance.event = get_object_or_404(
Event, pk=self.kwargs.get('pk'))
# Verify the user quota against default quota
event_email_quota = Event.objects.filter(
pk=self.kwargs['pk']).values_list(
'email_quota', flat=True)[0]
user_email_count = EmailApp.objects.filter(
event__pk=self.kwargs['pk']).filter(
event__user=self.request.user).count()
email = form.save(commit=False)
# Save form only if user passes condition
if user_email_count < event_email_quota:
email.save()
return super().form_valid(form)
# Else redirect him to the Events list
else:
return HttpResponseRedirect(
reverse('events:list'))
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['emails'] = EmailApp.objects.filter(
event__pk=self.kwargs.get('pk')).order_by('-pk')
context['event_email_quota'] = Event.objects.filter(
pk=self.kwargs['pk']).values_list(
'email_quota', flat=True)[0]
return context
def get_success_url(self, **kwargs):
return reverse_lazy('events:list')
# class InviteeURL(DetailView):
# model = Event
# template_name = 'invitee_url.html'
import os # noqa
import csv # noqa
def upload_csv(request):
if not request.user.is_authenticated:
return redirect("home")
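    # Each CSV row is expected to be: event_pk, attendee name, attendee email.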
    csv_path = os.path.dirname(os.path.abspath(__file__))  # NOTE: this is a directory; it must be joined with an actual CSV filename for open() below to succeed
try:
with open(csv_path) as f:
reader = csv.reader(f)
for row in reader:
                event_obj = Event.objects.get(
                    pk=str(row[0]),
                )
product_obj, created = EmailApp.objects.get_or_create(
event=event_obj,
name=str(row[1]),
email=str(row[2]),
)
success = "Added to database"
context = {"success": success}
    except (csv.Error, OSError, Event.DoesNotExist) as e:
print(e)
context = {'error': e}
template = "events/email_form.html"
return render(request, template, context)
# @login_required
def delete_post(request):
if request.method == 'DELETE':
post = EmailApp.objects.get(
pk=int(QueryDict(request.body).get('postpk')))
post.delete()
response_data = {}
response_data['msg'] = 'Post was deleted.'
return HttpResponse(
json.dumps(response_data),
content_type="application/json"
)
else:
return HttpResponse(
json.dumps({"nothing to see": "this isn't happening"}),
content_type="application/json"
)
def _delete_plugin_entry(request,
entry_id,
entry_model_cls,
get_user_plugin_uids_func,
message,
html_anchor):
"""Abstract delete entry.
:param django.http.HttpRequest request:
:param int entry_id:
:param fobi.models.AbstractPluginEntry entry_model_cls: Subclass of
``fobi.models.AbstractPluginEntry``.
:param callable get_user_plugin_uids_func:
:param str message:
:return django.http.HttpResponse:
"""
try:
obj = entry_model_cls._default_manager \
.select_related('form_entry') \
.get(pk=entry_id,
form_entry__user__pk=request.user.pk)
except ObjectDoesNotExist:
raise Http404(("{0} not found.").format(
entry_model_cls._meta.verbose_name)
)
form_entry = obj.form_entry
plugin = obj.get_plugin(request=request)
plugin.request = request
plugin._delete_plugin_data()
obj.delete()
messages.info(request, message.format(plugin.name))
redirect_url = reverse(
'events:fobi.edit_form_entry', kwargs={'form_entry_id': form_entry.pk}
)
return redirect("{0}{1}".format(redirect_url, html_anchor))
# *****************************************************************************
# **************************** Add form handler entry *************************
# *****************************************************************************
@login_required
# @permission_required('events.add_formhandlerentry')
def add_form_handler_entry(request,
form_entry_id,
form_handler_plugin_uid,
theme=None,
template_name=None):
"""Add form handler entry.
:param django.http.HttpRequest request:
:param int form_entry_id:
:param int form_handler_plugin_uid:
:param fobi.base.BaseTheme theme: Theme instance.
:param string template_name:
:return django.http.HttpResponse:
"""
    print('Form handler', form_entry_id)
try:
form_entry = Event._default_manager.get(pk=form_entry_id)
except ObjectDoesNotExist:
raise Http404("Form entry not found.")
user_form_handler_plugin_uids = get_user_form_handler_plugin_uids(
request.user
)
print(user_form_handler_plugin_uids)
if form_handler_plugin_uid not in user_form_handler_plugin_uids:
raise Http404("Plugin does not exist or you are not allowed "
"to use this plugin!")
form_handler_plugin_cls = form_handler_plugin_registry.get(
form_handler_plugin_uid
)
# Check if we deal with form handler plugin that is only allowed to be
# used once. In that case, check if it has been used already in the current
# form entry.
if not form_handler_plugin_cls.allow_multiple:
times_used = FormHandlerEntry._default_manager \
.filter(form_entry__id=form_entry_id,
plugin_uid=form_handler_plugin_cls.uid) \
.count()
if times_used > 0:
raise Http404(
("The {0} plugin can be used only once in a "
"form.").format(form_handler_plugin_cls.name)
)
form_handler_plugin = form_handler_plugin_cls(user=request.user)
form_handler_plugin.request = request
form_handler_plugin_form_cls = form_handler_plugin.get_form()
form = None
obj = FormHandlerEntry()
obj.form_entry = form_entry
obj.plugin_uid = form_handler_plugin_uid
obj.user = request.user
save_object = False
if not form_handler_plugin_form_cls:
save_object = True
elif request.method == 'POST':
form = form_handler_plugin.get_initialised_create_form_or_404(
data=request.POST,
files=request.FILES
)
if form.is_valid():
# Saving the plugin form data.
form.save_plugin_data(request=request)
# Getting the plugin data.
obj.plugin_data = form.get_plugin_data(request=request)
save_object = True
else:
form = form_handler_plugin.get_initialised_create_form_or_404()
if save_object:
# Save the object.
obj.save()
messages.info(
request,
('The form handler plugin "{0}" was added '
'successfully.').format(form_handler_plugin.name)
)
# return redirect(
# "{0}?active_tab=tab-form-handlers".format(
# reverse(
# 'fobi.edit_form_entry',
# kwargs={'form_entry_id': form_entry_id}
# )
# )
# )
return redirect(reverse('events:list'))
context = {
'form': form,
'form_entry': form_entry,
'form_handler_plugin': form_handler_plugin,
}
    # If given, pass to the template (and override the value set by
    # the context processor).
if theme:
context.update({'fobi_theme': theme})
if not template_name:
if not theme:
theme = get_theme(request=request, as_instance=True)
template_name = theme.add_form_handler_entry_template
return render(request, template_name, context)
# *****************************************************************************
# **************************** Edit form handler entry ************************
# *****************************************************************************
# @login_required
# @permission_required('events.change_formhandlerentry')
def edit_form_handler_entry(request,
form_handler_entry_id,
theme=None,
template_name=None):
"""Edit form handler entry.
:param django.http.HttpRequest request:
:param int form_handler_entry_id:
:param fobi.base.BaseTheme theme: Theme instance.
:param string template_name:
:return django.http.HttpResponse:
"""
try:
obj = FormHandlerEntry._default_manager \
.select_related('form_entry') \
.get(pk=form_handler_entry_id)
except ObjectDoesNotExist:
raise Http404("Form handler entry not found.")
form_entry = obj.form_entry
form_handler_plugin = obj.get_plugin(request=request)
form_handler_plugin.request = request
FormHandlerPluginForm = form_handler_plugin.get_form()
form = None
if not FormHandlerPluginForm:
messages.info(
request,
('The form handler plugin "{0}" is not '
'configurable!').format(form_handler_plugin.name)
)
return redirect('events:fobi.edit_form_entry',
form_entry_id=form_entry.pk)
elif request.method == 'POST':
form = form_handler_plugin.get_initialised_edit_form_or_404(
data=request.POST,
files=request.FILES
)
if form.is_valid():
# Saving the plugin form data.
form.save_plugin_data(request=request)
# Getting the plugin data.
obj.plugin_data = form.get_plugin_data(request=request)
# Save the object.
obj.save()
messages.info(
request,
('The form handler plugin "{0}" was edited '
'successfully.').format(form_handler_plugin.name)
)
return redirect('events:fobi.edit_form_entry',
form_entry_id=form_entry.pk)
else:
form = form_handler_plugin.get_initialised_edit_form_or_404()
context = {
'form': form,
'form_entry': form_entry,
'form_handler_plugin': form_handler_plugin,
}
    # If given, pass to the template (and override the value set by
    # the context processor).
if theme:
context.update({'fobi_theme': theme})
if not template_name:
if not theme:
theme = get_theme(request=request, as_instance=True)
template_name = theme.edit_form_handler_entry_template
return render(request, template_name, context)
# *****************************************************************************
# **************************** Delete form handler entry **********************
# *****************************************************************************
# @login_required
# @permission_required('events.delete_formhandlerentry')
def delete_form_handler_entry(request, form_handler_entry_id):
"""Delete form handler entry.
:param django.http.HttpRequest request:
:param int form_handler_entry_id:
:return django.http.HttpResponse:
"""
return _delete_plugin_entry(
request=request,
entry_id=form_handler_entry_id,
entry_model_cls=FormHandlerEntry,
get_user_plugin_uids_func=get_user_form_handler_plugin_uids,
message='The form handler plugin "{0}" '
'was deleted successfully.',
html_anchor='?active_tab=tab-form-handlers'
)
# *****************************************************************************
# **************************** Edit form entry ********************************
# *****************************************************************************
@login_required
def edit_form_entry(request, form_entry_id, theme=None, template_name=None):
"""Edit form entry.
:param django.http.HttpRequest request:
:param int form_entry_id:
:param fobi.base.BaseTheme theme: Theme instance.
:param str template_name:
:return django.http.HttpResponse:
"""
try:
form_entry = Event._default_manager \
.select_related('user') \
.prefetch_related('formelemententry_set') \
.get(pk=form_entry_id, user__pk=request.user.pk)
except ObjectDoesNotExist as err:
raise Http404("Form entry not found.")
if request.method == 'POST':
# The form entry form (does not contain form elements)
form = FormEntryForm(request.POST, request.FILES, instance=form_entry,
request=request)
# This is where we save ordering if it has been changed.
# The `FormElementEntryFormSet` contain ids and positions only.
if 'ordering' in request.POST:
form_element_entry_formset = FormElementEntryFormSet(
request.POST,
request.FILES,
queryset=form_entry.formelemententry_set.all(),
# prefix = 'form_element'
)
        # If form elements aren't properly made (developer's fault)
# there might be problems with saving the ordering - likely
# in case of hidden elements only. Thus, we want to avoid
# errors here.
try:
if form_element_entry_formset.is_valid():
form_element_entry_formset.save()
messages.info(
request,
"Elements ordering edited successfully."
)
return redirect(
reverse('events:fobi.edit_form_entry',
kwargs={'form_entry_id': form_entry_id})
)
except MultiValueDictKeyError as err: # noqa
messages.error(
request,
"Errors occurred while trying to change the "
"elements ordering!")
return redirect(
reverse('events:fobi.edit_form_entry',
kwargs={'form_entry_id': form_entry_id})
)
else:
form_element_entry_formset = FormElementEntryFormSet(
queryset=form_entry.formelemententry_set.all(),
# prefix='form_element'
)
if form.is_valid():
obj = form.save(commit=False)
obj.user = request.user
try:
obj.save()
messages.info(
request,
('Form {0} was edited successfully.').format(
form_entry.name
)
)
return redirect(
reverse(
'events:fobi.edit_form_entry',
kwargs={'form_entry_id': form_entry_id}
)
)
except IntegrityError as err:
messages.info(
request,
(
'Errors occurred while saving the form: {0}.'
).format(str(err))
)
else:
# The form entry form (does not contain form elements)
form = FormEntryForm(instance=form_entry, request=request)
form_element_entry_formset = FormElementEntryFormSet(
queryset=form_entry.formelemententry_set.all(),
# prefix='form_element'
)
# In case of success, we don't need this (since redirect would happen).
# Thus, fetch only if needed.
form_elements = form_entry.formelemententry_set.all()
form_handlers = form_entry.formhandlerentry_set.all()[:]
used_form_handler_uids = [form_handler.plugin_uid
for form_handler
in form_handlers]
# The code below (two lines below) is not really used at the moment,
# thus - comment out, but do not remove, as we might need it later on.
# all_form_entries = FormEntry._default_manager \
# .only('id', 'name', 'slug') \
# .filter(user__pk=request.user.pk)
# List of form element plugins allowed to user
user_form_element_plugins = get_user_form_element_plugins_grouped(
request.user
)
# List of form handler plugins allowed to user
user_form_handler_plugins = get_user_form_handler_plugins(
request.user,
exclude_used_singles=True,
used_form_handler_plugin_uids=used_form_handler_uids
)
# Assembling the form for preview
form_cls = assemble_form_class(
form_entry,
origin='edit_form_entry',
origin_kwargs_update_func=append_edit_and_delete_links_to_field,
request=request
)
assembled_form = form_cls()
# print('assembled_form', assembled_form)
# In debug mode, try to identify possible problems.
if DEBUG:
assembled_form.as_p()
else:
try:
assembled_form.as_p()
except Exception as err:
logger.error(err)
# If no theme provided, pick a default one.
if not theme:
theme = get_theme(request=request, as_instance=True)
theme.collect_plugin_media(form_elements)
# Verify the user quota against default quota
event_location_quota = Event.objects.filter(
pk=form_entry.pk).values_list(
'collection_quota', flat=True)[0]
user_locations_count = FormEntry.objects.filter(
event__pk=form_entry.pk).filter(
event__user=request.user).count()
context = {
'form': form,
'form_entry': form_entry,
'form_elements': form_elements,
'form_handlers': form_handlers,
# 'all_form_entries': all_form_entries,
'user_form_element_plugins': user_form_element_plugins,
'user_form_handler_plugins': user_form_handler_plugins,
'assembled_form': assembled_form,
'form_element_entry_formset': form_element_entry_formset,
'fobi_theme': theme,
'collection_quota': request.user.collection_value,
'user_locations_count': user_locations_count,
'event_location_quota': event_location_quota,
}
# if not template_name:
# template_name = theme.edit_form_entry_template
template_name = 'bootstrap3/edit_form_view.html'
return render(request, template_name, context)
logger = logging.getLogger(__name__)
@login_required
def dashboard(request, theme=None, template_name=None):
"""Dashboard.
:param django.http.HttpRequest request:
:param fobi.base.BaseTheme theme: Theme instance.
:param string template_name:
:return django.http.HttpResponse:
"""
form_entries = Event._default_manager \
.filter(user__pk=request.user.pk) \
.select_related('user')
context = {
'form_entries': form_entries,
'form_importers': get_form_impoter_plugin_urls(),
}
    # If given, pass to the template (and override the value set by
    # the context processor).
if theme:
context.update({'fobi_theme': theme})
if not template_name:
theme = get_theme(request=request, as_instance=True)
template_name = theme.dashboard_template
return render(request, template_name, context)
class EventDetailInvitati(LoginRequiredMixin, DetailView):
"""Organiser can view a list of his events"""
model = EmailApp
template_name = 'events/event_detail_invitati.html'
slug_field = 'secret'
slug_url_kwarg = 'secret'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['event'] = self.object.event
context['locations'] = Location.objects.filter(
event__title=self.object.event)
context['anas'] = FormElementEntry.objects.filter(
form_entry_id=self.object.pk)
context['collections'] = assemble_form_class(
self.object.event,
)
context['das'] = EmailApp.objects.values_list('event__title', flat=True)[6]
# context['collections'] = self.get_form()
return context
def view_form_entry(
request,
# form_entry_slug,
secret,
theme=None,
template_name=None):
"""View created form.
:param django.http.HttpRequest request:
:param string form_entry_slug:
:param fobi.base.BaseTheme theme: Theme instance.
:param string template_name:
:return django.http.HttpResponse:
"""
secrets = EmailApp.objects.filter(secret=secret)
try:
# kwargs = {'slug': form_entry_slug}
kwargs = {'emailapp': secrets}
if not request.user.is_authenticated():
kwargs.update({'is_public': True})
form_entry = Event._default_manager.select_related('user') \
.get(**kwargs)
except ObjectDoesNotExist as err:
raise Http404("Form entry not found.")
form_element_entries = form_entry.formelemententry_set.all()[:]
# This is where the most of the magic happens. Our form is being built
# dynamically.
form_cls = assemble_form_class(
form_entry,
form_element_entries=form_element_entries,
request=request
)
if request.method == 'POST':
form = form_cls(request.POST, request.FILES)
# Fire pre form validation callbacks
fire_form_callbacks(form_entry=form_entry,
request=request, form=form,
stage=CALLBACK_BEFORE_FORM_VALIDATION)
if form.is_valid():
# Fire form valid callbacks, before handling submitted plugin
# form data.
form = fire_form_callbacks(
form_entry=form_entry,
request=request,
form=form,
stage=CALLBACK_FORM_VALID_BEFORE_SUBMIT_PLUGIN_FORM_DATA
)
# Fire plugin processors
form = submit_plugin_form_data(
form_entry=form_entry,
invitee=secrets,
request=request,
form=form
)
# Fire form valid callbacks
form = fire_form_callbacks(form_entry=form_entry,
request=request, form=form,
stage=CALLBACK_FORM_VALID)
# Run all handlers
handler_responses, handler_errors = run_form_handlers(
form_entry=form_entry,
invitee=secret,
request=request,
form=form,
form_element_entries=form_element_entries
)
# Warning that not everything went ok.
if handler_errors:
for handler_error in handler_errors:
messages.warning(
request,
("Error occurred: {0}.").format(handler_error)
)
# Fire post handler callbacks
fire_form_callbacks(
form_entry=form_entry,
request=request,
form=form,
stage=CALLBACK_FORM_VALID_AFTER_FORM_HANDLERS
)
messages.info(
request,
("Form {0} was submitted successfully.").format(
form_entry.title
)
)
return redirect(
reverse('events:fobi.form_entry_submitted',
args=[form_entry.slug])
)
else:
# Fire post form validation callbacks
fire_form_callbacks(form_entry=form_entry, request=request,
form=form, stage=CALLBACK_FORM_INVALID)
else:
# Providing initial form data by feeding entire GET dictionary
# to the form, if ``GET_PARAM_INITIAL_DATA`` is present in the
# GET.
kwargs = {}
if GET_PARAM_INITIAL_DATA in request.GET:
kwargs = {'initial': request.GET}
form = form_cls(**kwargs)
# In debug mode, try to identify possible problems.
if DEBUG:
form.as_p()
else:
try:
form.as_p()
except Exception as err:
logger.error(err)
theme = get_theme(request=request, as_instance=True)
theme.collect_plugin_media(form_element_entries)
context = {
'form': form,
'form_entry': form_entry,
'fobi_theme': theme,
'fobi_form_title': form_entry.title,
}
if not template_name:
# template_name = theme.view_form_entry_template
template_name = 'events/event_detail_invitati.html'
return render(request, template_name, context)
def view_form_entry_public(
request,
form_entry_slug,
# secret,
theme=None,
template_name=None):
"""View created form.
:param django.http.HttpRequest request:
:param string form_entry_slug:
:param fobi.base.BaseTheme theme: Theme instance.
:param string template_name:
:return django.http.HttpResponse:
"""
# secrets = EmailApp.objects.filter(secret=secret)
try:
kwargs = {'slug': form_entry_slug}
# kwargs = {'emailapp': secrets}
# if not request.user.is_authenticated():
# kwargs.update({'is_public': True})
form_entry = Event._default_manager.select_related('user') \
.get(**kwargs)
except ObjectDoesNotExist as err:
raise Http404("Form entry not found.")
form_element_entries = form_entry.formelemententry_set.all()[:]
# This is where the most of the magic happens. Our form is being built
# dynamically.
form_cls = assemble_form_class(
form_entry,
form_element_entries=form_element_entries,
request=request
)
if request.method == 'POST':
form = form_cls(request.POST, request.FILES)
# Fire pre form validation callbacks
fire_form_callbacks(form_entry=form_entry,
request=request, form=form,
stage=CALLBACK_BEFORE_FORM_VALIDATION)
if form.is_valid():
# Fire form valid callbacks, before handling submitted plugin
# form data.
form = fire_form_callbacks(
form_entry=form_entry,
request=request,
form=form,
stage=CALLBACK_FORM_VALID_BEFORE_SUBMIT_PLUGIN_FORM_DATA
)
# Fire plugin processors
form = submit_plugin_form_data(
form_entry=form_entry,
# invitee=secrets,
request=request,
form=form
)
# Fire form valid callbacks
form = fire_form_callbacks(form_entry=form_entry,
request=request, form=form,
stage=CALLBACK_FORM_VALID)
# Run all handlers
handler_responses, handler_errors = run_form_handlers(
form_entry=form_entry,
# invitee=secret,
request=request,
form=form,
form_element_entries=form_element_entries
)
# Warning that not everything went ok.
if handler_errors:
for handler_error in handler_errors:
messages.warning(
request,
("Error occurred: {0}.").format(handler_error)
)
# Fire post handler callbacks
fire_form_callbacks(
form_entry=form_entry,
request=request,
form=form,
stage=CALLBACK_FORM_VALID_AFTER_FORM_HANDLERS
)
messages.info(
request,
("Form {0} was submitted successfully.").format(
form_entry.title
)
)
return redirect(
reverse('events:fobi.form_entry_submitted',
args=[form_entry.slug])
)
else:
# Fire post form validation callbacks
fire_form_callbacks(form_entry=form_entry, request=request,
form=form, stage=CALLBACK_FORM_INVALID)
else:
# Providing initial form data by feeding entire GET dictionary
# to the form, if ``GET_PARAM_INITIAL_DATA`` is present in the
# GET.
kwargs = {}
if GET_PARAM_INITIAL_DATA in request.GET:
kwargs = {'initial': request.GET}
form = form_cls(**kwargs)
# In debug mode, try to identify possible problems.
if DEBUG:
form.as_p()
else:
try:
form.as_p()
except Exception as err:
logger.error(err)
theme = get_theme(request=request, as_instance=True)
theme.collect_plugin_media(form_element_entries)
context = {
'form': form,
'form_entry': form_entry,
'fobi_theme': theme,
'fobi_form_title': form_entry.title,
}
if not template_name:
# template_name = theme.view_form_entry_template
template_name = 'events/event_detail_invitati.html'
return render(request, template_name, context)
@login_required
def delete_form_entry(request, form_entry_id, template_name=None):
"""Delete form entry.
:param django.http.HttpRequest request:
:param int form_entry_id:
:param string template_name:
:return django.http.HttpResponse:
"""
try:
obj = FormEntry._default_manager \
.get(pk=form_entry_id, user__pk=request.user.pk)
except ObjectDoesNotExist:
raise Http404("Form entry not found.")
obj.delete()
messages.info(
request,
('The form "{0}" was deleted successfully.').format(obj.name)
)
return redirect('events:fobi.dashboard')
@login_required
def add_form_element_entry(request,
form_entry_id,
form_element_plugin_uid,
theme=None,
template_name=None):
"""Add form element entry.
:param django.http.HttpRequest request:
:param int form_entry_id:
:param int form_element_plugin_uid:
:param fobi.base.BaseTheme theme: Theme instance.
:param string template_name:
:return django.http.HttpResponse:
"""
try:
form_entry = Event._default_manager \
.prefetch_related('formelemententry_set') \
.get(pk=form_entry_id)
except ObjectDoesNotExist:
raise Http404("Form entry not found.")
form_elements = form_entry.formelemententry_set.all()
user_form_element_plugin_uids = get_user_form_field_plugin_uids(
request.user
)
if form_element_plugin_uid not in user_form_element_plugin_uids:
raise Http404("Plugin does not exist or you are not allowed "
"to use this plugin!")
form_element_plugin_cls = form_element_plugin_registry.get(
form_element_plugin_uid
)
form_element_plugin = form_element_plugin_cls(user=request.user)
form_element_plugin.request = request
form_element_plugin_form_cls = form_element_plugin.get_form()
form = None
obj = FormElementEntry()
obj.form_entry = form_entry
obj.plugin_uid = form_element_plugin_uid
obj.user = request.user
save_object = False
if form_elements.count() < form_entry.collection_quota:
# If plugin doesn't have a form
if not form_element_plugin_form_cls:
save_object = True
# If POST
elif request.method == 'POST':
# If element has a form
form = form_element_plugin.get_initialised_create_form_or_404(
data=request.POST,
files=request.FILES
)
form.validate_plugin_data(form_elements, request=request)
if form.is_valid():
# Saving the plugin form data.
form.save_plugin_data(request=request)
# Getting the plugin data.
obj.plugin_data = form.get_plugin_data(request=request)
if form_elements.count() < form_entry.collection_quota:
save_object = True
else:
return HttpResponseRedirect(
reverse('events:list'))
# If not POST
else:
form = form_element_plugin.get_initialised_create_form_or_404()
else:
return HttpResponseRedirect(
reverse('events:list'))
if save_object:
# Handling the position
position = 1
records = FormElementEntry.objects.filter(form_entry=form_entry) \
.aggregate(models.Max('position'))
if records:
try:
                position = records['position__max'] + 1
except TypeError:
pass
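        # Example (sketch): with three existing elements the aggregate returns
        # {'position__max': 3}, so the new element gets position 4; with none
        # it returns {'position__max': None} and the TypeError branch keeps
        # the default position of 1.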
obj.position = position
# Save the object.
obj.save()
messages.info(
request,
('The form element plugin "{0}" was added '
'successfully.').format(form_element_plugin.name)
)
return redirect(
# "{0}?active_tab=tab-form-elements".format(
reverse('events:fobi.edit_form_entry',
kwargs={'form_entry_id': form_entry_id})
)
# )
context = {
'form': form,
'form_entry': form_entry,
'form_element_plugin': form_element_plugin,
}
    # If given, pass to the template (and override the value set by
    # the context processor).
# if theme:
# context.update({'fobi_theme': theme})
if not template_name:
if not theme:
theme = get_theme(request=request, as_instance=True)
template_name = theme.add_form_element_entry_template
# else:
# template_name = 'k.html'
return render(request, template_name, context)
# else:
# return reverse_lazy('events:list')
# *****************************************************************************
# **************************** Edit form element entry ************************
# *****************************************************************************
@login_required
def edit_form_element_entry(request,
form_element_entry_id,
theme=None,
template_name=None):
"""Edit form element entry.
:param django.http.HttpRequest request:
:param int form_element_entry_id:
:param fobi.base.BaseTheme theme: Theme instance.
:param string template_name:
:return django.http.HttpResponse:
"""
try:
obj = FormElementEntry._default_manager \
.select_related('form_entry',
'form_entry__user') \
.get(pk=form_element_entry_id,
form_entry__user__pk=request.user.pk)
except ObjectDoesNotExist:
raise Http404("Form element entry not found.")
form_entry = obj.form_entry
form_element_plugin = obj.get_plugin(request=request)
form_element_plugin.request = request
FormElementPluginForm = form_element_plugin.get_form()
form = None
if not FormElementPluginForm:
messages.info(
request,
('The form element plugin "{0}" '
'is not configurable!').format(form_element_plugin.name)
)
return redirect('events:fobi.edit_form_entry', form_entry_id=form_entry.pk)
elif request.method == 'POST':
form = form_element_plugin.get_initialised_edit_form_or_404(
data=request.POST,
files=request.FILES
)
form_elements = FormElementEntry._default_manager \
.select_related('form_entry',
'form_entry__user') \
.exclude(pk=form_element_entry_id) \
.filter(form_entry=form_entry)
form.validate_plugin_data(form_elements, request=request)
if form.is_valid():
# Saving the plugin form data.
form.save_plugin_data(request=request)
# Getting the plugin data.
obj.plugin_data = form.get_plugin_data(request=request)
# Save the object.
obj.save()
messages.info(
request,
('The form element plugin "{0}" was edited '
'successfully.').format(form_element_plugin.name)
)
return redirect('events:fobi.edit_form_entry',
form_entry_id=form_entry.pk)
else:
form = form_element_plugin.get_initialised_edit_form_or_404()
form_element_plugin = obj.get_plugin(request=request)
form_element_plugin.request = request
context = {
'form': form,
'form_entry': form_entry,
'form_element_plugin': form_element_plugin,
}
    # If given, pass to the template (and override the value set by
    # the context processor).
if theme:
context.update({'fobi_theme': theme})
if not template_name:
if not theme:
theme = get_theme(request=request, as_instance=True)
template_name = theme.edit_form_element_entry_template
return render(request, template_name, context)
# *****************************************************************************
# **************************** Delete form element entry **********************
# *****************************************************************************
def _delete_plugin_entry_dragos(request,
entry_id,
entry_model_cls,
get_user_plugin_uids_func,
message,
html_anchor):
"""Abstract delete entry.
:param django.http.HttpRequest request:
:param int entry_id:
:param fobi.models.AbstractPluginEntry entry_model_cls: Subclass of
``fobi.models.AbstractPluginEntry``.
:param callable get_user_plugin_uids_func:
    :param str message:
    :param str html_anchor:
    :return django.http.HttpResponse:
"""
try:
obj = entry_model_cls._default_manager \
.select_related('form_entry') \
.get(pk=entry_id,
form_entry__user__pk=request.user.pk)
except ObjectDoesNotExist:
raise Http404(("{0} not found.").format(
entry_model_cls._meta.verbose_name)
)
form_entry = obj.form_entry
plugin = obj.get_plugin(request=request)
plugin.request = request
plugin._delete_plugin_data()
obj.delete()
messages.info(request, message.format(plugin.name))
redirect_url = reverse(
'events:fobi.edit_form_entry', kwargs={'form_entry_id': form_entry.pk}
)
return redirect("{0}{1}".format(redirect_url, html_anchor))
@login_required
def delete_form_element_entry(request, form_element_entry_id):
"""Delete form element entry.
:param django.http.HttpRequest request:
:param int form_element_entry_id:
:return django.http.HttpResponse:
"""
return _delete_plugin_entry(
request=request,
entry_id=form_element_entry_id,
entry_model_cls=FormElementEntry,
get_user_plugin_uids_func=get_user_form_field_plugin_uids,
message=(
'The form element plugin "{0}" was deleted successfully.'
),
html_anchor='?active_tab=tab-form-elements'
)
def form_entry_submitted(request, form_entry_slug=None, template_name=None):
"""Form entry submitted.
:param django.http.HttpRequest request:
:param string form_entry_slug:
:param string template_name:
:return django.http.HttpResponse:
"""
try:
kwargs = {'slug': form_entry_slug}
# if not request.user.is_authenticated():
# kwargs.update({'is_public': True})
form_entry = Event._default_manager \
.select_related('user') \
.get(**kwargs)
except ObjectDoesNotExist:
raise Http404("Form entry not found.")
context = {
'form_entry_slug': form_entry_slug,
'form_entry': form_entry
}
if not template_name:
theme = get_theme(request=request, as_instance=True)
template_name = theme.form_entry_submitted_template
return render(request, template_name, context)
|
|
#!/usr/bin/env python
# native Python imports
import os.path
import sys
import annotations
import subprocess
from gemini_load_chunk import GeminiLoader
import time
from cassandra.cluster import Cluster
from string import strip
def load(parser, args):
#if (args.db is None or args.vcf is None):
if args.vcf is None:
parser.print_help()
exit("ERROR: load needs both a VCF file\n")
start_time = time.time()
    annos = annotations.get_anno_files(args)
# force skipping CADD and GERP if the data files have not been installed
if args.skip_cadd is False:
if 'cadd_score' not in annos:
sys.stderr.write("\nCADD scores are not being loaded because the"
" annotation file could not be found.\n"
"`Run geminicassandra update --dataonly --extra cadd_score`"
" to install the annotation file.\n\n")
args.skip_cadd = True
else:
sys.stderr.write("CADD scores are being loaded (to skip use:--skip-cadd).\n")
if args.skip_gerp_bp is False:
if 'gerp_bp' not in annos:
sys.stderr.write("\nGERP per bp is not being loaded because the annotation file"
" could not be found.\n Run `geminicassandra update --dataonly --extra gerp_bp`"
" to install the annotation file.\n\n")
args.skip_gerp_bp = True
else:
sys.stderr.write("GERP per bp is being loaded (to skip use:--skip-gerp-bp).\n")
    # collect all of the add'l annotation files
    annotations.load_annos(args)
time_2 = start_time
time_3 = start_time
    if args.node_num == 1:
gemini_loader = GeminiLoader(args)
gemini_loader.setup_db()
time_2 = time.time()
gemini_loader.single_core_stuff()
time_3 = time.time()
n_variants = 0
if args.cores > 1:
n_variants = load_multicore(args)
else:
n_variants = load_singlecore(args)
insert_n_variants(map(strip, args.contact_points.split(',')), args.keyspace, n_variants)
end_time = time.time()
total_time = str(end_time - start_time)
db_creation_time = str(time_2 - start_time)
single_core_time = str(time_3 - time_2)
parallel_time = str(end_time - time_3)
print "Finished loading in %s s" % total_time
    if args.timing_log is not None:
with open(args.timing_log, "a") as myfile:
myfile.write(",".join([args.exp_id, total_time, db_creation_time, single_core_time, parallel_time]) + "\n")
def load_singlecore(args):
# create a new geminicassandra loader and populate
# the geminicassandra db and files from the VCF
gemini_loader = GeminiLoader(args)
gemini_loader.connect_to_db()
if not args.no_genotypes and not args.no_load_genotypes:
gemini_loader._init_sample_gt_counts()
gemini_loader.populate_from_vcf()
'''if not args.skip_gene_tables and not args.test_mode:
gemini_loader.update_gene_table()'''
if not args.no_genotypes and not args.no_load_genotypes:
gemini_loader.store_sample_gt_counts()
if not args.test_mode:
gemini_loader.disconnect()
#geminicassandra.add_extras(args.db, [args.db])
def insert_n_variants(db, ks, n):
session = Cluster(db).connect(ks)
from database_cassandra import insert
insert(session, 'row_counts', ['table_name', 'n_rows'], ['variants', n])
def load_multicore(args):
grabix_file = bgzip(args.vcf)
return load_chunks_multicore(grabix_file, args)
# geminicassandra.add_extras(args.db, chunks)
def get_chunk_name(chunk):
return "--chunkdb " + chunk
def load_chunks_multicore(grabix_file, args):
cores = args.cores
# specify the PED file if given one
ped_file = ""
if args.ped_file is not None:
ped_file = "-p " + args.ped_file
# specify the annotation type if given one
anno_type = ""
if args.anno_type is not None:
anno_type = "-t " + args.anno_type
no_genotypes = ""
if args.no_genotypes is True:
no_genotypes = "--no-genotypes"
no_load_genotypes = ""
if args.no_load_genotypes is True:
no_load_genotypes = "--no-load-genotypes"
skip_gerp_bp = ""
if args.skip_gerp_bp is True:
skip_gerp_bp = "--skip-gerp-bp"
skip_gene_tables = ""
if args.skip_gene_tables is True:
skip_gene_tables = "--skip-gene-tables"
skip_cadd = ""
if args.skip_cadd is True:
skip_cadd = "--skip-cadd"
test_mode = ""
if args.test_mode is True:
test_mode = "--test-mode"
passonly = ""
if args.passonly is True:
passonly = "--passonly"
skip_info_string = ""
if args.skip_info_string is True:
skip_info_string = "--skip-info-string"
contact_points = "-db " + args.contact_points
keyspace = "-ks " + args.keyspace
buffer_size = "--buffer-size " + str(args.buffer_size)
max_queue = "--max_queue " + str(args.max_queue)
node_num = "--node_num "
submit_command = "{cmd}"
vcf, _ = os.path.splitext(grabix_file)
n_lines, chunk_steps = get_chunk_steps(grabix_file, args)
chunk_vcfs = []
procs = []
for chunk_num, chunk in chunk_steps:
node_num = node_num + str(chunk_num)
start, stop = chunk
gemini_load = gemini_pipe_load_cmd().format(**locals())
procs.append(subprocess.Popen(submit_command.format(cmd=gemini_load),
shell=True))
chunk_vcf = vcf + ".chunk" + str(chunk_num)
chunk_vcfs.append(chunk_vcf)
node_num = "--node_num "
wait_until_finished(procs)
print "Done loading {0} variants in {1} chunks.".format(stop, chunk_num+1)
return n_lines
def load_chunk(chunk_step, kwargs):
chunk_num, chunk = chunk_step
start, stop = chunk
args = combine_dicts(locals(), kwargs)
gemini_load = gemini_pipe_load_cmd().format(**args)
subprocess.check_call(gemini_load, shell=True)
chunk_db = args["vcf"] + ".chunk" + str(chunk_num) + ".db"
return chunk_db
def wait_until_finished(procs):
[p.wait() for p in procs]
def gemini_pipe_load_cmd():
grabix_cmd = "grabix grab {grabix_file} {start} {stop}"
gemini_load_cmd = ("geminicassandra load_chunk -v - {anno_type} {ped_file}"
" {no_genotypes} {no_load_genotypes} {no_genotypes}"
" {skip_gerp_bp} {skip_gene_tables} {skip_cadd}"
" {passonly} {skip_info_string} {test_mode}"
" -o {start} {contact_points} {keyspace} {buffer_size} {max_queue} {node_num}")
return " | ".join([grabix_cmd, gemini_load_cmd])
def get_chunk_steps(grabix_file, args):
index_file = grabix_index(grabix_file)
num_lines = get_num_lines(index_file)
print "Importing %d variants." % num_lines
chunk_size = int(num_lines) / int(args.cores * args.total_nodes)
print "Breaking {0} into {1} chunks.".format(grabix_file, args.cores)
offset = (args.node_num - 1) * args.cores
starts = []
stops = []
for chunk in range(offset, offset + int(args.cores)):
start = (chunk * chunk_size) + 1
stop = start + chunk_size - 1
# make sure the last chunk covers the remaining lines
if chunk >= (args.cores * args.total_nodes - 1) and stop < num_lines:
stop = num_lines
print "Chunk %d from line %d to %d. Length = %d" % (chunk, start, stop, stop - start)
starts.append(start)
stops.append(stop)
return num_lines, list(enumerate(zip(starts, stops)))
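# Worked example (sketch): num_lines=1000, cores=2, total_nodes=2 gives
# chunk_size=250; on node 2 the offset is (2 - 1) * 2 = 2, so that node takes
# chunk 2 (lines 501-750) and chunk 3 (lines 751-1000). When integer division
# leaves a remainder, the last chunk's stop is stretched to num_lines.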
def get_num_lines(index_file):
with open(index_file) as index_handle:
index_handle.next()
num_lines = int(index_handle.next().strip())
return num_lines
def grabix_index(fname):
if not which("grabix"):
print_cmd_not_found_and_exit("grabix")
index_file = fname + ".gbi"
if file_exists(index_file):
return index_file
print "Indexing {0} with grabix.".format(fname)
subprocess.check_call("grabix index {fname}".format(fname=fname), shell=True)
return index_file
def bgzip(fname):
if not which("bgzip"):
print_cmd_not_found_and_exit("bgzip")
if is_gz_file(fname):
return fname
vcf_time = os.path.getmtime(fname)
bgzip_file = fname + ".gz"
if not file_exists(bgzip_file) or \
(file_exists(bgzip_file) and os.path.getmtime(bgzip_file) < vcf_time):
print "Bgzipping {0} into {1}.".format(fname, fname + ".gz")
subprocess.check_call("bgzip -c {fname} > \
{fname}.gz".format(fname=fname),
shell=True)
elif file_exists(bgzip_file) and os.path.getmtime(bgzip_file) > vcf_time:
print "Loading with existing bgzip ({0}) version of {1}.".format(fname + ".gz", fname)
return bgzip_file
def is_gz_file(fname):
_, ext = os.path.splitext(fname)
if ext == ".gz":
return True
else:
return False
def get_submit_command(args):
return "{cmd}"
def file_exists(fname):
"""Check if a file exists and is non-empty.
"""
return os.path.exists(fname) and os.path.getsize(fname) > 0
def which(program):
""" returns the path to an executable or None if it can't be found
http://stackoverflow.com/questions/377017/test-if-executable-exists-in-python
"""
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
def combine_dicts(d1, d2):
return dict(d1.items() + d2.items())
def get_ipython_args(args):
return (args.scheduler, args.queue, args.cores)
def print_cmd_not_found_and_exit(cmd):
sys.stderr.write("Cannot find {cmd}, install it or put it in your "
"path.".format(cmd=cmd))
exit(1)
def use_scheduler(args):
return bool(args.scheduler)
|
|
"""Geometrical Planes.
Contains
========
Plane
"""
from __future__ import division, print_function
from sympy.core import Dummy, Rational, S, Symbol
from sympy.core.compatibility import is_sequence
from sympy.functions.elementary.trigonometric import acos, asin, sqrt
from sympy.matrices import Matrix
from sympy.polys.polytools import cancel
from sympy.solvers import solve
from sympy.utilities.misc import filldedent
from .entity import GeometryEntity
from .point import Point, Point3D
from .line3d import Line3D, LinearEntity3D, Ray3D, Segment3D
from .line import Line, Ray, Segment
class Plane(GeometryEntity):
"""
A plane is a flat, two-dimensional surface. A plane is the two-dimensional
analogue of a point (zero-dimensions), a line (one-dimension) and a solid
    (three-dimensions). A plane can generally be constructed from two types
    of input: either three non-collinear points, or a point together with the
    plane's normal vector.
Attributes
==========
p1
normal_vector
Examples
========
>>> from sympy import Plane, Point3D
>>> from sympy.abc import x
>>> Plane(Point3D(1, 1, 1), Point3D(2, 3, 4), Point3D(2, 2, 2))
Plane(Point3D(1, 1, 1), (-1, 2, -1))
>>> Plane((1, 1, 1), (2, 3, 4), (2, 2, 2))
Plane(Point3D(1, 1, 1), (-1, 2, -1))
>>> Plane(Point3D(1, 1, 1), normal_vector=(1,4,7))
Plane(Point3D(1, 1, 1), (1, 4, 7))
"""
def __new__(cls, p1, a=None, b=None, **kwargs):
p1 = Point3D(p1)
if a and b:
p2 = Point3D(a)
p3 = Point3D(b)
if Point3D.are_collinear(p1, p2, p3):
raise ValueError('Enter three non-collinear points')
a = p1.direction_ratio(p2)
b = p1.direction_ratio(p3)
normal_vector = tuple(Matrix(a).cross(Matrix(b)))
else:
a = kwargs.pop('normal_vector', a)
if is_sequence(a) and len(a) == 3:
normal_vector = Point3D(a).args
else:
raise ValueError(filldedent('''
Either provide 3 3D points or a point with a
normal vector expressed as a sequence of length 3'''))
return GeometryEntity.__new__(cls, p1, normal_vector, **kwargs)
@property
def p1(self):
"""The only defining point of the plane. Others can be obtained from the
arbitrary_point method.
See Also
========
sympy.geometry.point.Point3D
Examples
========
>>> from sympy import Point3D, Plane
>>> a = Plane(Point3D(1, 1, 1), Point3D(2, 3, 4), Point3D(2, 2, 2))
>>> a.p1
Point3D(1, 1, 1)
"""
return self.args[0]
@property
def normal_vector(self):
"""Normal vector of the given plane.
Examples
========
>>> from sympy import Point3D, Plane
>>> a = Plane(Point3D(1, 1, 1), Point3D(2, 3, 4), Point3D(2, 2, 2))
>>> a.normal_vector
(-1, 2, -1)
>>> a = Plane(Point3D(1, 1, 1), normal_vector=(1, 4, 7))
>>> a.normal_vector
(1, 4, 7)
"""
return self.args[1]
def equation(self, x=None, y=None, z=None):
"""The equation of the Plane.
Examples
========
>>> from sympy import Point3D, Plane
>>> a = Plane(Point3D(1, 1, 2), Point3D(2, 4, 7), Point3D(3, 5, 1))
>>> a.equation()
-23*x + 11*y - 2*z + 16
>>> a = Plane(Point3D(1, 4, 2), normal_vector=(6, 6, 6))
>>> a.equation()
6*x + 6*y + 6*z - 42
"""
x, y, z = [i if i else Symbol(j, real=True) for i, j in zip((x, y, z), 'xyz')]
a = Point3D(x, y, z)
b = self.p1.direction_ratio(a)
c = self.normal_vector
return (sum(i*j for i, j in zip(b, c)))
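    # Sanity check (sketch): the defining point always satisfies the equation;
    # substituting p1 = (1, 4, 2) into 6*x + 6*y + 6*z - 42 from the docstring
    # example gives 6 + 24 + 12 - 42 = 0.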
def projection(self, pt):
"""Project the given point onto the plane along the plane normal.
Parameters
==========
Point or Point3D
Returns
=======
Point3D
Examples
========
>>> from sympy import Plane, Point, Point3D
>>> A = Plane(Point3D(1, 1, 2), normal_vector=(1, 1, 1))
The projection is along the normal vector direction, not the z
axis, so (1, 1) does not project to (1, 1, 2) on the plane A:
>>> b = Point(1, 1)
>>> A.projection(b)
Point3D(5/3, 5/3, 2/3)
>>> _ in A
True
But the point (1, 1, 2) projects to (1, 1) on the XY-plane:
>>> XY = Plane((0, 0, 0), (0, 0, 1))
>>> XY.projection((1, 1, 2))
Point3D(1, 1, 0)
"""
rv = Point3D(pt)
if rv in self:
return rv
return self.intersection(Line3D(rv, rv + Point3D(self.normal_vector)))[0]
def projection_line(self, line):
"""Project the given line onto the plane through the normal plane
containing the line.
Parameters
==========
LinearEntity or LinearEntity3D
Returns
=======
Point3D, Line3D, Ray3D or Segment3D
Notes
=====
        For the interaction between 2D and 3D lines (segments, rays), you
        should convert the line to 3D by using this method. For example, to
        find the intersection between a 2D and a 3D line, convert the 2D line
        to a 3D line by projecting it onto a required plane and then proceed
        to find the intersection between those lines.
Examples
========
>>> from sympy import Plane, Line, Line3D, Point, Point3D
>>> a = Plane(Point3D(1, 1, 1), normal_vector=(1, 1, 1))
>>> b = Line(Point(1, 1), Point(2, 2))
>>> a.projection_line(b)
Line3D(Point3D(4/3, 4/3, 1/3), Point3D(5/3, 5/3, -1/3))
>>> c = Line3D(Point3D(1, 1, 1), Point3D(2, 2, 2))
>>> a.projection_line(c)
Point3D(1, 1, 1)
"""
from sympy.geometry.line import LinearEntity
from sympy.geometry.line3d import LinearEntity3D
if not isinstance(line, (LinearEntity, LinearEntity3D)):
raise NotImplementedError('Enter a linear entity only')
a, b = self.projection(line.p1), self.projection(line.p2)
if a == b:
# projection does not imply intersection so for
# this case (line parallel to plane's normal) we
# return the projection point
return a
if isinstance(line, (Line, Line3D)):
return Line3D(a, b)
if isinstance(line, (Ray, Ray3D)):
return Ray3D(a, b)
if isinstance(line, (Segment, Segment3D)):
return Segment3D(a, b)
def is_parallel(self, l):
"""Is the given geometric entity parallel to the plane?
Parameters
==========
LinearEntity3D or Plane
Returns
=======
Boolean
Examples
========
>>> from sympy import Plane, Point3D
>>> a = Plane(Point3D(1,4,6), normal_vector=(2, 4, 6))
>>> b = Plane(Point3D(3,1,3), normal_vector=(4, 8, 12))
>>> a.is_parallel(b)
True
"""
from sympy.geometry.line3d import LinearEntity3D
if isinstance(l, LinearEntity3D):
a = l.direction_ratio
b = self.normal_vector
c = sum([i*j for i, j in zip(a, b)])
if c == 0:
return True
else:
return False
elif isinstance(l, Plane):
a = Matrix(l.normal_vector)
b = Matrix(self.normal_vector)
if a.cross(b).is_zero:
return True
else:
return False
def is_perpendicular(self, l):
"""is the given geometric entity perpendicualar to the given plane?
Parameters
==========
LinearEntity3D or Plane
Returns
=======
Boolean
Examples
========
>>> from sympy import Plane, Point3D
>>> a = Plane(Point3D(1,4,6), normal_vector=(2, 4, 6))
>>> b = Plane(Point3D(2, 2, 2), normal_vector=(-1, 2, -1))
>>> a.is_perpendicular(b)
True
"""
from sympy.geometry.line3d import LinearEntity3D
if isinstance(l, LinearEntity3D):
a = Matrix(l.direction_ratio)
b = Matrix(self.normal_vector)
if a.cross(b).is_zero:
return True
else:
return False
elif isinstance(l, Plane):
a = Matrix(l.normal_vector)
b = Matrix(self.normal_vector)
if a.dot(b) == 0:
return True
else:
return False
else:
return False
def distance(self, o):
"""Distance beteen the plane and another geometric entity.
Parameters
==========
Point3D, LinearEntity3D, Plane.
Returns
=======
distance
Notes
=====
        This method accepts only 3D entities as its parameter, but if you want
to calculate the distance between a 2D entity and a plane you should
first convert to a 3D entity by projecting onto a desired plane and
then proceed to calculate the distance.
Examples
========
>>> from sympy import Point, Point3D, Line, Line3D, Plane
>>> a = Plane(Point3D(1, 1, 1), normal_vector=(1, 1, 1))
>>> b = Point3D(1, 2, 3)
>>> a.distance(b)
sqrt(3)
>>> c = Line3D(Point3D(2, 3, 1), Point3D(1, 2, 2))
>>> a.distance(c)
0
"""
from sympy.geometry.line3d import LinearEntity3D
if self.intersection(o) != []:
return S.Zero
if isinstance(o, Point3D):
x, y, z = map(Dummy, 'xyz')
k = self.equation(x, y, z)
a, b, c = [k.coeff(i) for i in (x, y, z)]
d = k.xreplace({x: o.args[0], y: o.args[1], z: o.args[2]})
t = abs(d/sqrt(a**2 + b**2 + c**2))
return t
if isinstance(o, LinearEntity3D):
a, b = o.p1, self.p1
c = Matrix(a.direction_ratio(b))
d = Matrix(self.normal_vector)
e = c.dot(d)
f = sqrt(sum([i**2 for i in self.normal_vector]))
return abs(e / f)
if isinstance(o, Plane):
a, b = o.p1, self.p1
c = Matrix(a.direction_ratio(b))
d = Matrix(self.normal_vector)
e = c.dot(d)
f = sqrt(sum([i**2 for i in self.normal_vector]))
return abs(e / f)
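    # Plane-to-plane case (sketch, not covered by the doctests above): for the
    # parallel planes Plane(Point3D(1, 1, 1), normal_vector=(1, 1, 1)) and
    # Plane(Point3D(2, 2, 2), normal_vector=(1, 1, 1)), the branch above gives
    # |(-1, -1, -1).(1, 1, 1)| / sqrt(3) = 3/sqrt(3) = sqrt(3).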
def angle_between(self, o):
"""Angle between the plane and other geometric entity.
Parameters
==========
LinearEntity3D, Plane.
Returns
=======
angle : angle in radians
Notes
=====
        This method accepts only 3D entities as its parameter, but if you want
to calculate the angle between a 2D entity and a plane you should
first convert to a 3D entity by projecting onto a desired plane and
then proceed to calculate the angle.
Examples
========
>>> from sympy import Point3D, Line3D, Plane
>>> a = Plane(Point3D(1, 2, 2), normal_vector=(1, 2, 3))
>>> b = Line3D(Point3D(1, 3, 4), Point3D(2, 2, 2))
>>> a.angle_between(b)
-asin(sqrt(21)/6)
"""
from sympy.geometry.line3d import LinearEntity3D
if isinstance(o, LinearEntity3D):
a = Matrix(self.normal_vector)
b = Matrix(o.direction_ratio)
c = a.dot(b)
d = sqrt(sum([i**2 for i in self.normal_vector]))
e = sqrt(sum([i**2 for i in o.direction_ratio]))
return asin(c/(d*e))
if isinstance(o, Plane):
a = Matrix(self.normal_vector)
b = Matrix(o.normal_vector)
c = a.dot(b)
d = sqrt(sum([i**2 for i in self.normal_vector]))
e = sqrt(sum([i**2 for i in o.normal_vector]))
return acos(c/(d*e))
@staticmethod
def are_concurrent(*planes):
"""Is a sequence of Planes concurrent?
        Two or more Planes are concurrent if their intersection
        is a common line.
Parameters
==========
planes: list
Returns
=======
Boolean
Examples
========
>>> from sympy import Plane, Point3D
>>> a = Plane(Point3D(5, 0, 0), normal_vector=(1, -1, 1))
>>> b = Plane(Point3D(0, -2, 0), normal_vector=(3, 1, 1))
>>> c = Plane(Point3D(0, -1, 0), normal_vector=(5, -1, 9))
>>> Plane.are_concurrent(a, b)
True
>>> Plane.are_concurrent(a, b, c)
False
"""
planes = set(planes)
for i in planes:
if not isinstance(i, Plane):
raise ValueError('All objects should be Planes but got %s' % i.func)
if len(planes) < 2:
return False
planes = list(planes)
first = planes.pop(0)
sol = first.intersection(planes[0])
if sol == []:
return False
else:
line = sol[0]
for i in planes[1:]:
l = first.intersection(i)
if not l or not l[0] in line:
return False
return True
def perpendicular_line(self, pt):
"""A line perpendicular to the given plane.
Parameters
==========
pt: Point3D
Returns
=======
Line3D
Examples
========
>>> from sympy import Plane, Point3D, Line3D
>>> a = Plane(Point3D(1,4,6), normal_vector=(2, 4, 6))
>>> a.perpendicular_line(Point3D(9, 8, 7))
Line3D(Point3D(9, 8, 7), Point3D(11, 12, 13))
"""
a = self.normal_vector
return Line3D(pt, direction_ratio=a)
def parallel_plane(self, pt):
"""
Plane parallel to the given plane and passing through the point pt.
Parameters
==========
pt: Point3D
Returns
=======
Plane
Examples
========
>>> from sympy import Plane, Point3D
>>> a = Plane(Point3D(1, 4, 6), normal_vector=(2, 4, 6))
>>> a.parallel_plane(Point3D(2, 3, 5))
Plane(Point3D(2, 3, 5), (2, 4, 6))
"""
a = self.normal_vector
return Plane(pt, normal_vector=a)
def perpendicular_plane(self, *pts):
"""
        Return a perpendicular plane passing through the given points. If the
direction ratio between the points is the same as the Plane's normal
vector then, to select from the infinite number of possible planes,
a third point will be chosen on the z-axis (or the y-axis
if the normal vector is already parallel to the z-axis). If less than
two points are given they will be supplied as follows: if no point is
given then pt1 will be self.p1; if a second point is not given it will
be a point through pt1 on a line parallel to the z-axis (if the normal
is not already the z-axis, otherwise on the line parallel to the
y-axis).
Parameters
==========
pts: 0, 1 or 2 Point3D
Returns
=======
Plane
Examples
========
>>> from sympy import Plane, Point3D, Line3D
>>> a, b = Point3D(0, 0, 0), Point3D(0, 1, 0)
>>> Z = (0, 0, 1)
>>> p = Plane(a, normal_vector=Z)
>>> p.perpendicular_plane(a, b)
Plane(Point3D(0, 0, 0), (1, 0, 0))
"""
if len(pts) > 2:
raise ValueError('No more than 2 pts should be provided.')
pts = list(pts)
if len(pts) == 0:
pts.append(self.p1)
if len(pts) == 1:
x, y, z = self.normal_vector
if x == y == 0:
dir = (0, 1, 0)
else:
dir = (0, 0, 1)
pts.append(pts[0] + Point3D(*dir))
p1, p2 = [Point3D(i) for i in pts]
l = Line3D(p1, p2)
n = Line3D(p1, direction_ratio=self.normal_vector)
if l in n: # XXX should an error be raised instead?
# there are infinitely many perpendicular planes;
x, y, z = self.normal_vector
if x == y == 0:
# the z axis is the normal so pick a pt on the y-axis
p3 = Point3D(0, 1, 0) # case 1
else:
# else pick a pt on the z axis
p3 = Point3D(0, 0, 1) # case 2
# in case that point is already given, move it a bit
if p3 in l:
p3 *= 2 # case 3
else:
p3 = p1 + Point3D(*self.normal_vector) # case 4
return Plane(p1, p2, p3)
def random_point(self, seed=None):
""" Returns a random point on the Plane.
Returns
=======
Point3D
"""
import random
if seed is not None:
rng = random.Random(seed)
else:
rng = random
t = Dummy('t')
return self.arbitrary_point(t).subs(t, Rational(rng.random()))
def arbitrary_point(self, t=None):
""" Returns an arbitrary point on the Plane; varying `t` from 0 to 2*pi
will move the point in a circle of radius 1 about p1 of the Plane.
Examples
========
>>> from sympy.geometry.plane import Plane
>>> from sympy.abc import t
>>> p = Plane((0, 0, 0), (0, 0, 1), (0, 1, 0))
>>> p.arbitrary_point(t)
Point3D(0, cos(t), sin(t))
>>> _.distance(p.p1).simplify()
1
Returns
=======
Point3D
"""
from sympy import cos, sin
t = t or Dummy('t')
x, y, z = self.normal_vector
a, b, c = self.p1.args
if x == y == 0:
return Point3D(a + cos(t), b + sin(t), c)
elif x == z == 0:
return Point3D(a + cos(t), b, c + sin(t))
elif y == z == 0:
return Point3D(a, b + cos(t), c + sin(t))
m = Dummy()
p = self.projection(Point3D(self.p1.x + cos(t), self.p1.y + sin(t), 0)*m)
# TODO: Replace solve with solveset, when this line is tested
return p.xreplace({m: solve(p.distance(self.p1) - 1, m)[0]})
def intersection(self, o):
""" The intersection with other geometrical entity.
Parameters
==========
Point, Point3D, LinearEntity, LinearEntity3D, Plane
Returns
=======
List
Examples
========
>>> from sympy import Point, Point3D, Line, Line3D, Plane
>>> a = Plane(Point3D(1, 2, 3), normal_vector=(1, 1, 1))
>>> b = Point3D(1, 2, 3)
>>> a.intersection(b)
[Point3D(1, 2, 3)]
>>> c = Line3D(Point3D(1, 4, 7), Point3D(2, 2, 2))
>>> a.intersection(c)
[Point3D(2, 2, 2)]
>>> d = Plane(Point3D(6, 0, 0), normal_vector=(2, -5, 3))
>>> e = Plane(Point3D(2, 0, 0), normal_vector=(3, 4, -3))
>>> d.intersection(e)
[Line3D(Point3D(78/23, -24/23, 0), Point3D(147/23, 321/23, 23))]
"""
from sympy.geometry.line3d import LinearEntity3D
from sympy.geometry.line import LinearEntity
if isinstance(o, (Point, Point3D)):
if o in self:
return [Point3D(o)]
else:
return []
if isinstance(o, (LinearEntity, LinearEntity3D)):
if o in self:
p1, p2 = o.p1, o.p2
if isinstance(o, Segment):
o = Segment3D(p1, p2)
elif isinstance(o, Ray):
o = Ray3D(p1, p2)
elif isinstance(o, Line):
o = Line3D(p1, p2)
else:
raise ValueError('unhandled linear entity: %s' % o.func)
return [o]
else:
x, y, z = map(Dummy, 'xyz')
t = Dummy() # unnamed else it may clash with a symbol in o
a = Point3D(o.arbitrary_point(t))
b = self.equation(x, y, z)
# TODO: Replace solve with solveset, when this line is tested
c = solve(b.subs(list(zip((x, y, z), a.args))), t)
if not c:
return []
else:
p = a.subs(t, c[0])
if p not in self:
return [] # e.g. a segment might not intersect a plane
return [p]
if isinstance(o, Plane):
if o == self:
return [self]
if self.is_parallel(o):
return []
else:
x, y, z = map(Dummy, 'xyz')
a, b = Matrix([self.normal_vector]), Matrix([o.normal_vector])
c = list(a.cross(b))
d = self.equation(x, y, z)
e = o.equation(x, y, z)
# TODO: Replace solve with solveset, when this line is tested
f = solve((d.subs(z, 0), e.subs(z, 0)), [x, y])
if len(f) == 2:
return [Line3D(Point3D(f[x], f[y], 0), direction_ratio=c)]
# TODO: Replace solve with solveset, when this line is tested
                g = solve((d.subs(y, 0), e.subs(y, 0)), [x, z])
if len(g) == 2:
return [Line3D(Point3D(g[x], 0, g[z]), direction_ratio=c)]
# TODO: Replace solve with solveset, when this line is tested
                h = solve((d.subs(x, 0), e.subs(x, 0)), [y, z])
if len(h) == 2:
return [Line3D(Point3D(0, h[y], h[z]), direction_ratio=c)]
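    # The plane-plane branch above picks a concrete point on the intersection
    # line by pinning one coordinate to zero (z first, then y, then x; the
    # line must cross at least one coordinate plane) and takes the line's
    # direction from the cross product of the two normal vectors.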
def __contains__(self, o):
from sympy.geometry.line3d import LinearEntity3D
from sympy.geometry.line import LinearEntity
x, y, z = map(Dummy, 'xyz')
k = self.equation(x, y, z)
if isinstance(o, Point):
o = Point3D(o)
if isinstance(o, Point3D):
d = k.xreplace(dict(zip((x, y, z), o.args)))
return d.equals(0)
elif isinstance(o, (LinearEntity, LinearEntity3D)):
t = Dummy()
d = Point3D(o.arbitrary_point(t))
e = k.subs([(x, d.x), (y, d.y), (z, d.z)])
return e.equals(0)
else:
return False
def is_coplanar(self, o):
""" Returns True if `o` is coplanar with self, else False.
Examples
========
>>> from sympy import Plane, Point3D
>>> o = (0, 0, 0)
>>> p = Plane(o, (1, 1, 1))
>>> p2 = Plane(o, (2, 2, 2))
>>> p == p2
False
>>> p.is_coplanar(p2)
True
"""
if isinstance(o, Plane):
x, y, z = map(Dummy, 'xyz')
return not cancel(self.equation(x, y, z)/o.equation(x, y, z)).has(x, y, z)
if isinstance(o, Point3D):
return o in self
elif isinstance(o, LinearEntity3D):
            return all(i in self for i in o)
elif isinstance(o, GeometryEntity): # XXX should only be handling 2D objects now
return all(i == 0 for i in self.normal_vector[:2])
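    # Example for the LinearEntity3D branch (sketch): both defining points of
    # Line3D(Point3D(1, -1, 0), Point3D(0, 1, -1)) satisfy x + y + z = 0, so
    # the line is coplanar with Plane((0, 0, 0), normal_vector=(1, 1, 1)).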
|
|
# Copyright 2014 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module: security_monkey.watchers.security_group
:platform: Unix
.. version:: $$VERSION$$
.. moduleauthor:: Patrick Kelley <pkelley@netflix.com> @monkeysecurity
"""
from security_monkey.watcher import Watcher
from security_monkey.watcher import ChangeItem
from security_monkey.constants import TROUBLE_REGIONS
from security_monkey.exceptions import BotoConnectionIssue
from security_monkey.datastore import Account
from security_monkey import app, ARN_PREFIX
class SecurityGroup(Watcher):
index = 'securitygroup'
i_am_singular = 'Security Group'
i_am_plural = 'Security Groups'
def __init__(self, accounts=None, debug=False):
super(SecurityGroup, self).__init__(accounts=accounts, debug=debug)
# TODO: grab those from DB
self.instance_detail = app.config.get("SECURITYGROUP_INSTANCE_DETAIL", 'FULL')
self.honor_ephemerals = True
self.ephemeral_paths = ["assigned_to"]
def get_detail_level(self):
""" Return details level: 'NONE' / 'SUMMARY' / 'FULL' """
if self.instance_detail:
return self.instance_detail
else:
return 'NONE'
def _build_rule(self, rule, rule_type):
        rule_list = []
        # base rule information
rule_config = {
"ip_protocol": rule.get('IpProtocol'),
"rule_type": rule_type,
"from_port": rule.get('FromPort'),
"to_port": rule.get('ToPort'),
"cidr_ip": None,
"owner_id": None,
"group_id": None,
"name": None
}
        for ips in rule.get('IpRanges', []):
            # make a copy of the base rule info.
            new_rule = rule_config.copy()
            new_rule['cidr_ip'] = ips.get('CidrIp')
            rule_list.append(new_rule)
        for ips in rule.get('Ipv6Ranges', []):
            # make a copy of the base rule info.
            new_rule = rule_config.copy()
            new_rule['cidr_ip'] = ips.get('CidrIpv6')
            rule_list.append(new_rule)
        for user_id_group_pairs in rule.get('UserIdGroupPairs', []):
            # make a copy of the base rule info.
            new_rule = rule_config.copy()
            new_rule['owner_id'] = user_id_group_pairs.get('UserId')
            new_rule['group_id'] = user_id_group_pairs.get('GroupId')
            # Fall back to the rule's GroupName when the group pair omits it
            # (needed for CIS 4.4 rule validation).
            new_rule['name'] = user_id_group_pairs.get('GroupName') or rule.get('GroupName')
            rule_list.append(new_rule)
return rule_list
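    # Example (sketch): an ingress rule such as
    #     {'IpProtocol': 'tcp', 'FromPort': 443, 'ToPort': 443,
    #      'IpRanges': [{'CidrIp': '10.0.0.0/8'}], 'Ipv6Ranges': [],
    #      'UserIdGroupPairs': []}
    # flattens into a single rule dict with cidr_ip='10.0.0.0/8' and
    # rule_type='ingress'; every CIDR range, IPv6 range and group pair gets
    # its own copy of the base rule info.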
def slurp(self):
"""
:returns: item_list - list of Security Groups.
:returns: exception_map - A dict where the keys are a tuple containing the
location of the exception and the value is the actual exception
"""
self.prep_for_slurp()
item_list = []
exception_map = {}
from security_monkey.common.sts_connect import connect
for account in self.accounts:
account_db = Account.query.filter(Account.name == account).first()
account_number = account_db.identifier
try:
ec2 = connect(account, 'ec2')
regions = ec2.get_all_regions()
except Exception as e: # EC2ResponseError
# Some Accounts don't subscribe to EC2 and will throw an exception here.
exc = BotoConnectionIssue(str(e), self.index, account, None)
self.slurp_exception((self.index, account), exc, exception_map, source="{}-watcher".format(self.index))
continue
for region in regions:
app.logger.debug("Checking {}/{}/{}".format(self.index, account, region.name))
try:
rec2 = connect(account, 'boto3.ec2.client', region=region)
# Retrieve security groups here
sgs = self.wrap_aws_rate_limited_call(
rec2.describe_security_groups
)
if self.get_detail_level() != 'NONE':
# We fetch tags here to later correlate instances
tags = self.wrap_aws_rate_limited_call(
rec2.describe_tags
)
# Retrieve all instances
instances = self.wrap_aws_rate_limited_call(
rec2.describe_instances
)
app.logger.info("Number of instances found in region {}: {}".format(region.name, len(instances)))
except Exception as e:
if region.name not in TROUBLE_REGIONS:
exc = BotoConnectionIssue(str(e), self.index, account, region.name)
self.slurp_exception((self.index, account, region.name), exc, exception_map,
source="{}-watcher".format(self.index))
continue
app.logger.debug("Found {} {}".format(len(sgs), self.i_am_plural))
if self.get_detail_level() != 'NONE':
app.logger.info("Creating mapping of sg_id's to instances")
# map sgid => instance
sg_instances = {}
for reservation in instances['Reservations']:
for instance in reservation['Instances']:
for group in instance['SecurityGroups']:
if group['GroupId'] not in sg_instances:
sg_instances[group['GroupId']] = [instance]
else:
sg_instances[group['GroupId']].append(instance)
app.logger.info("Creating mapping of instance_id's to tags")
# map instanceid => tags
instance_tags = {}
for tag in tags['Tags']:
if tag['ResourceId'] not in instance_tags:
instance_tags[tag['ResourceId']] = [tag]
else:
instance_tags[tag['ResourceId']].append(tag)
app.logger.info("Done creating mappings")
for sg in sgs['SecurityGroups']:
if self.check_ignore_list(sg['GroupName']):
continue
arn = ARN_PREFIX + ':ec2:{region}:{account_number}:security-group/{security_group_id}'.format(
region=region.name,
account_number=account_number,
security_group_id=sg['GroupId'])
item_config = {
"id": sg['GroupId'],
"name": sg['GroupName'],
"description": sg.get('Description'),
"vpc_id": sg.get('VpcId'),
"owner_id": sg.get('OwnerId'),
"region": region.name,
"rules": [],
"assigned_to": None,
"arn": arn
}
for rule in sg['IpPermissions']:
                        # Supply the SG's own GroupName when the rule omits it
                        # (needed for CIS 4.4 rule validation).
                        if not rule.get('GroupName'):
                            rule['GroupName'] = sg['GroupName']
                        item_config['rules'] += self._build_rule(rule, "ingress")
for rule in sg['IpPermissionsEgress']:
                        # Supply the SG's own GroupName when the rule omits it
                        # (needed for CIS 4.4 rule validation).
                        if not rule.get('GroupName'):
                            rule['GroupName'] = sg['GroupName']
                        item_config['rules'] += self._build_rule(rule, "egress")
item_config['rules'] = sorted(item_config['rules'])
if self.get_detail_level() == 'SUMMARY':
if sg['GroupId'] in sg_instances:
item_config["assigned_to"] = "{} instances".format(len(sg_instances[sg['GroupId']]))
else:
item_config["assigned_to"] = "0 instances"
elif self.get_detail_level() == 'FULL':
assigned_to = []
if sg['GroupId'] in sg_instances:
for instance in sg_instances[sg['GroupId']]:
if instance['InstanceId'] in instance_tags:
tagdict = {tag['Key']: tag['Value'] for tag in instance_tags[instance['InstanceId']]}
tagdict["instance_id"] = instance['InstanceId']
else:
tagdict = {"instance_id": instance['InstanceId']}
assigned_to.append(tagdict)
item_config["assigned_to"] = assigned_to
# Issue 40: Security Groups can have a name collision between EC2 and
# VPC or between different VPCs within a given region.
if sg.get('VpcId'):
sg_name = "{0} ({1} in {2})".format(sg['GroupName'], sg['GroupId'], sg['VpcId'])
else:
sg_name = "{0} ({1})".format(sg['GroupName'], sg['GroupId'])
item = SecurityGroupItem(region=region.name, account=account, name=sg_name, arn=arn, config=item_config)
item_list.append(item)
return item_list, exception_map
class SecurityGroupItem(ChangeItem):
def __init__(self, region=None, account=None, name=None, arn=None, config=None):
super(SecurityGroupItem, self).__init__(
index=SecurityGroup.index,
region=region,
account=account,
name=name,
arn=arn,
new_config=config if config is not None else {})
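# A minimal sketch (hypothetical data, not part of the watcher) of the two
# lookup tables slurp() builds: GroupId -> instances and InstanceId -> tags.
# dict.setdefault() is the idiomatic form of the if/else used above.
def _example_sg_instance_mapping():
    reservations = [{'Instances': [
        {'InstanceId': 'i-1', 'SecurityGroups': [{'GroupId': 'sg-a'}]},
        {'InstanceId': 'i-2', 'SecurityGroups': [{'GroupId': 'sg-a'}]},
    ]}]
    sg_instances = {}
    for reservation in reservations:
        for instance in reservation['Instances']:
            for group in instance['SecurityGroups']:
                sg_instances.setdefault(group['GroupId'], []).append(instance)
    return sg_instances  # {'sg-a': [<i-1 dict>, <i-2 dict>]}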
|
|
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains functions for registering clients."""
from twisted.internet import defer
from synapse.types import UserID
from synapse.api.errors import (
AuthError, Codes, SynapseError, RegistrationError, InvalidCaptchaError
)
from ._base import BaseHandler
import synapse.util.stringutils as stringutils
from synapse.util.async import run_on_reactor
from synapse.http.client import CaptchaServerHttpClient
import base64
import bcrypt
import logging
import urllib
logger = logging.getLogger(__name__)
class RegistrationHandler(BaseHandler):
def __init__(self, hs):
super(RegistrationHandler, self).__init__(hs)
self.distributor = hs.get_distributor()
self.distributor.declare("registered_user")
@defer.inlineCallbacks
def check_username(self, localpart):
yield run_on_reactor()
if urllib.quote(localpart) != localpart:
raise SynapseError(
400,
"User ID must only contain characters which do not"
" require URL encoding."
)
user = UserID(localpart, self.hs.hostname)
user_id = user.to_string()
yield self.check_user_id_is_valid(user_id)
users = yield self.store.get_users_by_id_case_insensitive(user_id)
if users:
raise SynapseError(
400,
"User ID already taken.",
errcode=Codes.USER_IN_USE,
)
@defer.inlineCallbacks
def register(self, localpart=None, password=None):
"""Registers a new client on the server.
Args:
localpart : The local part of the user ID to register. If None,
one will be randomly generated.
password (str) : The password to assign to this user so they can
login again. This can be None which means they cannot login again
via a password (e.g. the user is an application service user).
Returns:
A tuple of (user_id, access_token).
Raises:
RegistrationError if there was a problem registering.
"""
yield run_on_reactor()
password_hash = None
if password:
password_hash = bcrypt.hashpw(password, bcrypt.gensalt())
if localpart:
yield self.check_username(localpart)
user = UserID(localpart, self.hs.hostname)
user_id = user.to_string()
token = self.generate_token(user_id)
yield self.store.register(
user_id=user_id,
token=token,
password_hash=password_hash
)
yield self.distributor.fire("registered_user", user)
else:
# autogen a random user ID
attempts = 0
user_id = None
token = None
while not user_id and not token:
try:
localpart = self._generate_user_id()
user = UserID(localpart, self.hs.hostname)
user_id = user.to_string()
yield self.check_user_id_is_valid(user_id)
token = self.generate_token(user_id)
yield self.store.register(
user_id=user_id,
token=token,
password_hash=password_hash)
self.distributor.fire("registered_user", user)
except SynapseError:
# if user id is taken, just generate another
user_id = None
token = None
attempts += 1
if attempts > 5:
raise RegistrationError(
500, "Cannot generate user ID.")
# create a default avatar for the user
# XXX: ideally clients would explicitly specify one, but given they don't
# and we want consistent and pretty identicons for random users, we'll
# do it here.
try:
auth_user = UserID.from_string(user_id)
media_repository = self.hs.get_resource_for_media_repository()
identicon_resource = media_repository.getChildWithDefault("identicon", None)
upload_resource = media_repository.getChildWithDefault("upload", None)
identicon_bytes = identicon_resource.generate_identicon(user_id, 320, 320)
content_uri = yield upload_resource.create_content(
"image/png", None, identicon_bytes, len(identicon_bytes), auth_user
)
profile_handler = self.hs.get_handlers().profile_handler
profile_handler.set_avatar_url(
auth_user, auth_user, ("%s#auto" % (content_uri,))
)
except NotImplementedError:
pass # make tests pass without messing around creating default avatars
defer.returnValue((user_id, token))
@defer.inlineCallbacks
def appservice_register(self, user_localpart, as_token):
user = UserID(user_localpart, self.hs.hostname)
user_id = user.to_string()
service = yield self.store.get_app_service_by_token(as_token)
if not service:
raise AuthError(403, "Invalid application service token.")
if not service.is_interested_in_user(user_id):
raise SynapseError(
400, "Invalid user localpart for this application service.",
errcode=Codes.EXCLUSIVE
)
token = self.generate_token(user_id)
yield self.store.register(
user_id=user_id,
token=token,
password_hash=""
)
self.distributor.fire("registered_user", user)
defer.returnValue((user_id, token))
@defer.inlineCallbacks
def check_recaptcha(self, ip, private_key, challenge, response):
"""
Checks a recaptcha is correct.
Used only by c/s api v1
"""
captcha_response = yield self._validate_captcha(
ip,
private_key,
challenge,
response
)
if not captcha_response["valid"]:
logger.info("Invalid captcha entered from %s. Error: %s",
ip, captcha_response["error_url"])
raise InvalidCaptchaError(
error_url=captcha_response["error_url"]
)
else:
logger.info("Valid captcha entered from %s", ip)
@defer.inlineCallbacks
def register_saml2(self, localpart):
"""
Registers email_id as SAML2 Based Auth.
"""
if urllib.quote(localpart) != localpart:
raise SynapseError(
400,
"User ID must only contain characters which do not"
" require URL encoding."
)
user = UserID(localpart, self.hs.hostname)
user_id = user.to_string()
yield self.check_user_id_is_valid(user_id)
token = self.generate_token(user_id)
try:
yield self.store.register(
user_id=user_id,
token=token,
password_hash=None
)
yield self.distributor.fire("registered_user", user)
except Exception as e:
yield self.store.add_access_token_to_user(user_id, token)
# Ignore Registration errors
logger.exception(e)
defer.returnValue((user_id, token))
@defer.inlineCallbacks
def register_email(self, threepidCreds):
"""
Registers emails with an identity server.
Used only by c/s api v1
"""
for c in threepidCreds:
logger.info("validating theeepidcred sid %s on id server %s",
c['sid'], c['idServer'])
try:
identity_handler = self.hs.get_handlers().identity_handler
threepid = yield identity_handler.threepid_from_creds(c)
except:
logger.exception("Couldn't validate 3pid")
raise RegistrationError(400, "Couldn't validate 3pid")
if not threepid:
raise RegistrationError(400, "Couldn't validate 3pid")
logger.info("got threepid with medium '%s' and address '%s'",
threepid['medium'], threepid['address'])
@defer.inlineCallbacks
def bind_emails(self, user_id, threepidCreds):
"""Links emails with a user ID and informs an identity server.
Used only by c/s api v1
"""
# Now we have a matrix ID, bind it to the threepids we were given
for c in threepidCreds:
identity_handler = self.hs.get_handlers().identity_handler
# XXX: This should be a deferred list, shouldn't it?
yield identity_handler.bind_threepid(c, user_id)
@defer.inlineCallbacks
def check_user_id_is_valid(self, user_id):
# valid user IDs must not clash with any user ID namespaces claimed by
# application services.
services = yield self.store.get_app_services()
interested_services = [
s for s in services if s.is_interested_in_user(user_id)
]
for service in interested_services:
if service.is_exclusive_user(user_id):
raise SynapseError(
400, "This user ID is reserved by an application service.",
errcode=Codes.EXCLUSIVE
)
def generate_token(self, user_id):
# urlsafe variant uses _ and - so use . as the separator and replace
# all =s with .s so http clients don't quote =s when it is used as
# query params.
return (base64.urlsafe_b64encode(user_id).replace('=', '.') + '.' +
stringutils.random_string(18))
def _generate_user_id(self):
return "-" + stringutils.random_string(18)
@defer.inlineCallbacks
def _validate_captcha(self, ip_addr, private_key, challenge, response):
"""Validates the captcha provided.
Used only by c/s api v1
Returns:
dict: Containing 'valid'(bool) and 'error_url'(str) if invalid.
"""
response = yield self._submit_captcha(ip_addr, private_key, challenge,
response)
# parse Google's response. Lovely format..
lines = response.split('\n')
json = {
"valid": lines[0] == 'true',
"error_url": "http://www.google.com/recaptcha/api/challenge?" +
"error=%s" % lines[1]
}
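# Example (hypothetical) failure body from the old v1 verify API, which the
# parsing above turns into {"valid": False, "error_url": "...error=incorrect-captcha-sol"}:
#   false
#   incorrect-captcha-sol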
defer.returnValue(json)
@defer.inlineCallbacks
def _submit_captcha(self, ip_addr, private_key, challenge, response):
"""
Used only by c/s api v1
"""
# TODO: get this from the homeserver rather than creating a new one for
# each request
client = CaptchaServerHttpClient(self.hs)
data = yield client.post_urlencoded_get_raw(
"http://www.google.com:80/recaptcha/api/verify",
args={
'privatekey': private_key,
'remoteip': ip_addr,
'challenge': challenge,
'response': response
}
)
defer.returnValue(data)
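# Illustration of the token format produced by generate_token() above
# (Python 2, matching this module): urlsafe base64 of the user ID with '='
# padding replaced by '.', then a '.' and 18 random characters, e.g.
#   >>> base64.urlsafe_b64encode('@frank:example.com').replace('=', '.')
#   'QGZyYW5rOmV4YW1wbGUuY29t'
#   token: 'QGZyYW5rOmV4YW1wbGUuY29t.abcDEF123abcDEF123'  (random tail made up)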
|
|
"""Makefile for the cmdln project.
${common_task_list}
See `mk -h' for options.
"""
import sys
import os
from os.path import join, dirname, normpath, abspath, exists, basename
import re
from glob import glob
import webbrowser
from mklib.common import MkError
from mklib import Task
from mklib import sh
class bugs(Task):
"""Open bug database page."""
def make(self):
webbrowser.open("http://code.google.com/p/cmdln/issues/list")
class site(Task):
"""Open the Google Code project page."""
def make(self):
webbrowser.open("http://code.google.com/p/cmdln/")
class clean(Task):
"""Clean generated files and dirs."""
def make(self):
patterns = [
"dist",
"build",
"MANIFEST",
"*.pyc",
"docs/*.pyc",
"examples/*.pyc",
"lib/*.pyc",
]
for pattern in patterns:
p = join(self.dir, pattern)
for path in glob(p):
sh.rm(path, log=self.log)
class sdist(Task):
"""python setup.py sdist"""
def make(self):
sh.run_in_dir("%spython setup.py sdist -f --formats zip"
% _setup_command_prefix(),
self.dir, self.log.debug)
class pypi_upload(Task):
"""Upload release to pypi."""
def make(self):
tasks = (sys.platform == "win32"
and "sdist --formats zip bdist_wininst upload"
or "sdist --formats zip upload")
sh.run_in_dir("%spython setup.py %s" % (_setup_command_prefix(), tasks),
self.dir, self.log.debug)
sys.path.insert(0, join(self.dir, "lib"))
url = "http://pypi.python.org/pypi/cmdln/"
import webbrowser
webbrowser.open_new(url)
class googlecode_upload(Task):
"""Upload sdist to Google Code project site."""
deps = ["sdist"]
def make(self):
try:
import googlecode_upload
except ImportError:
raise MkError("couldn't import `googlecode_upload` (get it from http://support.googlecode.com/svn/trunk/scripts/googlecode_upload.py)")
ver = _get_version()
sdist_path = join(self.dir, "dist", "cmdln-%s.zip" % ver)
status, reason, url = googlecode_upload.upload_find_auth(
sdist_path,
"cmdln", # project_name
"cmdln %s source package" % ver, # summary
["Featured", "Type-Archive"]) # labels
if not url:
raise MkError("couldn't upload sdsit to Google Code: %s (%s)"
% (reason, status))
self.log.info("uploaded sdist to `%s'", url)
project_url = "http://code.google.com/p/cmdln/"
import webbrowser
webbrowser.open_new(project_url)
class todo(Task):
"""Print out todo's and xxx's in the docs area."""
def make(self):
for path in _paths_from_path_patterns(['.'],
excludes=[".svn", "*.pyc", "TO""DO.txt", "Makefile.py",
"*.png", "*.gif", "*.pprint", "*.prof",
"tmp-*"]):
self._dump_pattern_in_path("TO\DO\\|XX\X", path)
path = join(self.dir, "TO""DO.txt")
todos = re.compile("^- ", re.M).findall(open(path, 'r').read())
print "(plus %d TODOs from TO""DO.txt)" % len(todos)
def _dump_pattern_in_path(self, pattern, path):
os.system("grep -nH '%s' '%s'" % (pattern, path))
#---- internal support stuff
# Recipe: paths_from_path_patterns (0.3.7)
def _should_include_path(path, includes, excludes):
"""Return True iff the given path should be included."""
from os.path import basename
from fnmatch import fnmatch
base = basename(path)
if includes:
for include in includes:
if fnmatch(base, include):
try:
log.debug("include `%s' (matches `%s')", path, include)
except (NameError, AttributeError):
pass
break
else:
try:
log.debug("exclude `%s' (matches no includes)", path)
except (NameError, AttributeError):
pass
return False
for exclude in excludes:
if fnmatch(base, exclude):
try:
log.debug("exclude `%s' (matches `%s')", path, exclude)
except (NameError, AttributeError):
pass
return False
return True
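# For example: _should_include_path('lib/cmdln.py', ['*.py'], ['.svn'])
# matches the '*.py' include and no exclude, so it returns True;
# _should_include_path('.svn', [], ['.svn']) returns False.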
_NOT_SPECIFIED = ("NOT", "SPECIFIED")
def _paths_from_path_patterns(path_patterns, files=True, dirs="never",
recursive=True, includes=[], excludes=[],
on_error=_NOT_SPECIFIED):
"""_paths_from_path_patterns([<path-patterns>, ...]) -> file paths
Generate a list of paths (files and/or dirs) represented by the given path
patterns.
"path_patterns" is a list of paths optionally using the '*', '?' and
'[seq]' glob patterns.
"files" is boolean (default True) indicating if file paths
should be yielded
"dirs" is string indicating under what conditions dirs are
yielded. It must be one of:
never (default) never yield dirs
always yield all dirs matching given patterns
if-not-recursive only yield dirs for invocations when
recursive=False
See use cases below for more details.
"recursive" is boolean (default True) indicating if paths should
be recursively yielded under given dirs.
"includes" is a list of file patterns to include in recursive
searches.
"excludes" is a list of file and dir patterns to exclude.
(Note: This is slightly different than GNU grep's --exclude
option which only excludes *files*. I.e. you cannot exclude
a ".svn" dir.)
"on_error" is an error callback called when a given path pattern
matches nothing:
on_error(PATH_PATTERN)
If not specified, the default is look for a "log" global and
call:
log.error("`%s': No such file or directory")
Specify None to do nothing.
Typically this is useful for a command-line tool that takes a list
of paths as arguments. (For Unix-heads: the shell on Windows does
NOT expand glob chars, that is left to the app.)
Use case #1: like `grep -r`
{files=True, dirs='never', recursive=(if '-r' in opts)}
script FILE # yield FILE, else call on_error(FILE)
script DIR # yield nothing
script PATH* # yield all files matching PATH*; if none,
# call on_error(PATH*) callback
script -r DIR # yield files (not dirs) recursively under DIR
script -r PATH* # yield files matching PATH* and files recursively
# under dirs matching PATH*; if none, call
# on_error(PATH*) callback
Use case #2: like `file -r` (if it had a recursive option)
{files=True, dirs='if-not-recursive', recursive=(if '-r' in opts)}
script FILE # yield FILE, else call on_error(FILE)
script DIR # yield DIR, else call on_error(DIR)
script PATH* # yield all files and dirs matching PATH*; if none,
# call on_error(PATH*) callback
script -r DIR # yield files (not dirs) recursively under DIR
script -r PATH* # yield files matching PATH* and files recursively
# under dirs matching PATH*; if none, call
# on_error(PATH*) callback
Use case #3: kind of like `find .`
{files=True, dirs='always', recursive=(if '-r' in opts)}
script FILE # yield FILE, else call on_error(FILE)
script DIR # yield DIR, else call on_error(DIR)
script PATH* # yield all files and dirs matching PATH*; if none,
# call on_error(PATH*) callback
script -r DIR # yield files and dirs recursively under DIR
# (including DIR)
script -r PATH* # yield files and dirs matching PATH* and recursively
# under dirs; if none, call on_error(PATH*)
# callback
"""
from os.path import basename, exists, isdir, join
from glob import glob
assert not isinstance(path_patterns, basestring), \
"'path_patterns' must be a sequence, not a string: %r" % path_patterns
GLOB_CHARS = '*?['
for path_pattern in path_patterns:
# Determine the set of paths matching this path_pattern.
for glob_char in GLOB_CHARS:
if glob_char in path_pattern:
paths = glob(path_pattern)
break
else:
paths = exists(path_pattern) and [path_pattern] or []
if not paths:
if on_error is None:
pass
elif on_error is _NOT_SPECIFIED:
try:
log.error("`%s': No such file or directory", path_pattern)
except (NameError, AttributeError):
pass
else:
on_error(path_pattern)
for path in paths:
if isdir(path):
# 'includes' SHOULD affect whether a dir is yielded.
if (dirs == "always"
or (dirs == "if-not-recursive" and not recursive)
) and _should_include_path(path, includes, excludes):
yield path
# However, if recursive, 'includes' should NOT affect
# whether a dir is recursed into. Otherwise you could
# not:
# script -r --include="*.py" DIR
if recursive and _should_include_path(path, [], excludes):
for dirpath, dirnames, filenames in os.walk(path):
dir_indices_to_remove = []
for i, dirname in enumerate(dirnames):
d = join(dirpath, dirname)
if dirs == "always" \
and _should_include_path(d, includes, excludes):
yield d
if not _should_include_path(d, [], excludes):
dir_indices_to_remove.append(i)
for i in reversed(dir_indices_to_remove):
del dirnames[i]
if files:
for filename in sorted(filenames):
f = join(dirpath, filename)
if _should_include_path(f, includes, excludes):
yield f
elif files and _should_include_path(path, includes, excludes):
yield path
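# Hypothetical usage, mirroring use case #1 above (grep-style recursion):
#   for path in _paths_from_path_patterns(['lib/*.py'], recursive=True,
#                                         excludes=['.svn', '*.pyc']):
#       print(path)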
_g_version = None
def _get_version():
global _g_version
if _g_version is None:
sys.path.insert(0, join(dirname(__file__), "lib"))
try:
import cmdln
_g_version = cmdln.__version__
finally:
del sys.path[0]
return _g_version
def _setup_command_prefix():
prefix = ""
if sys.platform == "darwin":
# http://forums.macosxhints.com/archive/index.php/t-43243.html
# This is an Apple customization to `tar` to avoid creating
# '._foo' files for extended-attributes for archived files.
prefix = "COPY_EXTENDED_ATTRIBUTES_DISABLE=1 "
return prefix
|
|
import time
from test_base import TestCase
class Test_Comment(TestCase):
form_selector = '#pp-popup-comment-form-cont .pp-commform-form'
def start_comment(self):
self.js('pixplus.popup.comment.start()')
self.popup_wait_load()
def delete(self, comment):
dellink = self.q('.delete-comment', comment)
self.move_to(comment)
time.sleep(1)
self.move_to(dellink)
time.sleep(1)
self.click(dellink)
time.sleep(1)
self.alert_accept()
time.sleep(1)
def delete_all(self):
self.open_test_user()
self.b.open(self.js('return pixplus.illust.list[0].link.href'))
end = False
while not end:
end = True
for comment in self.qa('._comment-item, .sticker-item'):
if self.qa('.delete-comment', comment):
end = False
self.delete(comment)
self.reload()
self.assertFalse(self.qa('.delete-comment'))
def test_default_tab(self):
self.open_test_user()
self.set_conf('popup.show_comment_form', True)
self.open_popup()
self.start_comment()
self.click(self.q(self.form_selector + ' .pp-commform-tab-comment'))
self.popup_reload()
self.start_comment()
self.assertTrue(self.qa(self.form_selector + ' .pp-commform-tab-comment.pp-active'))
self.assertTrue(self.qa(self.form_selector + ' .pp-commform-tab-stamp:not(.pp-active)'))
self.assertTrue(self.q(self.form_selector + ' .pp-commform-cont-comment').is_displayed())
self.assertFalse(self.q(self.form_selector + ' .pp-commform-cont-stamp').is_displayed())
self.click(self.q(self.form_selector + ' .pp-commform-tab-stamp'))
self.popup_reload()
self.start_comment()
self.assertTrue(self.qa(self.form_selector + ' .pp-commform-tab-comment:not(.pp-active)'))
self.assertTrue(self.qa(self.form_selector + ' .pp-commform-tab-stamp.pp-active'))
self.assertFalse(self.q(self.form_selector + ' .pp-commform-cont-comment').is_displayed())
self.assertTrue(self.q(self.form_selector + ' .pp-commform-cont-stamp').is_displayed())
self.reload()
self.open_popup()
self.start_comment()
self.assertTrue(self.qa(self.form_selector + ' .pp-commform-tab-comment:not(.pp-active)'))
self.assertTrue(self.qa(self.form_selector + ' .pp-commform-tab-stamp.pp-active'))
self.assertFalse(self.q(self.form_selector + ' .pp-commform-cont-comment').is_displayed())
self.assertTrue(self.q(self.form_selector + ' .pp-commform-cont-stamp').is_displayed())
self.click(self.q(self.form_selector + ' .pp-commform-tab-comment'))
self.reload()
self.open_popup()
self.start_comment()
self.assertTrue(self.qa(self.form_selector + ' .pp-commform-tab-comment.pp-active'))
self.assertTrue(self.qa(self.form_selector + ' .pp-commform-tab-stamp:not(.pp-active)'))
self.assertTrue(self.q(self.form_selector + ' .pp-commform-cont-comment').is_displayed())
self.assertFalse(self.q(self.form_selector + ' .pp-commform-cont-stamp').is_displayed())
def test_write(self):
self.delete_all()
self.open_test_user()
self.set_conf('popup.show_comment_form', True)
self.open_popup()
self.start_comment()
self.click(self.q(self.form_selector + ' .pp-commform-tab-comment'))
comment = self.q(self.form_selector + ' .pp-commform-cont-comment textarea')
message = '__hoge__c_%d' % time.time()
comment.send_keys(message)
self.click(self.q(self.form_selector + ' .pp-commform-send'))
self.popup_wait_load()
xpath = '//*[@id="pp-popup-comment-comments"]//div[contains(concat(" ", @class, " "), " _comment-item ") and .//text()[contains(.,"%s")]]' % message
self.wait_until(lambda driver: self.xa(xpath))
for i in range(10):
self.js('pixplus.popup.reload()')
self.popup_wait_load()
if self.xa(xpath):
break
time.sleep(1)
self.start_comment()
self.delete(self.x(xpath))
self.js('pixplus.popup.reload()')
self.popup_wait_load()
self.assertFalse(self.xa(xpath))
self.assertFalse(self.qa('._comment-item .delete-comment'))
def make_stamp_xpath(self, num):
return '\
//*[@id="pp-popup-comment-comments"]//div[\
contains(concat(" ", @class, " "), " _comment-item ")\
and .//*[contains(concat(" ", @class, " "), " sticker-container ")]\
//img[contains(@src, "/stamps/%d_s.jpg")]]' % num
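# Note: contains(concat(" ", @class, " "), " foo ") is the standard XPath 1.0
# idiom for exact class-name matching; a plain contains(@class, "foo") would
# also match classes like "foobar".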
def write_stamp(self, group, num):
xpath = self.make_stamp_xpath(num)
self.assertFalse(self.xa(xpath))
self.click(self.q(self.form_selector + ' .pp-commform-tab-stamp'))
self.click(self.q(self.form_selector + ' .pp-commform-cont-stamp .pp-commform-tab[data-group="%s"]' % group))
self.click(self.q(self.form_selector + ' .pp-commform-stamp-group[data-group="%s"] img[data-id="%d"]' % (group, num)))
self.wait_until(lambda driver: self.xa(xpath))
def test_write_stamp(self):
self.delete_all()
self.open_test_user()
self.set_conf('popup.show_comment_form', True)
self.open_popup()
self.start_comment()
xpath = self.make_stamp_xpath(408)
self.write_stamp('kitsune', 408)
for i in range(10):
self.js('pixplus.popup.reload()')
self.popup_wait_load()
if self.xa(xpath):
break
time.sleep(1)
self.start_comment()
self.delete(self.x(xpath))
self.js('pixplus.popup.reload()')
self.popup_wait_load()
self.assertFalse(self.xa(xpath))
self.assertFalse(self.qa('._comment-item .delete-comment'))
def check_toggle_form(self, visible):
self.assertEqual(self.get_conf('popup.show_comment_form'), visible)
self.open_popup()
self.start_comment()
comment = self.q('#pp-popup-comment')
form = self.q(self.form_selector)
self.assertTrue(comment.is_displayed())
self.assertEqual(form.is_displayed(), visible)
self.click(self.q('#pp-popup-comment-form-btn'))
self.assertEqual(self.get_conf('popup.show_comment_form'), visible)
self.assertTrue(comment.is_displayed())
self.assertEqual(form.is_displayed(), not visible)
def test_toggle_form(self):
self.open_test_user()
self.set_conf('popup.show_comment_form', False)
self.check_toggle_form(False)
self.reload()
self.set_conf('popup.show_comment_form', True)
self.check_toggle_form(True)
def test_cogwheel(self):
self.delete_all()
self.open_test_user()
self.set_conf('popup.show_comment_form', True)
self.open_popup()
self.start_comment()
stamp_xpath = self.make_stamp_xpath(209)
self.write_stamp('moemusume', 209)
self.assertTrue(self.x(stamp_xpath).is_displayed())
self.open_test_user()
self.set_conf('popup.show_comment_form', False)
self.set_conf('popup.hide_stamp_comments', False)
self.open_popup()
self.start_comment()
self.assertTrue(self.x(stamp_xpath).is_displayed())
btn = self.q('#pp-popup-comment-config-btn')
x, y, w, h, r, b = self.geom2(btn)
self.click(btn)
time.sleep(1)
menu = self.q('.pp-popup-menu')
self.assertTrue(menu.is_displayed())
self.assertEqual(self.geom(menu)[:2], (x, b))
sel_show_comment_form = '.pp-popup-menu-item[data-name="popup_show_comment_form"] input[type="checkbox"]'
sel_hide_stamp_comments = '.pp-popup-menu-item[data-name="popup_hide_stamp_comments"] input[type="checkbox"]'
self.assertFalse(self.q(self.form_selector).is_displayed())
self.assertFalse(self.q(sel_show_comment_form).is_selected())
self.assertFalse(self.q(sel_hide_stamp_comments).is_selected())
self.click(self.q(sel_show_comment_form))
time.sleep(1)
self.assertTrue(self.q(self.form_selector).is_displayed())
self.assertEqual(self.get_conf('popup.show_comment_form'), True)
self.assertEqual(self.get_conf('popup.hide_stamp_comments'), False)
self.assertFalse(self.qa('.pp-popup-menu'))
self.assertTrue(self.x(stamp_xpath).is_displayed())
self.click(btn)
time.sleep(1)
self.assertTrue(self.q(sel_show_comment_form).is_selected())
self.assertFalse(self.q(sel_hide_stamp_comments).is_selected())
self.click(self.q(sel_hide_stamp_comments))
time.sleep(1)
self.assertEqual(self.get_conf('popup.show_comment_form'), True)
self.assertEqual(self.get_conf('popup.hide_stamp_comments'), True)
self.assertFalse(self.qa('.pp-popup-menu'))
self.assertFalse(self.x(stamp_xpath).is_displayed())
self.click(btn)
time.sleep(1)
self.assertTrue(self.q(sel_show_comment_form).is_selected())
self.assertTrue(self.q(sel_hide_stamp_comments).is_selected())
self.click(self.q(sel_show_comment_form))
time.sleep(1)
self.assertTrue(self.q(self.form_selector).is_displayed())
self.assertEqual(self.get_conf('popup.show_comment_form'), False)
self.assertEqual(self.get_conf('popup.hide_stamp_comments'), True)
self.assertFalse(self.qa('.pp-popup-menu'))
self.assertFalse(self.x(stamp_xpath).is_displayed())
self.click(btn)
time.sleep(1)
self.assertFalse(self.q(sel_show_comment_form).is_selected())
self.assertTrue(self.q(sel_hide_stamp_comments).is_selected())
self.click(self.q(sel_hide_stamp_comments))
time.sleep(1)
self.assertTrue(self.q(self.form_selector).is_displayed())
self.assertEqual(self.get_conf('popup.show_comment_form'), False)
self.assertEqual(self.get_conf('popup.hide_stamp_comments'), False)
self.assertFalse(self.qa('.pp-popup-menu'))
self.assertTrue(self.x(stamp_xpath).is_displayed())
self.click(btn)
time.sleep(1)
self.assertFalse(self.q(sel_show_comment_form).is_selected())
self.assertFalse(self.q(sel_hide_stamp_comments).is_selected())
def test_reply(self):
xpath = '//*[@id="pp-popup-comment-comments"]//div[\
contains(concat(" ", @class, " "), " _comment-item ") and \
.//*[contains(concat(" ", @class, " "), " reply ")]\
]'
self.open_test_user()
self.find_illust(lambda i: self.xa(xpath))
self.start_comment()
item = self.x(xpath)
to_id = item.get_attribute('data-id')
to_name = self.q('.comment .meta .user-name', item).text
self.click(self.q('.reply', item))
self.popup_wait_load()
comment = self.q(self.form_selector + ' .pp-commform-cont-comment textarea')
message = '__hoge__c_%d' % time.time()
comment.send_keys(message)
self.click(self.q(self.form_selector + ' .pp-commform-send'))
self.popup_wait_load()
xpath = '//*[@id="pp-popup-comment-comments"]//div[contains(concat(" ", @class, " "), " _comment-item ") and .//text()[contains(.,"%s")]]' % message
self.wait_until(lambda driver: self.xa(xpath))
self.assertTrue(self.qa('.comment .reply-to', self.x(xpath)))
link = self.q('.comment .reply-to', self.x(xpath))
self.assertEqual(link.get_attribute('data-id'), to_id)
self.assertIn('> %s' % to_name, link.text)
|
|
from rpython.flowspace.model import Constant
from rpython.rtyper.lltypesystem import lltype, llmemory, rdict
from rpython.rtyper.lltypesystem.llmemory import weakref_create, weakref_deref
from rpython.rtyper import rclass
from rpython.rtyper.rclass import getinstancerepr
from rpython.rtyper.rmodel import Repr
from rpython.rlib.rweakref import RWeakKeyDictionary
from rpython.rlib import jit
from rpython.rlib.objectmodel import compute_identity_hash
# Warning: this implementation of RWeakKeyDictionary does not exactly leak,
# but it can keep some values alive for a long time, even after the
# corresponding keys were freed. They are eventually freed if you keep
# manipulating the dictionary. Avoid using it when the values are objects
# that might keep large amounts of memory alive.
class WeakKeyDictRepr(Repr):
def __init__(self, rtyper):
self.rtyper = rtyper
self.lowleveltype = lltype.Ptr(WEAKDICT)
self.dict_cache = {}
def convert_const(self, weakdict):
if not isinstance(weakdict, RWeakKeyDictionary):
raise TyperError("expected an RWeakKeyDictionary: %r" % (
weakdict,))
try:
key = Constant(weakdict)
return self.dict_cache[key]
except KeyError:
self.setup()
if weakdict.length() != 0:
raise TyperError("got a non-empty prebuilt RWeakKeyDictionary")
l_dict = ll_new_weakdict()
self.dict_cache[key] = l_dict
return l_dict
def rtype_method_get(self, hop):
r_object = getinstancerepr(self.rtyper, None)
v_d, v_key = hop.inputargs(self, r_object)
hop.exception_cannot_occur()
v_result = hop.gendirectcall(ll_get, v_d, v_key)
v_result = hop.genop("cast_pointer", [v_result],
resulttype=hop.r_result.lowleveltype)
return v_result
def rtype_method_set(self, hop):
r_object = getinstancerepr(self.rtyper, None)
v_d, v_key, v_value = hop.inputargs(self, r_object, r_object)
hop.exception_cannot_occur()
if hop.args_s[2].is_constant() and hop.args_s[2].const is None:
hop.gendirectcall(ll_set_null, v_d, v_key)
else:
hop.gendirectcall(ll_set, v_d, v_key, v_value)
def rtype_method_length(self, hop):
v_d, = hop.inputargs(self)
hop.exception_cannot_occur()
return hop.gendirectcall(ll_length, v_d)
def specialize_make_weakdict(hop):
hop.exception_cannot_occur()
v_d = hop.gendirectcall(ll_new_weakdict)
return v_d
# ____________________________________________________________
NULLVALUE = lltype.nullptr(rclass.OBJECTPTR.TO)
WEAKDICTENTRY = lltype.Struct("weakdictentry",
("key", llmemory.WeakRefPtr),
("value", rclass.OBJECTPTR),
("f_hash", lltype.Signed))
def ll_debugrepr(x):
if x:
h = compute_identity_hash(x)
else:
h = 0
return '<%x>' % (h,)
def ll_valid(entries, i):
key = entries[i].key
if not key:
return False
elif weakref_deref(rclass.OBJECTPTR, key):
return True
else:
# The entry might be a dead weakref still holding a strong
# reference to the value; for this case, we clear the old
# value from the entry, if any.
entries[i].value = NULLVALUE
return False
def ll_everused(entries, i):
return bool(entries[i].key)
entrymeths = {
'allocate': lltype.typeMethod(rdict._ll_malloc_entries),
'delete': rdict._ll_free_entries,
'valid': ll_valid,
'everused': ll_everused,
'hash': rdict.ll_hash_from_cache,
'no_direct_compare': True,
}
WEAKDICTENTRYARRAY = lltype.GcArray(WEAKDICTENTRY,
adtmeths=entrymeths,
hints={'weakarray': 'key'})
# NB: the 'hints' entry above is not used so far.
@jit.dont_look_inside
def ll_new_weakdict():
d = lltype.malloc(WEAKDICT)
d.entries = WEAKDICT.entries.TO.allocate(rdict.DICT_INITSIZE)
d.num_items = 0
d.resize_counter = rdict.DICT_INITSIZE * 2
return d
@jit.dont_look_inside
def ll_get(d, llkey):
hash = compute_identity_hash(llkey)
i = rdict.ll_dict_lookup(d, llkey, hash) & rdict.MASK
#llop.debug_print(lltype.Void, i, 'get', hex(hash),
# ll_debugrepr(d.entries[i].key),
# ll_debugrepr(d.entries[i].value))
# NB. ll_valid() above was just called at least on entry i, so if
# it is an invalid entry with a dead weakref, the value was reset
# to NULLVALUE.
return d.entries[i].value
@jit.dont_look_inside
def ll_set(d, llkey, llvalue):
if llvalue:
ll_set_nonnull(d, llkey, llvalue)
else:
ll_set_null(d, llkey)
@jit.dont_look_inside
def ll_set_nonnull(d, llkey, llvalue):
hash = compute_identity_hash(llkey)
keyref = weakref_create(llkey) # GC effects here, before the rest
i = rdict.ll_dict_lookup(d, llkey, hash) & rdict.MASK
everused = d.entries.everused(i)
d.entries[i].key = keyref
d.entries[i].value = llvalue
d.entries[i].f_hash = hash
#llop.debug_print(lltype.Void, i, 'stored', hex(hash),
# ll_debugrepr(llkey),
# ll_debugrepr(llvalue))
if not everused:
d.resize_counter -= 3
if d.resize_counter <= 0:
#llop.debug_print(lltype.Void, 'RESIZE')
ll_weakdict_resize(d)
@jit.dont_look_inside
def ll_set_null(d, llkey):
hash = compute_identity_hash(llkey)
i = rdict.ll_dict_lookup(d, llkey, hash) & rdict.MASK
if d.entries.everused(i):
# If the entry was ever used, clean up its key and value.
# We don't store a NULL value, but a dead weakref, because
# the entry must still be marked as everused().
d.entries[i].key = llmemory.dead_wref
d.entries[i].value = NULLVALUE
#llop.debug_print(lltype.Void, i, 'zero')
def ll_update_num_items(d):
entries = d.entries
num_items = 0
for i in range(len(entries)):
if entries.valid(i):
num_items += 1
d.num_items = num_items
def ll_weakdict_resize(d):
# first set num_items to its correct, up-to-date value
ll_update_num_items(d)
rdict.ll_dict_resize(d)
def ll_keyeq(d, weakkey1, realkey2):
# only called by ll_dict_lookup() with the first arg coming from an
# entry.key, and the 2nd arg being the argument to ll_dict_lookup().
if not weakkey1:
assert bool(realkey2)
return False
return weakref_deref(rclass.OBJECTPTR, weakkey1) == realkey2
@jit.dont_look_inside
def ll_length(d):
# xxx slow, but it's only for debugging
ll_update_num_items(d)
#llop.debug_print(lltype.Void, 'length:', d.num_items)
return d.num_items
dictmeths = {
'll_get': ll_get,
'll_set': ll_set,
'keyeq': ll_keyeq,
'paranoia': False,
}
WEAKDICT = lltype.GcStruct("weakkeydict",
("num_items", lltype.Signed),
("resize_counter", lltype.Signed),
("entries", lltype.Ptr(WEAKDICTENTRYARRAY)),
adtmeths=dictmeths)
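# Sketch of the interpreter-level API this Repr rtypes (hypothetical classes;
# see rpython.rlib.rweakref). Keys are referenced weakly; values may linger
# until the dictionary is manipulated again:
#
#   class Key(object): pass
#   class Value(object): pass
#
#   d = RWeakKeyDictionary(Key, Value)
#   k, v = Key(), Value()
#   d.set(k, v)              # rtyped to ll_set() / ll_set_nonnull()
#   assert d.get(k) is v     # rtyped to ll_get()
#   d.set(k, None)           # rtyped to ll_set_null(): leaves a dead weakref
#   assert d.length() == 0   # rtyped to ll_length()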
|
|
#!/usr/bin/env python
import random
import os_ident
import uncrc32
try: import pcap as pcapy
except: import pcapy
from impacket import ImpactPacket
from impacket import ImpactDecoder
from impacket.ImpactPacket import TCPOption
from impacket.examples import logger
#defaults
MAC = "01:02:03:04:05:06"
IP = "192.168.67.254"
IFACE = "eth0"
OPEN_TCP_PORTS = [80, 443]
OPEN_UDP_PORTS = [111]
UDP_CMD_PORT = 12345
nmapOSDB = '/usr/share/nmap/nmap-os-db'
# Fingerprint = 'Adtran NetVanta 3200 router' # CD=Z TOSI=Z <----------- NMAP detects it as Linux!!!
# Fingerprint = 'ADIC Scalar 1000 tape library remote management unit' # DFI=S
# Fingerprint = 'Siemens Gigaset SX541 or USRobotics USR9111 wireless DSL modem' # DFI=O U1(DF=N IPL=38)
# Fingerprint = 'Apple Mac OS X 10.5.6 (Leopard) (Darwin 9.6.0)' # DFI=Y SI=S U1(DF=Y)
Fingerprint = 'Sun Solaris 10 (SPARC)'
# Fingerprint = 'Sun Solaris 9 (x86)'
# Fingerprint = '3Com OfficeConnect 3CRWER100-75 wireless broadband router' # TI=Z DFI=N !SS TI=Z II=I
# Fingerprint = 'WatchGuard Firebox X5w firewall/WAP' # TI=RD
# no TI=Hex
# Fingerprint = 'FreeBSD 6.0-STABLE - 6.2-RELEASE' # TI=RI
# Fingerprint = 'Microsoft Windows 98 SE' # TI=BI ----> BROKEN! nmap shows no SEQ() output
# Fingerprint = 'Microsoft Windows NT 4.0 SP5 - SP6' # TI=BI TOSI=S SS=S
# Fingerprint = 'Microsoft Windows Vista Business' # TI=I U1(IPL=164)
# Fingerprint = 'FreeBSD 6.1-RELEASE' # no TI (TI=O)
# Fingerprint = '2Wire 1701HG wireless ADSL modem' # IE(R=N)
# Fingerprint = 'Cisco Catalyst 1912 switch' # TOSI=O SS=S
O_ETH = 0
O_IP = 1
O_ARP = 1
O_UDP = 2
O_TCP = 2
O_ICMP = 2
O_UDP_DATA = 3
O_ICMP_DATA = 3
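# The O_* constants index into a packet "onion": a list of protocol layers
# from the outside in, e.g. [Ethernet, IP, TCP, Data]. ARP replaces IP at
# index 1, which is why O_IP and O_ARP share the same value.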
def string2tuple(string):
if string.find(':') >= 0:
return [int(x) for x in string.split(':')]
else:
return [int(x) for x in string.split('.')]
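# e.g. string2tuple('01:02:03:04:05:06') -> [1, 2, 3, 4, 5, 6]   (MAC address)
#      string2tuple('192.168.67.254')    -> [192, 168, 67, 254]  (IPv4 address)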
class Responder:
templateClass = None
signatureName = None
def __init__(self, machine):
self.machine = machine
print "Initializing %s" % self.__class__.__name__
self.initTemplate()
self.initFingerprint()
def initTemplate(self):
if not self.templateClass:
self.template_onion = None
else:
try:
probe = self.templateClass(0, ['0.0.0.0',self.getIP()],[0, 0])
except:
probe = self.templateClass(0, ['0.0.0.0',self.getIP()])
self.template_onion = [probe.get_packet()]
try:
while 1: self.template_onion.append(self.template_onion[-1].child())
except: pass
# print "Template: %s" % self.template_onion[O_ETH]
# print "Options: %r" % self.template_onion[O_TCP].get_padded_options()
# print "Flags: 0x%04x" % self.template_onion[O_TCP].get_th_flags()
def initFingerprint(self):
if not self.signatureName:
self.fingerprint = None
else:
self.fingerprint = self.machine.fingerprint.get_tests()[self.signatureName].copy()
def isMine(self, in_onion):
return False
def buildAnswer(self, in_onion):
return None
def sendAnswer(self, out_onion):
self.machine.sendPacket(out_onion)
def process(self, in_onion):
if not self.isMine(in_onion): return False
print "Got packet for %s" % self.__class__.__name__
out_onion = self.buildAnswer(in_onion)
if out_onion: self.sendAnswer(out_onion)
return True
def getIP(self):
return self.machine.ipAddress
# Generic Responders (does the word Responder exist?)
class ARPResponder(Responder):
def isMine(self, in_onion):
if len(in_onion) < 2: return False
if in_onion[O_ARP].ethertype != ImpactPacket.ARP.ethertype:
return False
return (
in_onion[O_ARP].get_ar_op() == 1 and # ARP REQUEST
in_onion[O_ARP].get_ar_tpa() == string2tuple(self.machine.ipAddress))
def buildAnswer(self, in_onion):
eth = ImpactPacket.Ethernet()
arp = ImpactPacket.ARP()
eth.contains(arp)
arp.set_ar_hrd(1) # Hardware type: Ethernet
arp.set_ar_pro(0x800) # IP
arp.set_ar_op(2) # REPLY
arp.set_ar_hln(6)
arp.set_ar_pln(4)
arp.set_ar_sha(string2tuple(self.machine.macAddress))
arp.set_ar_spa(string2tuple(self.machine.ipAddress))
arp.set_ar_tha(in_onion[O_ARP].get_ar_sha())
arp.set_ar_tpa(in_onion[O_ARP].get_ar_spa())
eth.set_ether_shost(arp.get_ar_sha())
eth.set_ether_dhost(arp.get_ar_tha())
return [eth, arp]
class IPResponder(Responder):
def buildAnswer(self, in_onion):
eth = ImpactPacket.Ethernet()
ip = ImpactPacket.IP()
eth.contains(ip)
eth.set_ether_shost(in_onion[O_ETH].get_ether_dhost())
eth.set_ether_dhost(in_onion[O_ETH].get_ether_shost())
ip.set_ip_src(in_onion[O_IP].get_ip_dst())
ip.set_ip_dst(in_onion[O_IP].get_ip_src())
ip.set_ip_id(self.machine.getIPID())
return [eth, ip]
def sameIPFlags(self, in_onion):
if not self.template_onion: return True
return (self.template_onion[O_IP].get_ip_off() & 0xe000) == (in_onion[O_IP].get_ip_off() & 0xe000)
def isMine(self, in_onion):
if len(in_onion) < 2: return False
return (
(in_onion[O_IP].ethertype == ImpactPacket.IP.ethertype) and
(in_onion[O_IP].get_ip_dst() == self.machine.ipAddress) and
self.sameIPFlags(in_onion)
)
def setTTLFromFingerprint(self, out_onion):
f = self.fingerprint
# Test T: Initial TTL = range_low-range_hi, base 16
# Assumption: we are using the minimum in the TTL range
try:
ttl = f['T'].split('-')
ttl = int(ttl[0], 16)
except:
ttl = 0x7f
# Test TG: Initial TTL Guess. It's just a number, we prefer this
try: ttl = int(f['TG'], 16)
except: pass
out_onion[O_IP].set_ip_ttl(ttl)
class ICMPResponder(IPResponder):
def buildAnswer(self, in_onion):
out_onion = IPResponder.buildAnswer(self, in_onion)
icmp = ImpactPacket.ICMP()
out_onion[O_IP].contains(icmp)
out_onion.append(icmp)
icmp.set_icmp_id(in_onion[O_ICMP].get_icmp_id())
icmp.set_icmp_seq(in_onion[O_ICMP].get_icmp_seq())
out_onion[O_IP].set_ip_id(self.machine.getIPID_ICMP())
return out_onion
def isMine(self, in_onion):
if not IPResponder.isMine(self, in_onion): return False
if len(in_onion) < 3: return False
return (
(in_onion[O_ICMP].protocol == ImpactPacket.ICMP.protocol) and
self.sameICMPTemplate(in_onion))
def sameICMPTemplate(self, in_onion):
t_ip = self.template_onion[O_IP]
t_icmp = self.template_onion[O_ICMP]
t_icmp_datalen = self.template_onion[O_ICMP_DATA].get_size()
return (
(t_ip.get_ip_tos() == in_onion[O_IP].get_ip_tos()) and
(t_ip.get_ip_df() == in_onion[O_IP].get_ip_df()) and
(t_icmp.get_icmp_type() == in_onion[O_ICMP].get_icmp_type()) and
(t_icmp.get_icmp_code() == in_onion[O_ICMP].get_icmp_code()) and
(t_icmp_datalen == in_onion[O_ICMP_DATA].get_size())
)
class UDPResponder(IPResponder):
def isMine(self, in_onion):
return (
IPResponder.isMine(self, in_onion) and
(len(in_onion) >= 3) and
(in_onion[O_UDP].protocol == ImpactPacket.UDP.protocol)
)
class OpenUDPResponder(UDPResponder):
def isMine(self, in_onion):
return (
UDPResponder.isMine(self, in_onion) and
self.machine.isUDPPortOpen(in_onion[O_UDP].get_uh_dport()))
def buildAnswer(self, in_onion):
out_onion = IPResponder.buildAnswer(self, in_onion)
udp = ImpactPacket.UDP()
out_onion[O_IP].contains(udp)
out_onion.append(udp)
udp.set_uh_dport(in_onion[O_UDP].get_uh_sport())
udp.set_uh_sport(in_onion[O_UDP].get_uh_dport())
return out_onion
class ClosedUDPResponder(UDPResponder):
def isMine(self, in_onion):
return (
UDPResponder.isMine(self, in_onion) and
not self.machine.isUDPPortOpen(in_onion[O_UDP].get_uh_dport()))
def buildAnswer(self, in_onion):
out_onion = IPResponder.buildAnswer(self, in_onion)
icmp = ImpactPacket.ICMP()
out_onion[O_IP].contains(icmp)
out_onion.append(icmp)
icmp.contains(in_onion[O_IP])
out_onion += in_onion[O_IP:]
icmp.set_icmp_type(icmp.ICMP_UNREACH)
icmp.set_icmp_code(icmp.ICMP_UNREACH_PORT)
return out_onion
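# Per RFC 792 the port-unreachable reply quotes the offending IP packet; the
# onion above therefore re-contains in_onion[O_IP:] inside the ICMP layer.
# nmap's U1 test (see NMAP2UDPResponder below) probes exactly this reply.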
class TCPResponder(IPResponder):
def buildAnswer(self, in_onion):
out_onion = IPResponder.buildAnswer(self, in_onion)
tcp = ImpactPacket.TCP()
out_onion[O_IP].contains(tcp)
out_onion.append(tcp)
tcp.set_th_dport(in_onion[O_TCP].get_th_sport())
tcp.set_th_sport(in_onion[O_TCP].get_th_dport())
return out_onion
def sameTCPFlags(self, in_onion):
if not self.template_onion: return True
in_flags = in_onion[O_TCP].get_th_flags() & 0xfff
t_flags = self.template_onion[O_TCP].get_th_flags() & 0xfff
return in_flags == t_flags
def sameTCPOptions(self, in_onion):
if not self.template_onion: return True
in_options = in_onion[O_TCP].get_padded_options()
t_options = self.template_onion[O_TCP].get_padded_options()
return in_options == t_options
def isMine(self, in_onion):
if not IPResponder.isMine(self, in_onion): return False
if len(in_onion) < 3: return False
return (
in_onion[O_TCP].protocol == ImpactPacket.TCP.protocol and
self.sameTCPFlags(in_onion) and
self.sameTCPOptions(in_onion)
)
class OpenTCPResponder(TCPResponder):
def isMine(self, in_onion):
return (
TCPResponder.isMine(self, in_onion) and
in_onion[O_TCP].get_SYN() and
self.machine.isTCPPortOpen(in_onion[O_TCP].get_th_dport()))
def buildAnswer(self, in_onion):
out_onion = TCPResponder.buildAnswer(self, in_onion)
out_onion[O_TCP].set_SYN()
out_onion[O_TCP].set_ACK()
out_onion[O_TCP].set_th_ack(in_onion[O_TCP].get_th_seq()+1)
out_onion[O_TCP].set_th_seq(self.machine.getTCPSequence())
return out_onion
class ClosedTCPResponder(TCPResponder):
def isMine(self, in_onion):
return (
TCPResponder.isMine(self, in_onion) and
in_onion[O_TCP].get_SYN() and
not self.machine.isTCPPortOpen(in_onion[O_TCP].get_th_dport()))
def buildAnswer(self, in_onion):
out_onion = TCPResponder.buildAnswer(self, in_onion)
out_onion[O_TCP].set_RST()
out_onion[O_TCP].set_ACK()
out_onion[O_TCP].set_th_ack(in_onion[O_TCP].get_th_seq()+1)
out_onion[O_TCP].set_th_seq(self.machine.getTCPSequence())
return out_onion
class UDPCommandResponder(OpenUDPResponder):
# default UDP_CMD_PORT is 12345
# use with:
# echo cmd:exit | nc -u $(IP) $(UDP_CMD_PORT)
# echo cmd:who | nc -u $(IP) $(UDP_CMD_PORT)
def set_port(self, port):
self.port = port
self.machine.openUDPPort(port)
return self
def isMine(self, in_onion):
return (
OpenUDPResponder.isMine(self, in_onion))# and
#in_onion[O_UDP].get_uh_dport() == self.port)
def buildAnswer(self, in_onion):
cmd = in_onion[O_UDP_DATA].get_bytes().tostring()
if cmd[:4] == 'cmd:': cmd = cmd[4:].strip()
print "Got command: %r" % cmd
if cmd == 'exit':
from sys import exit
exit()
out_onion = OpenUDPResponder.buildAnswer(self, in_onion)
out_onion.append(ImpactPacket.Data())
out_onion[O_UDP].contains(out_onion[O_UDP_DATA])
if cmd == 'who':
out_onion[O_UDP_DATA].set_data(self.machine.fingerprint.get_id())
return out_onion
# NMAP2 specific responders
class NMAP2UDPResponder(ClosedUDPResponder):
signatureName = 'U1'
# No real need to filter
# def isMine(self, in_onion):
# return (
# ClosedUDPResponder.isMine(self, inOnion) and
# (in_onion[O_UDP_DATA].get_size() == 300))
def buildAnswer(self, in_onion):
out_onion = ClosedUDPResponder.buildAnswer(self, in_onion)
f = self.fingerprint
# assume R = Y
try:
if (f['R'] == 'N'): return None
except: pass
# Test DF: Don't fragment IP bit set = [YN]
if (f['DF'] == 'Y'): out_onion[O_IP].set_ip_df(True)
else: out_onion[O_IP].set_ip_df(False)
self.setTTLFromFingerprint(out_onion)
# UN. Assume 0
try: un = int(f['UN'],16)
except: un = 0
out_onion[O_ICMP].set_icmp_void(un)
# RIPL. Assume original packet just quoted
try:
ripl = int(f['RIPL'],16) # G generates exception
out_onion[O_ICMP_DATA].set_ip_len(ripl)
except:
pass
# RID. Assume original packet just quoted
try:
rid = int(f['RID'],16) # G generates exception
out_onion[O_ICMP_DATA].set_ip_id(rid)
except:
pass
# RIPCK. Assume original packet just quoted
try: ripck = f['RIPCK']
except: ripck = 'G'
if ripck == 'I': out_onion[O_ICMP_DATA].set_ip_sum(0x6765)
elif ripck == 'Z': out_onion[O_ICMP_DATA].set_ip_sum(0)
elif ripck == 'G': out_onion[O_ICMP_DATA].auto_checksum = 0
# RUCK. Assume original packet just quoted
try:
ruck = int(f['RUCK'], 16)
out_onion[O_ICMP_DATA+1].set_uh_sum(ruck)
except:
out_onion[O_ICMP_DATA+1].auto_checksum = 0
# RUD. Assume original packet just quoted
try: rud = f['RUD']
except: rud = 'G'
if rud == 'I':
udp_data = out_onion[O_ICMP_DATA+2]
udp_data.set_data('G'*udp_data.get_size())
# IPL. Assume all original packet is quoted
# This has to be the last thing we do
# as we are going to render the packet before doing it
try: ipl = int(f['IPL'], 16)
except: ipl = None
if not ipl is None:
data = out_onion[O_ICMP_DATA].get_packet()
out_onion[O_ICMP].contains(ImpactPacket.Data())
ip_and_icmp_len = out_onion[O_IP].get_size()
data = data[:ipl - ip_and_icmp_len]
data += '\x00'*(ipl-len(data)-ip_and_icmp_len)
out_onion = out_onion[:O_ICMP_DATA]
out_onion.append(ImpactPacket.Data(data))
out_onion[O_ICMP].contains(out_onion[O_ICMP_DATA])
return out_onion
class NMAP2ICMPResponder(ICMPResponder):
def buildAnswer(self, in_onion):
f = self.fingerprint
# assume R = Y
try:
if (f['R'] == 'N'): return None
except: pass
out_onion = ICMPResponder.buildAnswer(self, in_onion)
# assume DFI = N
try: dfi = f['DFI']
except: dfi = 'N'
if dfi == 'N': out_onion[O_IP].set_ip_df(False)
elif dfi == 'Y': out_onion[O_IP].set_ip_df(True)
elif dfi == 'S': out_onion[O_IP].set_ip_df(in_onion[O_IP].get_ip_df())
elif dfi == 'O': out_onion[O_IP].set_ip_df(not in_onion[O_IP].get_ip_df())
else: raise Exception('Unsupported IE(DFI=%s)' % dfi)
# assume DLI = S
try: dli = f['DLI']
except: dli = 'S'
if dli == 'S': out_onion[O_ICMP].contains(in_onion[O_ICMP_DATA])
elif dli != 'Z': raise Exception('Unsupported IE(DLI=%s)' % dli)
self.setTTLFromFingerprint(out_onion)
# assume SI = S
try: si = f['SI']
except: si = 'S'
if si == 'S': out_onion[O_ICMP].set_icmp_seq(in_onion[O_ICMP].get_icmp_seq())
elif si == 'Z': out_onion[O_ICMP].set_icmp_seq(0) # this is not currently supported by nmap, but I've done it already
else:
try: out_onion[O_ICMP].set_icmp_seq(int(si, 16)) # this is not supported either by nmap
except: raise Exception('Unsupported IE(SI=%s)' % si)
# assume CD = S
try: cd = f['CD']
except: cd = 'S'
if cd == 'Z': out_onion[O_ICMP].set_icmp_code(0)
elif cd == 'S': out_onion[O_ICMP].set_icmp_code(in_onion[O_ICMP].get_icmp_code())
elif cd == 'O': out_onion[O_ICMP].set_icmp_code(in_onion[O_ICMP].get_icmp_code()+1) # no examples in DB
else:
try: out_onion[O_ICMP].set_icmp_code(int(cd, 16)) # documented, but no examples available
except: raise Exception('Unsupported IE(CD=%s)' % cd)
# assume TOSI = S
try: tosi = f['TOSI']
except: tosi = 'S'
if tosi == 'Z': out_onion[O_IP].set_ip_tos(0)
elif tosi == 'S': out_onion[O_IP].set_ip_tos(in_onion[O_IP].get_ip_tos())
elif tosi == 'O': out_onion[O_IP].set_ip_tos(in_onion[O_IP].get_ip_tos()+1) # no examples in DB
else:
try: out_onion[O_IP].set_ip_tos(int(tosi, 16)) # documented, but no examples available
except: raise Exception('Unsupported IE(TOSI=%s)' % tosi)
return out_onion
class NMAP2TCPResponder(TCPResponder):
def buildAnswer(self, in_onion):
out_onion = TCPResponder.buildAnswer(self, in_onion)
f = self.fingerprint
# Test R: There is a response = [YN]
if (f['R'] == 'N'): return None
# Test DF: Don't fragment IP bit set = [YN]
if (f['DF'] == 'Y'): out_onion[O_IP].set_ip_df(True)
else: out_onion[O_IP].set_ip_df(False)
# Test W: Initial TCP windows size
try: win = int(f['W'],16)
except: win = 0
out_onion[O_TCP].set_th_win(win)
self.setTTLFromFingerprint(out_onion)
# Test CC: Explicit congestion notification
# Two TCP flags are used in this test: ECE and CWR
try:
cc = f['CC']
if cc == 'N': ece,cwr = 0,0
if cc == 'Y': ece,cwr = 1,0
if cc == 'S': ece,cwr = 1,1
if cc == 'O': ece,cwr = 0,1
except:
ece,cwr = 0,0
if ece: out_onion[O_TCP].set_ECE()
else: out_onion[O_TCP].reset_ECE()
if cwr: out_onion[O_TCP].set_CWR()
else: out_onion[O_TCP].reset_CWR()
# Test O: TCP Options
try: options = f['O']
except: options = ''
self.setTCPOptions(out_onion, options)
# Test S: TCP Sequence number
# Z: Sequence number is zero
# A: Sequence number is the same as the ACK in the probe
# A+: Sequence number is the same as the ACK in the probe + 1
# O: Other value
try: s = f['S']
except: s = 'O'
if s == 'Z': out_onion[O_TCP].set_th_seq(0)
if s == 'A': out_onion[O_TCP].set_th_seq(in_onion[O_TCP].get_th_ack())
if s == 'A+': out_onion[O_TCP].set_th_seq(in_onion[O_TCP].get_th_ack()+1)
if s == 'O': out_onion[O_TCP].set_th_seq(self.machine.getTCPSequence())
# Test A: TCP ACK number
# Z: Ack is zero
# S: Ack is the same as the Sequence number in the probe
# S+: Ack is the same as the Sequence number in the probe + 1
# O: Other value
try: a = f['A']
except: a = 'O'
if a == 'Z': out_onion[O_TCP].set_th_ack(0)
if a == 'S': out_onion[O_TCP].set_th_ack(in_onion[O_TCP].get_th_seq())
if a == 'S+': out_onion[O_TCP].set_th_ack(in_onion[O_TCP].get_th_seq()+1)
# Test Q: Quirks
# R: Reserved bit set (right after the header length)
# U: Urgent pointer non-zero and URG flag clear
try:
if 'R' in f['Q']: out_onion[O_TCP].set_flags(0x800)
except: pass
try:
if 'U' in f['Q']: out_onion[O_TCP].set_th_urp(0xffff)
except: pass
# Test F: TCP Flags
try: flags = f['F']
except: flags = ''
if 'E' in flags: out_onion[O_TCP].set_ECE()
if 'U' in flags: out_onion[O_TCP].set_URG()
if 'A' in flags: out_onion[O_TCP].set_ACK()
if 'P' in flags: out_onion[O_TCP].set_PSH()
if 'R' in flags: out_onion[O_TCP].set_RST()
if 'S' in flags: out_onion[O_TCP].set_SYN()
if 'F' in flags: out_onion[O_TCP].set_FIN()
# Test RD: TCP Data checksum (mostly for data in RST)
try:
crc = f['RD']
if crc != '0': # non-zero RD: craft RST payload whose CRC32 matches the fingerprint
crc = int(crc, 16)
data = 'TCP Port is closed\x00'
data += uncrc32.compensate(data, crc)
data = ImpactPacket.Data(data)
out_onion.append(data)
out_onion[O_TCP].contains(data)
except:
pass
return out_onion
def setTCPOptions(self, onion, options):
def getValue(string, i):
value = 0
idx = i
for c in string[i:]:
try:
value = value * 0x10 + int(c, 16)
except:
break
idx += 1
return value, idx
# Test O,O1=O6: TCP Options
# L: End of Options
# N: NOP
# S: Selective ACK
# Mx: MSS (x is a hex number)
# Wx: Windows Scale (x is a hex number)
# Tve: Timestamp (v and e are two binary digits, v for TSval and e for TSecr)
i = 0
tcp = onion[O_TCP]
while i < len(options):
opt = options[i]
i += 1
if opt == 'L': tcp.add_option(TCPOption(TCPOption.TCPOPT_EOL))
if opt == 'N': tcp.add_option(TCPOption(TCPOption.TCPOPT_NOP))
if opt == 'S': tcp.add_option(TCPOption(TCPOption.TCPOPT_SACK_PERMITTED))
if opt == 'T':
opt = TCPOption(TCPOption.TCPOPT_TIMESTAMP) # default ts = 0, ts_echo = 0
if options[i] == '1': opt.set_ts(self.machine.getTCPTimeStamp())
if options[i+1] == '1': opt.set_ts_echo(0xffffffffL)
tcp.add_option(opt)
i += 2
if opt == 'M':
maxseg, i = getValue(options, i)
tcp.add_option(TCPOption(TCPOption.TCPOPT_MAXSEG, maxseg))
if opt == 'W':
window, i = getValue(options, i)
tcp.add_option(TCPOption(TCPOption.TCPOPT_WINDOW, window))
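# Illustrative only: a standalone walk over an nmap 'O' test string using the
# grammar handled by setTCPOptions() above. The sample string is hypothetical.
def _demo_parse_option_string(options='M5B4ST11NW0'):
    parsed, i = [], 0
    while i < len(options):
        opt = options[i]
        i += 1
        if opt in 'LNS':
            parsed.append(opt)
        elif opt == 'T':
            parsed.append(('T', options[i], options[i+1]))
            i += 2
        elif opt in 'MW':
            value = 0
            while i < len(options) and options[i] in '0123456789ABCDEF':
                value = value * 0x10 + int(options[i], 16)
                i += 1
            parsed.append((opt, value))
    return parsed  # [('M', 0x5b4), 'S', ('T', '1', '1'), 'N', ('W', 0)]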
class nmap2_SEQ(NMAP2TCPResponder):
templateClass = None
signatureName = None
seqNumber = None
def initFingerprint(self):
NMAP2TCPResponder.initFingerprint(self)
if not self.seqNumber: return
else:
OPS = self.machine.fingerprint.get_tests()['OPS']
WIN = self.machine.fingerprint.get_tests()['WIN']
self.fingerprint['O'] = OPS['O%d' % self.seqNumber]
self.fingerprint['W'] = WIN['W%d' % self.seqNumber]
class nmap2_ECN(NMAP2TCPResponder):
templateClass = os_ident.nmap2_ecn_probe
signatureName = 'ECN'
class nmap2_SEQ1(nmap2_SEQ):
templateClass = os_ident.nmap2_seq_1
signatureName = 'T1'
seqNumber = 1
class nmap2_SEQ2(nmap2_SEQ):
templateClass = os_ident.nmap2_seq_2
signatureName = 'T1'
seqNumber = 2
class nmap2_SEQ3(nmap2_SEQ):
templateClass = os_ident.nmap2_seq_3
signatureName = 'T1'
seqNumber = 3
class nmap2_SEQ4(nmap2_SEQ):
templateClass = os_ident.nmap2_seq_4
signatureName = 'T1'
seqNumber = 4
class nmap2_SEQ5(nmap2_SEQ):
templateClass = os_ident.nmap2_seq_5
signatureName = 'T1'
seqNumber = 5
class nmap2_SEQ6(nmap2_SEQ):
templateClass = os_ident.nmap2_seq_6
signatureName = 'T1'
seqNumber = 6
class nmap2_T2(NMAP2TCPResponder):
templateClass = os_ident.nmap2_tcp_open_2
signatureName = 'T2'
class nmap2_T3(NMAP2TCPResponder):
templateClass = os_ident.nmap2_tcp_open_3
signatureName = 'T3'
class nmap2_T4(NMAP2TCPResponder):
templateClass = os_ident.nmap2_tcp_open_4
signatureName = 'T4'
class nmap2_T5(NMAP2TCPResponder):
templateClass = os_ident.nmap2_tcp_closed_1
signatureName = 'T5'
class nmap2_T6(NMAP2TCPResponder):
templateClass = os_ident.nmap2_tcp_closed_2
signatureName = 'T6'
class nmap2_T7(NMAP2TCPResponder):
templateClass = os_ident.nmap2_tcp_closed_3
signatureName = 'T7'
class nmap2_ICMP_1(NMAP2ICMPResponder):
templateClass = os_ident.nmap2_icmp_echo_probe_1
signatureName = 'IE'
class nmap2_ICMP_2(NMAP2ICMPResponder):
templateClass = os_ident.nmap2_icmp_echo_probe_2
signatureName = 'IE'
class Machine:
AssumedTimeIntervalPerPacket = 0.11 # seconds
def __init__(self, emmulating, interface, ipAddress, macAddress, openTCPPorts = [], openUDPPorts = [], nmapOSDB = 'nmap-os-db'):
self.interface = interface
self.ipAddress = ipAddress
self.macAddress = macAddress
self.responders = []
self.decoder = ImpactDecoder.EthDecoder()
self.initPcap()
self.initFingerprint(emmulating, nmapOSDB)
self.initSequenceGenerators()
self.openTCPPorts = openTCPPorts
self.openUDPPorts = openUDPPorts
print self
def openUDPPort(self, port):
if self.isUDPPortOpen(port): return
self.openUDPPorts.append(port)
def isUDPPortOpen(self, port):
return port in self.openUDPPorts
def isTCPPortOpen(self, port):
return port in self.openTCPPorts
def initPcap(self):
self.pcap = pcapy.open_live(self.interface, 65535, 1, 0)
try: self.pcap.setfilter("host %s or ether host %s" % (self.ipAddress, self.macAddress))
except: self.pcap.setfilter("host %s or ether host %s" % (self.ipAddress, self.macAddress), 1, 0xFFFFFF00)
def initGenericResponders(self):
# generic responders
self.addResponder(ARPResponder(self))
self.addResponder(OpenUDPResponder(self))
self.addResponder(ClosedUDPResponder(self))
self.addResponder(OpenTCPResponder(self))
self.addResponder(ClosedTCPResponder(self))
def initFingerprint(self, emmulating, nmapOSDB):
fpm = os_ident.NMAP2_Fingerprint_Matcher('')
f = file(nmapOSDB, 'r')
for text in fpm.fingerprints(f):
fingerprint = fpm.parse_fp(text)
if fingerprint.get_id() == emmulating:
self.fingerprint = fingerprint
self.simplifyFingerprint()
# print fingerprint
return
raise Exception("Couldn't find fingerprint data for %r" % emmulating)
def simplifyFingerprint(self):
tests = self.fingerprint.get_tests()
for probeName in tests:
probe = tests[probeName]
for test in probe:
probe[test] = probe[test].split('|')[0]
def initSequenceGenerators(self):
self.initIPIDGenerator()
self.initTCPISNGenerator()
self.initTCPTSGenerator()
def initIPIDGenerator(self):
seq = self.fingerprint.get_tests()['SEQ']
self.ip_ID = 0
        TI = seq.get('TI', 'O')
if TI == 'Z': self.ip_ID_delta = 0
elif TI == 'RD': self.ip_ID_delta = 30000
elif TI == 'RI': self.ip_ID_delta = 1234
elif TI == 'BI': self.ip_ID_delta = 1024+256
elif TI == 'I': self.ip_ID_delta = 1
elif TI == 'O': self.ip_ID_delta = 123
else: self.ip_ID_delta = int(TI, 16)
        ss = seq.get('SS', 'O')
self.ip_ID_ICMP_delta = None
if ss == 'S': self.ip_ID_ICMP = None
else:
self.ip_ID_ICMP = 0
            II = seq.get('II', 'O')
if II == 'Z': self.ip_ID_ICMP_delta = 0
elif II == 'RD': self.ip_ID_ICMP_delta = 30000
elif II == 'RI': self.ip_ID_ICMP_delta = 1234
elif II == 'BI': self.ip_ID_ICMP_delta = 1024+256
elif II == 'I': self.ip_ID_ICMP_delta = 1
elif II == 'O': self.ip_ID_ICMP_delta = 123
else: self.ip_ID_ICMP_delta = int(II, 16)
# generate a few, so we don't start with 0 when we don't have to
for i in range(10):
self.getIPID()
self.getIPID_ICMP()
print "IP ID Delta: %d" % self.ip_ID_delta
print "IP ID ICMP Delta: %s" % self.ip_ID_ICMP_delta
def initTCPISNGenerator(self):
# tcp_ISN and tcp_ISN_delta for TCP Initial sequence numbers
self.tcp_ISN = 0
try:
self.tcp_ISN_GCD = int(self.fingerprint.get_tests()['SEQ']['GCD'].split('-')[0], 16)
except:
self.tcp_ISN_GCD = 1
try:
isr = self.fingerprint.get_tests()['SEQ']['ISR'].split('-')
if len(isr) == 1:
isr = int(isr[0], 16)
else:
isr = (int(isr[0], 16) + int(isr[1], 16)) / 2
except:
isr = 0
try:
sp = self.fingerprint.get_tests()['SEQ']['SP'].split('-')
sp = int(sp[0], 16)
except:
sp = 0
self.tcp_ISN_stdDev = (2**(sp/8.0)) * 5 / 4 # n-1 on small populations... erm...
if self.tcp_ISN_GCD > 9:
self.tcp_ISN_stdDev *= self.tcp_ISN_GCD
self.tcp_ISN_stdDev *= self.AssumedTimeIntervalPerPacket
self.tcp_ISN_delta = 2**(isr/8.0) * self.AssumedTimeIntervalPerPacket
# generate a few, so we don't start with 0 when we don't have to
for i in range(10): self.getTCPSequence()
print "TCP ISN Delta: %f" % self.tcp_ISN_delta
print "TCP ISN Standard Deviation: %f" % self.tcp_ISN_stdDev
def initTCPTSGenerator(self):
# tcp_TS and tcp_TS_delta for TCP Time stamp generation
self.tcp_TS = 0
try: ts = self.fingerprint.get_tests()['SEQ']['TS']
except: ts = 'U'
if ts == 'U' or ts == 'Z': self.tcp_TS_delta = 0
else:
self.tcp_TS_delta = (2**int(ts, 16)) * self.AssumedTimeIntervalPerPacket
# generate a few, so we don't start with 0 when we don't have to
for i in range(10): self.getTCPTimeStamp()
print "TCP TS Delta: %f" % self.tcp_TS_delta
def getIPID(self):
answer = self.ip_ID
self.ip_ID += self.ip_ID_delta
self.ip_ID %= 0x10000L
# print "IP ID: %x" % answer
return answer
def getIPID_ICMP(self):
if self.ip_ID_ICMP is None:
return self.getIPID()
answer = self.ip_ID_ICMP
self.ip_ID_ICMP += self.ip_ID_ICMP_delta
self.ip_ID_ICMP %= 0x10000L
# print "---> IP ID: %x" % answer
return answer
def getTCPSequence(self):
answer = self.tcp_ISN + self.tcp_ISN_stdDev # *random.random()
self.tcp_ISN_stdDev *= -1
answer = int(int(answer/self.tcp_ISN_GCD) * self.tcp_ISN_GCD)
self.tcp_ISN += self.tcp_ISN_delta
self.tcp_ISN %= 0x100000000L
# print "---> TCP Sequence: %d" % (answer % 0x100000000L)
return answer % 0x100000000L
def getTCPTimeStamp(self):
answer = int(round(self.tcp_TS))
self.tcp_TS += self.tcp_TS_delta
self.tcp_TS %= 0x100000000L
# print "---> TCP Time Stamp: %x" % answer
return answer
def sendPacket(self, onion):
if not onion: return
print "--> Packet sent:"
#print onion[0]
#print
self.pcap.sendpacket(onion[O_ETH].get_packet())
def addResponder(self, aResponder):
self.responders.append(aResponder)
def run(self):
while 1:
            p = self.pcap.next()
            # pcapy.next() usually returns a (header, data) tuple; fall back
            # for versions that hand back the raw packet only
            try: in_onion = [self.decoder.decode(p[1])]
            except: in_onion = [self.decoder.decode(p[0])]
try:
while 1: in_onion.append(in_onion[-1].child())
except:
pass
#print "-------------- Received: ", in_onion[0]
for r in self.responders:
if r.process(in_onion): break
def main():
def initResponders(machine):
# cmd responder
# machine.addResponder(UDPCommandResponder(machine).set_port(UDP_CMD_PORT))
# nmap2 specific responders
machine.addResponder(nmap2_SEQ1(machine))
machine.addResponder(nmap2_SEQ2(machine))
machine.addResponder(nmap2_SEQ3(machine))
machine.addResponder(nmap2_SEQ4(machine))
machine.addResponder(nmap2_SEQ5(machine))
machine.addResponder(nmap2_SEQ6(machine))
machine.addResponder(nmap2_ECN(machine))
machine.addResponder(nmap2_T2(machine))
machine.addResponder(nmap2_T3(machine))
machine.addResponder(nmap2_T4(machine))
machine.addResponder(nmap2_T5(machine))
machine.addResponder(nmap2_T6(machine))
machine.addResponder(nmap2_T7(machine))
machine.addResponder(nmap2_ICMP_1(machine))
machine.addResponder(nmap2_ICMP_2(machine))
machine.addResponder(NMAP2UDPResponder(machine))
from sys import argv, exit
def usage():
print """
if arg == '-h': usage()
if arg == '--help': usage()
if arg == '-f': Fingerprint = value
if arg == '-p': IP = value
if arg == '-m': MAC = value
if arg == '-i': IFACE = value
if arg == '-d': nmapOSDB = value
where:
arg = argv[i]
value = argv[i+1]
"""
exit()
global Fingerprint, IFACE, MAC, IP, nmapOSDB
for i in xrange(len(argv)):
arg = argv[i]
try: value = argv[i+1]
except: value = None
if arg == '-h': usage()
if arg == '--help': usage()
if arg == '-f': Fingerprint = value
if arg == '-p': IP = value
if arg == '-m': MAC = value
if arg == '-i': IFACE = value
if arg == '-d': nmapOSDB = value
print "Emulating: %r" % Fingerprint
print "at %s / %s / %s" % (IFACE, MAC, IP)
machine = Machine(
Fingerprint,
IFACE,
IP,
MAC,
OPEN_TCP_PORTS,
OPEN_UDP_PORTS,
nmapOSDB = nmapOSDB)
initResponders(machine)
machine.initGenericResponders()
machine.run()
if __name__ == '__main__':
# Init the example's logger theme
logger.init()
main()
# All Probes
# [x] SEQ
# [x] OPS
# [x] WIN
# [x] T1
# [x] T2
# [x] T3
# [x] T4
# [x] T5
# [x] T6
# [x] T7
# [x] IE
# [x] ECN
# [x] U1
# All Tests
# SEQ()
# [x] TCP ISN sequence predictability index (SP)
# [x] TCP ISN greatest common divisor (GCD)
# [x] TCP ISN counter rate (ISR)
# [x] IP ID sequence generation algorithm on TCP Open ports (TI)
# [x] Z - All zeros
# [x] RD - Random: It increments at least once by at least 20000.
# [-] Hex Value - fixed IP ID
# [x] RI - Random positive increments. Any (delta_i > 1000, and delta_i % 256 != 0) or (delta_i > 256000 and delta_i % 256 == 0)
# [x] BI - Broken increment. All delta_i % 256 = 0 and all delta_i <= 5120.
# [x] I - Incremental. All delta_i < 10
#           [x] O - (Omitted, the test does not show in the fingerprint). None of the others apply.
# [-] IP ID sequence generation algorithm on TCP closed ports (CI)
# [x] IP ID sequence generation algorithm on ICMP messages (II)
# [x] Shared IP ID sequence Boolean (SS)
# [x] TCP timestamp option algorithm (TS)
# [x] U - unsupported (don't send TS)
# [x] 0 - Zero
# [x] 1 - 0-5.66 (2 Hz)
# [x] 7 - 70-150 (100 Hz)
# [x] 8 - 150-350 (200 Hz)
#        [x] other values - avg_freq = sum(TS_diff/time_diff); TS = round(0.5 + math.log(avg_freq)/math.log(2))
#                           with time_diff = 0.11 sec per probe
# OPS()
# [x] TCP options (O, O1-O6)
# WIN()
# [x] TCP initial window size (W, W1-W6)
# ECN, T1-T7
# [x] TCP options (O, O1-O6)
# [x] TCP initial window size (W, W1-W6)
# [x] Responsiveness (R)
# [x] IP don't fragment bit (DF)
# [x] IP initial time-to-live (T)
# [x] IP initial time-to-live guess (TG)
# [x] Explicit congestion notification (CC)
# [x] TCP miscellaneous quirks (Q)
# [x] TCP sequence number (S)
# [x] TCP acknowledgment number (A)
# [x] TCP flags (F)
# [x] TCP RST data checksum (RD)
# IE()
# [x] Responsiveness (R)
# [x] Don't fragment (ICMP) (DFI)
# [x] IP initial time-to-live (T)
# [x] IP initial time-to-live guess (TG)
# [x] ICMP response code (CD)
#-[x] IP Type of Service (TOSI)
#-[x] ICMP Sequence number (SI)
#-[x] IP Data Length (DLI)
# U1()
# [x] Responsiveness (R)
# [x] IP don't fragment bit (DF)
# [x] IP initial time-to-live (T)
# [x] IP initial time-to-live guess (TG)
# [x] IP total length (IPL)
# [x] Unused port unreachable field nonzero (UN)
# [x] Returned probe IP total length value (RIPL)
# [x] Returned probe IP ID value (RID)
# [x] Integrity of returned probe IP checksum value (RIPCK)
# [x] Integrity of returned probe UDP checksum (RUCK)
# [x] Integrity of returned UDP data (RUD)
# [-] ??? (TOS) Type of Service
# [-] ??? (RUL) Length of return UDP packet is correct
# sudo nmap -O 127.0.0.2 -p 22,111,89
# sudo python nmapAnswerMachine.py -i eth0 -p 192.168.66.254 -f 'Sun Solaris 9 (SPARC)'
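# Illustrative sketch (not part of the original script) of the TS mapping
# implemented in initTCPTSGenerator above: a fingerprint with SEQ.TS = 7
# yields a per-probe timestamp step of
#   2**0x7 * 0.11 = 14.08
# i.e. roughly a 128 Hz timestamp counter sampled once every
# AssumedTimeIntervalPerPacket (0.11 sec).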
|
|
#!/usr/bin/env python
from __future__ import print_function
from builtins import str
from builtins import object
import sys
class SEx_reader(object):
"""Reads in the SExtractor catalogs and stores the information in a dictionary"""
def __init__(self, catalog, preID=None, verb=None):
self.verb = verb
self.catalog = catalog
if preID:
self.preID = preID + '_'
else:
self.preID = ''
self._read_catalog()
return
def _SEx_head(self):
if self.verb:
print("\r Parsing SEx head for:", self.catalog, file=sys.stderr)
self.SExcols = {}
# Read the detection catalog
self.catstr = open(self.catalog).readlines()
for line in self.catstr:
if line[0] != '#':
break
if line[:2] == "##":
continue
try:
line = line.strip()
vals = line.split()
col = vals[1]
key = vals[2]
self.SExcols[key] = int(col) - 1
if self.verb:
print("# %-20s %s" % (key, self.SExcols[key] + 1),
file=sys.stderr)
except:
continue
self.ncols = len(list(self.SExcols.values()))
self.colnames = list(self.SExcols.keys())
#print >>sys.stderr,"# Read %s columns" % self.ncols
return
def _read_catalog(self):
if self.verb:
print("\r Reading:", self.catalog, file=sys.stderr)
self._SEx_head()
self.cat = {}
self.IDs = []
# Make the dictionary
self.cat['SHAPE'] = {}
for key in list(self.SExcols.keys()):
self.cat[key] = {}
for line in self.catstr:
vals = line.split()
if vals[0][0] == "#":
continue
ID = self.preID + str(vals[0])
self.IDs.append(ID)
for key in list(self.SExcols.keys()):
if key == 'NUMBER' or key == 'FLAGS':
self.cat[key][ID] = int(vals[self.SExcols[key]])
else:
self.cat[key][ID] = float(vals[self.SExcols[key]])
                # Replace MAGERR == 0 by something more sensible
if key[0:6] == 'MAGERR' and self.cat[key][ID] == 0:
self.cat[key][ID] = 1.0e-5
# Add the SHAPE param
self.cat['SHAPE'][ID] = (
self.cat['X_IMAGE'][ID], self.cat['Y_IMAGE'][ID],
self.cat['A_IMAGE'][ID] * self.cat['KRON_RADIUS'][ID],
self.cat['B_IMAGE'][ID] * self.cat['KRON_RADIUS'][ID],
self.cat['THETA_IMAGE'][ID])
self.nrows = len(list(self.cat['NUMBER'].values()))
return
class SEx_reader_multi(object):
"""Reads in the SExtractor catalogs and stores the information in a dictionary"""
def __init__(self, verb=None):
self.verb = verb
self.cat = {}
self.IDs = []
self.readcat = None
return
# Function to read the catalog
def read_catalog(self, catalog, preID=None):
        self.catalog = catalog
        # normalize the prefix once, so the ID built below never
        # concatenates None
        if preID:
            self.preID = preID + '_'
        else:
            self.preID = ''
if self.verb:
print("\r Reading:", self.catalog, file=sys.stderr)
self._SEx_head()
# Make the dictionaries only once
if not self.readcat:
self.cat['SHAPE'] = {}
for key in list(self.SExcols.keys()):
self.cat[key] = {}
self.readcat = 'yes'
        # Go through the whole catalog, line by line
for line in self.catstr:
vals = line.split()
if vals[0][0] == "#":
continue
ID = self.preID + "_" + str(vals[0])
self.IDs.append(ID)
for key in list(self.SExcols.keys()):
if key == 'NUMBER' or key == 'FLAGS':
self.cat[key][ID] = int(vals[self.SExcols[key]])
else:
self.cat[key][ID] = float(vals[self.SExcols[key]])
                # Replace MAGERR == 0 by something more sensible
if key[0:6] == 'MAGERR' and self.cat[key][ID] == 0:
self.cat[key][ID] = 1.0e-5
# Add the SHAPE param
self.cat['SHAPE'][ID] = (
self.cat['X_IMAGE'][ID], self.cat['Y_IMAGE'][ID],
self.cat['A_IMAGE'][ID] * self.cat['KRON_RADIUS'][ID],
self.cat['B_IMAGE'][ID] * self.cat['KRON_RADIUS'][ID],
self.cat['THETA_IMAGE'][ID])
#self.nrows = len( self.cat['NUMBER'].values())
return
def _SEx_head(self):
if self.verb:
print("\r Parsing SEx head for:", self.catalog, file=sys.stderr)
self.SExcols = {}
# Read the detection catalog
self.catstr = open(self.catalog).readlines()
for line in self.catstr:
if line[0] != '#':
break
if line[:2] == "##":
continue
try:
line = line.strip()
vals = line.split()
col = vals[1]
key = vals[2]
self.SExcols[key] = int(col) - 1
#if self.verb:
# print >>sys.stderr, "# %-20s %s" % (key,self.SExcols[key]+1)
except:
continue
self.ncols = len(list(self.SExcols.values()))
self.colnames = list(self.SExcols.keys())
#print >>sys.stderr,"# Read %s columns" % self.ncols
return
#catalog = "/home/felipe/BCS/PROC/BCS2322-5417/BCS2322-5417i.cat"
#c = SEx_reader(catalog,verb=1)
#print c.IDs
#print c.colnames
#print c.nrows
#print c.ncols
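# Illustrative access patterns (MAG_AUTO is an assumed column name; any
# SExtractor column parsed from the header works the same way):
#print c.cat['SHAPE'][c.IDs[0]]    # (x, y, a*kron, b*kron, theta) for the first source
#print c.cat['MAG_AUTO'][c.IDs[0]]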
|
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 23 11:13:22 2017
@author: Suhas Somnath, Stephen Jesse
"""
from __future__ import division, print_function, absolute_import, unicode_literals
import numpy as np
from scipy.optimize import least_squares
import itertools as itt
import multiprocessing as mp
import time as tm
from _warnings import warn
from sklearn.neighbors import KNeighborsClassifier
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from ...io.io_utils import recommendCores, realToCompound
from ...io.microdata import MicroDataset, MicroDataGroup
from ...io.io_hdf5 import ioHDF5
# atom_dtype = np.dtype([('x', np.float32),
# ('y', np.float32),
# ('type', np.uint32)])
atom_dtype = np.dtype({'names': ['x', 'y', 'type'],
'formats': [np.float32, np.float32, np.float32]})
# atom_coeff_dtype = np.dtype([('Amplitude', np.float32),
# ('x', np.float32),
# ('y', np.float32),
# ('Sigma', np.float32)])
atom_coeff_dtype = np.dtype({'names': ['Amplitude', 'x', 'y', 'Sigma'],
'formats': [np.float32, np.float32, np.float32, np.float32]})
def multi_gauss_surface_fit(coef_mat, s_mat):
"""
Evaluates the provided coefficients for N gaussian peaks to generate a 2D matrix
Parameters
----------
coef_mat : 2D numpy array
Coefficients arranged as [atom, parameter] where the parameters are:
height, row, column, sigma (width of the gaussian)
s_mat : 3D numpy array
Stack of the mesh grid
Returns
-------
multi_gauss : 2D numpy array
2D matrix with the N gaussian peaks whose properties are specified in the coefficients matrix
"""
x = s_mat[:, :, 0]
y = s_mat[:, :, 1]
num_peaks = coef_mat.shape[0]
multi_gauss = np.zeros(shape=x.shape, dtype=np.float32)
for peak_ind in range(num_peaks):
amp = coef_mat[peak_ind, 0]
x_val = coef_mat[peak_ind, 1]
y_val = coef_mat[peak_ind, 2]
sigma = coef_mat[peak_ind, 3]
gauss = amp * np.exp(-((x - x_val) ** 2 + (y - y_val) ** 2) / sigma ** 2)
multi_gauss += gauss
return multi_gauss
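# Illustrative usage sketch (not part of the original module): evaluate two
# made-up peaks on a 32x32 grid. Coefficient rows follow the documented
# [amplitude, x, y, sigma] layout and s_mat is built exactly as in
# fit_atom_pos below.
#   s1, s2 = np.meshgrid(range(32), range(32))
#   s_mat = np.dstack((s1.T, s2.T))
#   coef_mat = np.array([[1.0, 10.0, 10.0, 2.0],
#                        [0.5, 20.0, 20.0, 2.0]])
#   img = multi_gauss_surface_fit(coef_mat, s_mat)   # img.shape == (32, 32)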
def fit_atom_pos(single_parm):
"""
Fits the position of a single atom.
Parameters
----------
single_parm : tuple
atom_ind : unsigned integer
Index of the atom being fitted
parm_dict : dictionary
Dictionary containing all the guess values, table of nearest neighbors for each atom, and the original image
fitting_parms : dictionary
Dictionary of the many fitting parameters
Returns
-------
coef_guess_mat : 2D numpy array
guess coefficients for the set of N atoms
coef_fit_mat : 2D numpy array
Fit coefficients for the set of N atoms
This function also returns all intermediate results for debugging purposes if parm_dict['verbose']=True
"""
atom_ind = single_parm[0]
parm_dict = single_parm[1]
fitting_parms = single_parm[2]
all_atom_guesses = parm_dict['atom_pos_guess']
closest_neighbors_mat = parm_dict['nearest_neighbors']
cropped_clean_image = parm_dict['cropped_cleaned_image']
fit_region_size = fitting_parms['fit_region_size']
gauss_width_guess = fitting_parms['gauss_width_guess']
num_nearest_neighbors = fitting_parms['num_nearest_neighbors']
min_amplitude = fitting_parms['min_amplitude']
max_amplitude = fitting_parms['max_amplitude']
position_range = fitting_parms['position_range']
max_function_evals = fitting_parms['max_function_evals']
min_gauss_width_ratio = fitting_parms['min_gauss_width_ratio']
max_gauss_width_ratio = fitting_parms['max_gauss_width_ratio']
verbose = False
if 'verbose' in parm_dict:
verbose = parm_dict['verbose']
x_center_atom = all_atom_guesses[atom_ind, 0]
y_center_atom = all_atom_guesses[atom_ind, 1]
x_neighbor_atoms = all_atom_guesses[closest_neighbors_mat[atom_ind], 0]
y_neighbor_atoms = all_atom_guesses[closest_neighbors_mat[atom_ind], 1]
x_range = slice(max(int(np.round(x_center_atom - fit_region_size)), 0),
min(int(np.round(x_center_atom + fit_region_size)),
cropped_clean_image.shape[0]))
y_range = slice(max(int(np.round(y_center_atom - fit_region_size)), 0),
min(int(np.round(y_center_atom + fit_region_size)),
cropped_clean_image.shape[1]))
will_fail = False
# Stephen says that it does not matter if guesses are outside but the fit does not work
# well when guesses are outside the window
x_outside = np.hstack((np.where(x_neighbor_atoms < x_range.start)[0],
np.where(x_neighbor_atoms > x_range.stop)[0]))
y_outside = np.hstack((np.where(y_neighbor_atoms < y_range.start)[0],
np.where(y_neighbor_atoms > y_range.stop)[0]))
guesses_outside = np.unique(np.hstack((x_outside, y_outside)))
if guesses_outside.size >= 0.5 * num_nearest_neighbors:
if verbose:
warn('Atom {}: Too few ({} of {}) neighbors within window to fit'.format(atom_ind, num_nearest_neighbors -
guesses_outside.size,
num_nearest_neighbors))
will_fail = True
else:
guesses_inside = np.invert(np.in1d(np.arange(num_nearest_neighbors), guesses_outside))
x_neighbor_atoms = x_neighbor_atoms[guesses_inside]
y_neighbor_atoms = y_neighbor_atoms[guesses_inside]
num_nearest_neighbors = x_neighbor_atoms.size
fit_region = cropped_clean_image[x_range, y_range]
# define x and y fitting range
s1, s2 = np.meshgrid(range(x_range.start, x_range.stop),
range(y_range.start, y_range.stop))
s_mat = np.dstack((s1.T, s2.T))
# initial guess values
x_guess = np.hstack((x_center_atom, x_neighbor_atoms))
y_guess = np.hstack((y_center_atom, y_neighbor_atoms))
a_guess = cropped_clean_image[np.uint32(x_guess), np.uint32(y_guess)]
sigma_guess = gauss_width_guess * np.ones(num_nearest_neighbors + 1)
coef_guess_mat = np.transpose(np.vstack((a_guess, x_guess,
y_guess, sigma_guess)))
# Set up upper and lower bounds:
lb_mat = [min_amplitude * np.ones(num_nearest_neighbors + 1),
coef_guess_mat[:, 1] - position_range,
coef_guess_mat[:, 2] - position_range,
min_gauss_width_ratio * gauss_width_guess * np.ones(num_nearest_neighbors + 1)]
ub_mat = [max_amplitude * np.ones(num_nearest_neighbors + 1),
coef_guess_mat[:, 1] + position_range,
coef_guess_mat[:, 2] + position_range,
max_gauss_width_ratio * gauss_width_guess * np.ones(num_nearest_neighbors + 1)]
lb_mat = np.transpose(lb_mat)
ub_mat = np.transpose(ub_mat)
if will_fail:
coef_fit_mat = coef_guess_mat
plsq = None
else:
# Now refine the positions!
def gauss_2d_residuals(parms_vec, orig_data_mat, x_data_mat):
"""
Calculates the residual
Parameters
----------
parms_vec : 1D numpy array
Raveled version of the parameters matrix
orig_data_mat : 2D numpy array
Section of the image being fitted
x_data_mat : 3D numpy array
Returns
-------
err_vec : 1D numpy array
Difference between the original data and the matrix obtained by evaluating parms_vec with x_data_mat
"""
# Only need to reshape the parms from 1D to 2D
parms_mat = np.reshape(parms_vec, (-1, 4))
err = orig_data_mat - multi_gauss_surface_fit(parms_mat, x_data_mat)
return err.ravel()
plsq = least_squares(gauss_2d_residuals,
coef_guess_mat.ravel(),
args=(fit_region, s_mat),
bounds=(lb_mat.ravel(), ub_mat.ravel()),
jac='2-point', max_nfev=max_function_evals)
coef_fit_mat = np.reshape(plsq.x, (-1, 4))
if verbose:
return coef_guess_mat, lb_mat, ub_mat, coef_fit_mat, fit_region, s_mat, plsq
else:
return coef_guess_mat, coef_fit_mat
def fit_atom_positions_parallel(parm_dict, fitting_parms, num_cores=None):
"""
Fits the positions of N atoms in parallel
Parameters
----------
parm_dict : dictionary
Dictionary containing the guess positions, nearest neighbors and original image
fitting_parms : dictionary
Parameters used for atom position fitting
num_cores : unsigned int (Optional. Default = available logical cores - 2)
Number of cores to compute with
Returns
-------
results : list of tuples
Guess and fit coefficients
"""
parm_dict['verbose'] = False
all_atom_guesses = parm_dict['atom_pos_guess']
t_start = tm.time()
num_cores = recommendCores(all_atom_guesses.shape[0], requested_cores=num_cores, lengthy_computation=False)
    if num_cores > 1:
        pool = mp.Pool(processes=num_cores)
        # itertools.izip exists only on Python 2; the built-in zip works on
        # both and pool.imap accepts any iterable
        parm_list = zip(range(all_atom_guesses.shape[0]), itt.repeat(parm_dict), itt.repeat(fitting_parms))
        # imap requires chunksize >= 1
        chunk = max(1, int(all_atom_guesses.shape[0] / num_cores))
        jobs = pool.imap(fit_atom_pos, parm_list, chunksize=chunk)
        results = [j for j in jobs]
        pool.close()
    else:
        parm_list = zip(range(all_atom_guesses.shape[0]), itt.repeat(parm_dict), itt.repeat(fitting_parms))
        results = [fit_atom_pos(parm) for parm in parm_list]
tot_time = np.round(tm.time() - t_start)
print('Took {} sec to find {} atoms with {} cores'.format(tot_time, len(results), num_cores))
return results
def fit_atom_positions_dset(h5_grp, fitting_parms=None, num_cores=None):
"""
A temporary substitute for a full-fledged process class.
Computes the guess and fit coefficients for the provided atom guess positions and writes these results to the
given h5 group
Parameters
----------
h5_grp : h5py.Group reference
Group containing the atom guess positions, cropped clean image and some necessary parameters
fitting_parms : dictionary
Parameters used for atom position fitting
num_cores : unsigned int (Optional. Default = available logical cores - 2)
Number of cores to compute with
Returns
-------
h5_grp : h5py.Group reference
Same group as the parameter but now with the 'Guess' and 'Fit' datasets
"""
cropped_clean_image = h5_grp['Cropped_Clean_Image'][()]
h5_guess = h5_grp['Guess_Positions']
all_atom_guesses = np.transpose(np.vstack((h5_guess['x'], h5_guess['y']))) # leave out the atom type for now
win_size = h5_grp.attrs['motif_win_size']
psf_width = h5_grp.attrs['psf_width']
num_atoms = all_atom_guesses.shape[0] # number of atoms
# build distance matrix
pos_vec = all_atom_guesses[:, 0] + 1j * all_atom_guesses[:, 1]
pos_mat1 = np.tile(np.transpose(np.atleast_2d(pos_vec)), [1, num_atoms])
pos_mat2 = np.transpose(pos_mat1)
d_mat = np.abs(pos_mat2 - pos_mat1) # matrix of distances between all atoms
# sort the distance matrix and keep only the atoms within the nearest neighbor limit
neighbor_dist_order = np.argsort(d_mat)
if fitting_parms is None:
num_nearest_neighbors = 6 # to consider when fitting
fitting_parms = {'fit_region_size': win_size * 0.80, # region to consider when fitting
'gauss_width_guess': psf_width * 2,
'num_nearest_neighbors': num_nearest_neighbors,
'min_amplitude': 0, # min amplitude limit for gauss fit
'max_amplitude': 2, # max amplitude limit for gauss fit
'position_range': win_size / 2,
# range that the fitted position can go from initial guess position[pixels]
'max_function_evals': 100,
'min_gauss_width_ratio': 0.5, # min width of gauss fit ratio,
'max_gauss_width_ratio': 2, # max width of gauss fit ratio
'fitting_tolerance': 1E-4}
num_nearest_neighbors = fitting_parms['num_nearest_neighbors']
# neighbor dist order has the (indices of the) neighbors for each atom sorted by distance
closest_neighbors_mat = neighbor_dist_order[:, 1:num_nearest_neighbors + 1]
parm_dict = {'atom_pos_guess': all_atom_guesses,
'nearest_neighbors': closest_neighbors_mat,
'cropped_cleaned_image': cropped_clean_image}
# do the parallel fitting
fitting_results = fit_atom_positions_parallel(parm_dict, fitting_parms, num_cores=num_cores)
# Make datasets to write back to file:
guess_parms = np.zeros(shape=(num_atoms, num_nearest_neighbors+1), dtype=atom_coeff_dtype)
fit_parms = np.zeros(shape=guess_parms.shape, dtype=guess_parms.dtype)
for atom_ind, single_atom_results in enumerate(fitting_results):
guess_coeff, fit_coeff = single_atom_results
num_neighbors_used = guess_coeff.shape[0]
guess_parms[atom_ind, :num_neighbors_used] = np.squeeze(realToCompound(guess_coeff, guess_parms.dtype))
fit_parms[atom_ind, :num_neighbors_used] = np.squeeze(realToCompound(fit_coeff, guess_parms.dtype))
ds_atom_guesses = MicroDataset('Guess', data=guess_parms)
ds_atom_fits = MicroDataset('Fit', data=fit_parms)
dgrp_atom_finding = MicroDataGroup(h5_grp.name.split('/')[-1], parent=h5_grp.parent.name)
dgrp_atom_finding.attrs = fitting_parms
dgrp_atom_finding.addChildren([ds_atom_guesses, ds_atom_fits])
hdf = ioHDF5(h5_grp.file)
h5_atom_refs = hdf.writeData(dgrp_atom_finding)
return h5_grp
def visualize_atom_fit(atom_rough_pos, all_atom_guesses, parm_dict, fitting_parms, cropped_clean_image):
"""
Computes the fit for a given atom and plots the results
Parameters
----------
atom_rough_pos : tuple
row, column position of the atom from the guess
all_atom_guesses : 2D numpy array
Guesses of atom positions arranged as [atom index, row(0) and column(1)]
parm_dict : dictionary
Dictionary containing the guess positions, nearest neighbors and original image
fitting_parms : dictionary
Parameters used for atom position fitting
cropped_clean_image : 2D numpy array
original image to fit to
Returns
-------
coef_guess_mat : 2D numpy array
Coefficients arranged as [atom, parameter] where the parameters are:
height, row, column, sigma (width of the gaussian)
lb_mat : 2D numpy array
Lower bounds for the fits
ub_mat : 2D numpy array
Upper bounds for the fits
coef_fit_mat : 2D numpy array
Coefficients arranged as [atom, parameter] where the parameters are:
height, row, column, sigma (width of the gaussian)
fit_region : 2D numpy array
Section of the image being fitted
s_mat : 3D numpy array
Stack of the mesh grid
plsq : Least squares fit object
Use this to find if the fitting went well
fig_01 : matplotlib.pyplot.figure handle
Handle to figure 1
fig_02 : matplotlib.pyplot.figure handle
Handle to figure 2
"""
temp_dist = np.abs(
all_atom_guesses[:, 0] + 1j * all_atom_guesses[:, 1] - (atom_rough_pos[0] + 1j * atom_rough_pos[1]))
atom_ind = np.argsort(temp_dist)[0]
parm_dict['verbose'] = True
coef_guess_mat, lb_mat, ub_mat, coef_fit_mat, fit_region, s_mat, plsq = fit_atom_pos((atom_ind, parm_dict, fitting_parms))
print('\tAmplitude\tx position\ty position\tsigma')
print('-------------------GUESS---------------------')
print(coef_guess_mat)
print('-------------------LOWER---------------------')
print(lb_mat)
print('-------------------UPPER---------------------')
print(ub_mat)
print('--------------------FIT----------------------')
print(coef_fit_mat)
print('-------------- LEAST SQUARES ----------------')
print(plsq.message)
print('Function evaluations: {}\nJacobian evaluations: {}'.format(plsq.nfev, plsq.njev))
gauss_2d_guess = multi_gauss_surface_fit(coef_guess_mat, s_mat)
gauss_2d_fit = multi_gauss_surface_fit(coef_fit_mat, s_mat)
fit_region_size = fitting_parms['fit_region_size']
fig_01, axis = plt.subplots(figsize=(8, 8))
axis.hold(True) # Without this, plots do not show up on the notebooks
axis.imshow(cropped_clean_image, interpolation='none', cmap="gray")
axis.add_patch(patches.Rectangle((all_atom_guesses[atom_ind, 1] - fit_region_size,
all_atom_guesses[atom_ind, 0] - fit_region_size),
2 * fit_region_size, 2 * fit_region_size, fill=False,
color='orange', linewidth=2))
axis.scatter(all_atom_guesses[:, 1], all_atom_guesses[:, 0], color='yellow')
axis.scatter(all_atom_guesses[atom_ind, 1], all_atom_guesses[atom_ind, 0], color='red')
axis.scatter(coef_guess_mat[1:, 2], coef_guess_mat[1:, 1], color='green')
fig_01.tight_layout()
fig_02, axes = plt.subplots(ncols=2, nrows=2, figsize=(10, 10))
for axis, img_mat, coeff_mat, pos_mat, img_title in zip(axes.flat,
[fit_region, fit_region, gauss_2d_guess, gauss_2d_fit],
[coef_guess_mat, coef_fit_mat, coef_guess_mat,
coef_fit_mat],
[all_atom_guesses, all_atom_guesses, all_atom_guesses,
all_atom_guesses],
['Original + guess pos', 'Original + fit pos', 'Guess',
'Fit']):
centered_pos_mat = np.copy(coeff_mat[:, 1:3])
# TODO: This is not necessarily correct, especially when the window extends beyond the image
centered_pos_mat[:, 0] -= pos_mat[atom_ind, 0] - (0.5 * fit_region.shape[0])
centered_pos_mat[:, 1] -= pos_mat[atom_ind, 1] - (0.5 * fit_region.shape[1])
axis.hold(True) # Without this, plots do not show up on the notebooks
axis.imshow(img_mat, cmap="gray")
axis.set_title(img_title)
axis.scatter(centered_pos_mat[1:, 1], centered_pos_mat[1:, 0], color='orange')
axis.scatter(centered_pos_mat[0, 1], centered_pos_mat[0, 0], color='red')
fig_02.tight_layout()
return coef_guess_mat, lb_mat, ub_mat, coef_fit_mat, fit_region, s_mat, plsq, fig_01, fig_02
def remove_duplicate_labels(atom_labels, psf_width, double_cropped_image, distance_multiplier=1.5,
num_neighbors=6, show_culprit_plot=False):
"""
Removes incorrect labels for atoms having multiple labels
Parameters
----------
atom_labels : list of 2D numpy arrays
List of coordinates (row, col) for the different atom families
psf_width : float
PSF width
double_cropped_image : 2D numpy array
Image that goes along with the provided coordinates
distance_multiplier : float (Optional. Default = 1.5)
(Upto) how many times the PSF width is considered too close to an existing label
num_neighbors : unsigned int
Number of neighbors for the K Nearest Neighbors classifier
show_culprit_plot : Boolean (Optional. Default = False)
Whether or not to show the two intermediate plots
Returns
-------
new_atom_labels : list of 2D numpy arrays
List of coordinates (row, col) for the different atom families with duplicates removed
"""
# In certain cases, the same atom is identified by two or more different classes:
all_atom_pos = np.vstack(atom_labels)
atom_families = list()
for family_ind, family in enumerate(atom_labels):
atom_families.append(np.ones(shape=family.shape[0], dtype=np.uint32) * family_ind)
atom_families = np.hstack(atom_families)
# build distance matrix
pos_vec = all_atom_pos[:, 0] + 1j * all_atom_pos[:, 1]
pos_mat1 = np.tile(np.transpose(np.atleast_2d(pos_vec)), [1, all_atom_pos.shape[0]])
pos_mat2 = np.transpose(pos_mat1)
d_mat = np.abs(pos_mat2 - pos_mat1) # matrix of distances between all atoms
    # keep only the lower triangle (the diagonal and upper triangle become
    # zero), then push those zeroed entries far away so they cannot be
    # mistaken for close pairs:
    d_mat = np.tril(d_mat, -1)
    d_mat[d_mat == 0] = 100 * psf_width
# Now find the atoms which are too close to each other:
culprits = np.vstack(np.where(d_mat <= distance_multiplier * psf_width)).T
# the culprits should be arranged as pairs in a N,2 matrix
if culprits.size == 0:
# nothing to remove
return atom_labels
    if np.unique(culprits).size != culprits.size:
        print('Warning: three or more atoms found close to each other!')
good_atom_inds = np.ones(all_atom_pos.shape[0], dtype=bool)
good_atom_inds[culprits.flat] = False
# overlay atom pair, positions on original image
if show_culprit_plot:
fig, axis = plt.subplots(figsize=(14, 14))
axis.hold(True)
axis.imshow(double_cropped_image, interpolation='none', cmap="gray")
axis.scatter(all_atom_pos[culprits[:, 0], 1], all_atom_pos[culprits[:, 0], 0], color='yellow')
axis.scatter(all_atom_pos[culprits[:, 1], 1], all_atom_pos[culprits[:, 1], 0], color='red')
axis.scatter(all_atom_pos[good_atom_inds, 1], all_atom_pos[good_atom_inds, 0], color='cyan');
# Now classify the culprit pairs into the correct family
classifier = KNeighborsClassifier(n_neighbors=num_neighbors)
new_culprit_families = list()
for culprit_pair in culprits:
fam_1 = atom_families[culprit_pair[0]]
fam_2 = atom_families[culprit_pair[1]]
good_fam_1_atoms = np.logical_and(good_atom_inds, atom_families == fam_1)
good_fam_2_atoms = np.logical_and(good_atom_inds, atom_families == fam_2)
good_atom_2_fams = np.logical_or(good_fam_1_atoms, good_fam_2_atoms)
classifier.fit(all_atom_pos[good_atom_2_fams], atom_families[good_atom_2_fams])
answers = classifier.predict(all_atom_pos[culprit_pair])
final_family = np.unique(answers)
if final_family.size > 1:
            print('Classifier unsuccessful:', culprit_pair, final_family)
new_culprit_families.append(final_family[0])
# print 'Originally classified as: ', fam_1, ', ', fam_2, ', Classified later as:', new_culprit_families[-1]
# Figure out which of the culprits to keep - the one closer to the center of the atom center - higher amplitude
# For some reason this is not working as expected
neighbor_size = 1
culprits_to_keep = list()
culprits_to_discard = list()
for culprit_pair in culprits:
amplitude_pair = list()
# print culprit_pair
for atom_ind in culprit_pair:
row_ind = int(np.round(all_atom_pos[atom_ind, 0]))
col_ind = int(np.round(all_atom_pos[atom_ind, 1]))
img_section = double_cropped_image[max(0, row_ind - neighbor_size):
min(double_cropped_image.shape[0], row_ind + neighbor_size),
max(0, col_ind - neighbor_size):
min(double_cropped_image.shape[1], col_ind + neighbor_size)]
amplitude_pair.append(np.max(img_section))
# print amplitude_pair
if amplitude_pair[0] > amplitude_pair[1]:
culprits_to_keep.append(culprit_pair[0])
culprits_to_discard.append(culprit_pair[1])
else:
culprits_to_keep.append(culprit_pair[1])
culprits_to_discard.append(culprit_pair[0])
if show_culprit_plot:
fig, axis = plt.subplots(figsize=(14, 14))
axis.hold(True)
col_map = plt.cm.jet
axis.imshow(double_cropped_image, interpolation='none', cmap="gray")
axis.scatter(all_atom_pos[culprits.flat, 1], all_atom_pos[culprits.flat, 0], color='yellow')
axis.scatter(all_atom_pos[culprits_to_discard, 1], all_atom_pos[culprits_to_discard, 0], color='red')
axis.scatter(all_atom_pos[culprits_to_keep, 1], all_atom_pos[culprits_to_keep, 0], color='cyan')
# Update the correct family from the classifier
new_atom_families = np.copy(atom_families)
for culprit_pair, new_family in zip(culprits, new_culprit_families):
new_atom_families[culprit_pair] = new_family # <- This is fine
# make a list of atoms without the bad culprits
new_good_atoms = np.ones(all_atom_pos.shape[0], dtype=bool)
new_good_atoms[culprits_to_discard] = False # <----- problem here
# new_good_atoms[culprits_to_keep] = True
# make cropped versions of the positions and labels using this mask:
new_atom_pos = all_atom_pos[new_good_atoms]
new_atom_families = new_atom_families[new_good_atoms]
# Now reconstruct the list of arrays we started with:
new_atom_labels = list()
    # one output array per atom *family* (not per atom)
    for family_ind in range(len(atom_labels)):
        new_atom_labels.append(new_atom_pos[new_atom_families == family_ind])
return new_atom_labels
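# Illustrative usage sketch (made-up coordinates; `image` is assumed to be a
# 2D numpy array): family 0 and family 1 both label the atom near (10, 10),
# so the dimmer of the two labels is dropped. num_neighbors is lowered to 1
# because this toy example has only two clean atoms to train the classifier.
#   fam_0 = np.array([[10.0, 10.0], [30.0, 30.0]])
#   fam_1 = np.array([[10.4, 10.2], [50.0, 50.0]])
#   cleaned = remove_duplicate_labels([fam_0, fam_1], psf_width=1.0,
#                                     double_cropped_image=image,
#                                     num_neighbors=1)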
|
|
"""
utilities
"""
import os
import sys
import time
import zlib
import uuid
import math
import fcntl
import base64
import random
import inspect
import datetime
import threading
import traceback
import functools
import Cryptodome.Random
import Cryptodome.Hash.HMAC
import Cryptodome.Cipher.AES
from future.utils import iteritems
from contextlib import contextmanager
try:
from threading import get_ident
except ImportError:
from thread import get_ident
from .work_spec import WorkSpec
from .file_spec import FileSpec
from .event_spec import EventSpec
from pandalogger.PandaLogger import PandaLogger
from pandalogger.LogWrapper import LogWrapper
from pandaharvester.harvesterconfig import harvester_config
with_memory_profile = False
# lock for synchronization
sync_lock = threading.Lock()
# synchronize decorator
def synchronize(func):
""" synchronize decorator """
@functools.wraps(func)
def wrapper(*args, **kwargs):
with sync_lock:
return func(*args, **kwargs)
return wrapper
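# Example (illustrative, not part of the original module): every call to a
# decorated function is serialized on the module-wide sync_lock.
#   @synchronize
#   def bump(counter_map, key):
#       counter_map[key] = counter_map.get(key, 0) + 1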
# stopwatch class
class StopWatch(object):
# constructor
def __init__(self):
self.startTime = datetime.datetime.utcnow()
# get elapsed time
def get_elapsed_time(self):
diff = datetime.datetime.utcnow() - self.startTime
return " : took {0}.{1:03} sec".format(diff.seconds + diff.days * 24 * 3600,
diff.microseconds // 1000)
# get elapsed time in seconds
def get_elapsed_time_in_sec(self, precise=False):
diff = datetime.datetime.utcnow() - self.startTime
if precise:
return diff.seconds + diff.days * 24 * 3600 + diff.microseconds * 1e-6
else:
return diff.seconds + diff.days * 24 * 3600
# reset
def reset(self):
self.startTime = datetime.datetime.utcnow()
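# Example (illustrative; do_work and tmp_log are assumed placeholders):
#   sw = StopWatch()
#   do_work()
#   tmp_log.debug('processed' + sw.get_elapsed_time())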
# map with lock
class MapWithLock(object):
def __init__(self):
self.lock = threading.Lock()
self.dataMap = dict()
def __getitem__(self, item):
ret = self.dataMap.__getitem__(item)
return ret
def __setitem__(self, item, value):
self.dataMap.__setitem__(item, value)
def __contains__(self, item):
ret = self.dataMap.__contains__(item)
return ret
def acquire(self):
self.lock.acquire()
def release(self):
self.lock.release()
def iteritems(self):
return iteritems(self.dataMap)
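# Example (illustrative): guard a read-modify-write across threads.
#   counters = MapWithLock()
#   counters.acquire()
#   try:
#       counters['nWorkers'] = (counters['nWorkers'] + 1) if 'nWorkers' in counters else 1
#   finally:
#       counters.release()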
# singleton distinguishable with id
class SingletonWithID(type):
def __init__(cls, *args,**kwargs):
cls.__instance = {}
super(SingletonWithID, cls).__init__(*args, **kwargs)
@synchronize
def __call__(cls, *args, **kwargs):
obj_id = str(kwargs.get('id', ''))
if obj_id not in cls.__instance:
cls.__instance[obj_id] = super(SingletonWithID, cls).__call__(*args, **kwargs)
return cls.__instance.get(obj_id)
# singleton distinguishable with each thread and id
class SingletonWithThreadAndID(type):
def __init__(cls, *args,**kwargs):
cls.__instance = {}
super(SingletonWithThreadAndID, cls).__init__(*args, **kwargs)
@synchronize
def __call__(cls, *args, **kwargs):
thread_id = get_ident()
obj_id = (thread_id, str(kwargs.get('id', '')))
if obj_id not in cls.__instance:
cls.__instance[obj_id] = super(SingletonWithThreadAndID, cls).__call__(*args, **kwargs)
return cls.__instance.get(obj_id)
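# Example (illustrative, Python 2 metaclass syntax to match this module;
# Python 3 would use `class QueueConfig(object, metaclass=SingletonWithID)`):
#   class QueueConfig(object):
#       __metaclass__ = SingletonWithID
#       def __init__(self, id=None):
#           self.id = id
#   QueueConfig(id='queueA') is QueueConfig(id='queueA')   # -> True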
# enable memory profiling
def enable_memory_profiling():
global with_memory_profile
with_memory_profile = True
# setup logger
def setup_logger(name=None):
if name is None:
frm = inspect.stack()[1][0]
mod = inspect.getmodule(frm)
name = mod.__name__.split('.')[-1]
try:
log_level = getattr(harvester_config.log_level, name)
return PandaLogger().getLogger(name, log_level=log_level)
except Exception:
pass
return PandaLogger().getLogger(name)
# make logger
def make_logger(tmp_log, token=None, method_name=None, hook=None):
# get method name of caller
if method_name is None:
tmpStr = inspect.stack()[1][3]
else:
tmpStr = method_name
if token is not None:
tmpStr += ' <{0}>'.format(token)
    else:
        tmpStr += ' :'
newLog = LogWrapper(tmp_log, tmpStr, seeMem=with_memory_profile, hook=hook)
return newLog
# dump error message
def dump_error_message(tmp_log, err_str=None, no_message=False):
if not isinstance(tmp_log, LogWrapper):
methodName = '{0} : '.format(inspect.stack()[1][3])
else:
methodName = ''
# error
if err_str is None:
errtype, errvalue = sys.exc_info()[:2]
err_str = "{0} {1} {2} ".format(methodName, errtype.__name__, errvalue)
err_str += traceback.format_exc()
if not no_message:
tmp_log.error(err_str)
return err_str
# sleep for a (randomized) duration; return True if the stop event fired, i.e. no more sleep is needed
def sleep(interval, stop_event, randomize=True):
if randomize and interval > 0:
randInterval = random.randint(int(interval * 0.4), int(interval * 1.4))
else:
randInterval = interval
if stop_event is None:
time.sleep(randInterval)
else:
i = 0
while True:
if stop_event.is_set():
return True
if i >= randInterval:
break
stop_event.wait(1)
i += 1
return False
# make PFC
def make_pool_file_catalog(jobspec_list):
xmlStr = """<?xml version="1.0" ?>
<!DOCTYPE POOLFILECATALOG SYSTEM "InMemory">
<POOLFILECATALOG>
"""
doneLFNs = set()
for jobSpec in jobspec_list:
inFiles = jobSpec.get_input_file_attributes()
for inLFN, inFile in iteritems(inFiles):
if inLFN in doneLFNs:
continue
doneLFNs.add(inLFN)
xmlStr += """ <File ID="{guid}">
<physical>
<pfn filetype="ROOT_All" name="{lfn}"/>
</physical>
<logical/>
</File>
""".format(guid=inFile['guid'], lfn=inLFN)
xmlStr += "</POOLFILECATALOG>"
return xmlStr
# calculate adler32
def calc_adler32(file_name):
val = 1
blockSize = 32 * 1024 * 1024
with open(file_name, 'rb') as fp:
while True:
data = fp.read(blockSize)
if not data:
break
val = zlib.adler32(data, val)
if val < 0:
val += 2 ** 32
return hex(val)[2:10].zfill(8).lower()
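# Example (illustrative): for a file containing b"hello" this returns
# '062c0215' -- zlib.adler32(b"hello") == 0x062c0215, zero-padded to
# 8 lowercase hex digits.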
# get output file report
def get_output_file_report(jobspec):
if jobspec.outputFilesToReport is not None:
return jobspec.outputFilesToReport
# header
xml = """<?xml version="1.0" encoding="UTF-8" standalone="no" ?>
<!-- ATLAS file meta-data catalog -->
<!DOCTYPE POOLFILECATALOG SYSTEM "InMemory">
<POOLFILECATALOG>
"""
# body
for fileSpec in jobspec.outFiles:
# only successful files
if fileSpec.status != 'finished':
continue
# extract guid
if 'guid' in fileSpec.fileAttributes:
guid = fileSpec.fileAttributes['guid']
elif fileSpec.fileType == 'log':
guid = jobspec.get_logfile_info()['guid']
else:
guid = str(uuid.uuid4())
# checksum
if fileSpec.chksum is not None and ':' in fileSpec.chksum:
chksum = fileSpec.chksum.split(':')[-1]
else:
chksum = fileSpec.chksum
xml += """"<File ID="{guid}">
<logical>
<lfn name="{lfn}"/>
</logical>
<metadata att_name="fsize" att_value = "{fsize}"/>
<metadata att_name="adler32" att_value="{chksum}"/>
</File> """.format(guid=guid, lfn=fileSpec.lfn, fsize=fileSpec.fsize, chksum=chksum)
    # trailer
xml += """
</POOLFILECATALOG>
"""
return xml
def create_shards(input_list, size):
"""
Creates shards of size n from the input list.
"""
shard, i = [], 0
for element in input_list:
shard.append(element)
i += 1
if i == size:
yield shard
shard, i = [], 0
if i > 0:
yield shard
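# Example (illustrative): the final shard may be shorter than `size`.
#   list(create_shards([1, 2, 3, 4, 5], 2))   # -> [[1, 2], [3, 4], [5]]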
# update job attributes with workers
def update_job_attributes_with_workers(map_type, jobspec_list, workspec_list, files_to_stage_out_list,
events_to_update_list):
if map_type in [WorkSpec.MT_OneToOne, WorkSpec.MT_MultiJobs]:
workSpec = workspec_list[0]
for jobSpec in jobspec_list:
jobSpec.set_attributes(workSpec.workAttributes)
# delete job metadata from worker attributes
try:
del workSpec.workAttributes[jobSpec.PandaID]['metaData']
except Exception:
pass
# set start and end times
if workSpec.status in [WorkSpec.ST_running]:
jobSpec.set_start_time()
elif workSpec.is_final_status():
jobSpec.set_end_time()
# core count
if workSpec.nCore is not None and jobSpec.nCore is None:
try:
jobSpec.nCore = int(workSpec.nCore / len(jobspec_list))
if jobSpec.nCore == 0:
jobSpec.nCore = 1
except Exception:
pass
# batch ID
if not jobSpec.has_attribute('batchID'):
if workSpec.batchID is not None:
jobSpec.set_one_attribute('batchID', workSpec.batchID)
# add files
outFileAttrs = jobSpec.get_output_file_attributes()
for tmpWorkerID, files_to_stage_out in iteritems(files_to_stage_out_list):
if jobSpec.PandaID in files_to_stage_out:
for lfn, fileAttersList in iteritems(files_to_stage_out[jobSpec.PandaID]):
for fileAtters in fileAttersList:
fileSpec = FileSpec()
fileSpec.lfn = lfn
fileSpec.PandaID = jobSpec.PandaID
fileSpec.taskID = jobSpec.taskID
fileSpec.path = fileAtters['path']
fileSpec.fsize = fileAtters['fsize']
fileSpec.fileType = fileAtters['type']
fileSpec.fileAttributes = fileAtters
fileSpec.workerID = tmpWorkerID
if 'isZip' in fileAtters:
fileSpec.isZip = fileAtters['isZip']
if 'chksum' in fileAtters:
fileSpec.chksum = fileAtters['chksum']
if 'eventRangeID' in fileAtters:
fileSpec.eventRangeID = fileAtters['eventRangeID']
# use input fileID as provenanceID
try:
provenanceID = fileSpec.eventRangeID.split('-')[2]
except Exception:
provenanceID = None
fileSpec.provenanceID = provenanceID
if lfn in outFileAttrs:
fileSpec.scope = outFileAttrs[lfn]['scope']
jobSpec.add_out_file(fileSpec)
# add events
for events_to_update in events_to_update_list:
if jobSpec.PandaID in events_to_update:
for data in events_to_update[jobSpec.PandaID]:
eventSpec = EventSpec()
eventSpec.from_data(data, jobSpec.PandaID)
jobSpec.add_event(eventSpec, None)
statusInJobAttr = jobSpec.get_job_status_from_attributes()
jobSpec.status, jobSpec.subStatus = workSpec.convert_to_job_status(statusInJobAttr)
if workSpec.pilot_closed:
jobSpec.set_pilot_closed()
if workSpec.new_status:
jobSpec.trigger_propagation()
elif map_type == WorkSpec.MT_MultiWorkers:
jobSpec = jobspec_list[0]
# scan all workers
allDone = True
isRunning = False
oneFinished = False
oneFailed = False
nCore = 0
nCoreTime = 0
for workSpec in workspec_list:
if workSpec.new_status:
jobSpec.trigger_propagation()
            # the worker is running
if workSpec.status in [WorkSpec.ST_running]:
isRunning = True
# set start time
jobSpec.set_start_time()
nCore += workSpec.nCore
# the worker is done
if workSpec.is_final_status():
if workSpec.startTime is not None and workSpec.endTime is not None:
nCoreTime += workSpec.nCore * (workSpec.endTime - workSpec.startTime).total_seconds()
if workSpec.status == WorkSpec.ST_finished:
oneFinished = True
elif workSpec.status == WorkSpec.ST_failed:
oneFailed = True
else:
# the worker is still active
allDone = False
# set final values
if allDone:
# set end time
jobSpec.set_end_time()
# time-averaged core count
if jobSpec.startTime is not None:
total_time = (jobSpec.endTime - jobSpec.startTime).total_seconds()
if total_time > 0:
jobSpec.nCore = float(nCoreTime) / float(total_time)
jobSpec.nCore = int(math.ceil(jobSpec.nCore))
# disable to get more workers
jobSpec.moreWorkers = 0
else:
# live core count
jobSpec.nCore = nCore
# combine worker attributes and set it to job
# FIXME
# jobSpec.set_attributes(workAttributes)
# add files
outFileAttrs = jobSpec.get_output_file_attributes()
for tmpWorkerID, files_to_stage_out in iteritems(files_to_stage_out_list):
if jobSpec.PandaID in files_to_stage_out:
for lfn, fileAttersList in iteritems(files_to_stage_out[jobSpec.PandaID]):
for fileAtters in fileAttersList:
fileSpec = FileSpec()
fileSpec.lfn = lfn
fileSpec.PandaID = jobSpec.PandaID
fileSpec.taskID = jobSpec.taskID
fileSpec.path = fileAtters['path']
fileSpec.fsize = fileAtters['fsize']
fileSpec.fileType = fileAtters['type']
fileSpec.fileAttributes = fileAtters
fileSpec.workerID = tmpWorkerID
if 'isZip' in fileAtters:
fileSpec.isZip = fileAtters['isZip']
if 'chksum' in fileAtters:
fileSpec.chksum = fileAtters['chksum']
if 'eventRangeID' in fileAtters:
fileSpec.eventRangeID = fileAtters['eventRangeID']
# use input fileID as provenanceID
try:
provenanceID = fileSpec.eventRangeID.split('-')[2]
except Exception:
provenanceID = None
fileSpec.provenanceID = provenanceID
if lfn in outFileAttrs:
fileSpec.scope = outFileAttrs[lfn]['scope']
jobSpec.add_out_file(fileSpec)
# add events
for events_to_update in events_to_update_list:
if jobSpec.PandaID in events_to_update:
for data in events_to_update[jobSpec.PandaID]:
eventSpec = EventSpec()
eventSpec.from_data(data, jobSpec.PandaID)
jobSpec.add_event(eventSpec, None)
# set job status
workSpec = workspec_list[0]
if allDone:
if oneFinished:
jobSpec.status, jobSpec.subStatus = workSpec.convert_to_job_status(WorkSpec.ST_finished)
elif oneFailed:
jobSpec.status, jobSpec.subStatus = workSpec.convert_to_job_status(WorkSpec.ST_failed)
else:
jobSpec.status, jobSpec.subStatus = workSpec.convert_to_job_status(WorkSpec.ST_cancelled)
else:
if isRunning or jobSpec.status == 'running':
jobSpec.status, jobSpec.subStatus = workSpec.convert_to_job_status(WorkSpec.ST_running)
else:
jobSpec.status, jobSpec.subStatus = workSpec.convert_to_job_status(WorkSpec.ST_submitted)
return True
# rollover for log files
def do_log_rollover():
PandaLogger.doRollOver()
# get stopwatch
def get_stopwatch():
return StopWatch()
# global dict for all threads
global_dict = MapWithLock()
# get global dict
def get_global_dict():
return global_dict
# get file lock
@contextmanager
def get_file_lock(file_name, lock_interval):
if os.path.exists(file_name):
opt = 'r+'
else:
opt = 'w+'
with open(file_name, opt) as f:
locked = False
try:
# lock file
fcntl.flock(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
locked = True
# read timestamp
timeNow = datetime.datetime.utcnow()
toSkip = False
try:
s = f.read()
pTime = datetime.datetime.strptime(s, "%Y-%m-%d %H:%M:%S.%f")
if timeNow - pTime < datetime.timedelta(seconds=lock_interval):
toSkip = True
except Exception:
pass
# skip if still in locked interval
if toSkip:
raise IOError("skipped since still in locked interval")
# write timestamp
f.seek(0)
f.write(timeNow.strftime("%Y-%m-%d %H:%M:%S.%f"))
f.truncate()
# execute with block
yield
finally:
# unlock
if locked:
fcntl.flock(f, fcntl.LOCK_UN)
# convert a key phrase to a cipher key
def convert_phrase_to_key(key_phrase):
h = Cryptodome.Hash.HMAC.new(key_phrase)
return h.hexdigest()
# encrypt a string
def encrypt_string(key_phrase, plain_text):
k = convert_phrase_to_key(key_phrase)
v = Cryptodome.Random.new().read(Cryptodome.Cipher.AES.block_size)
c = Cryptodome.Cipher.AES.new(k, Cryptodome.Cipher.AES.MODE_CFB, v)
return base64.b64encode(v + c.encrypt(plain_text))
# decrypt a string
def decrypt_string(key_phrase, cipher_text):
cipher_text = base64.b64decode(cipher_text)
k = convert_phrase_to_key(key_phrase)
v = cipher_text[:Cryptodome.Cipher.AES.block_size]
c = Cryptodome.Cipher.AES.new(k, Cryptodome.Cipher.AES.MODE_CFB, v)
cipher_text = cipher_text[Cryptodome.Cipher.AES.block_size:]
return c.decrypt(cipher_text)
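# Example (illustrative round trip; made-up phrase and text, on Python 2
# where pycryptodome accepts str keys and plaintext):
#   token = encrypt_string('some key phrase', 'secret text')
#   decrypt_string('some key phrase', token)   # -> 'secret text'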
# set permission
def set_file_permission(path):
if not os.path.exists(path):
return
targets = []
if os.path.isfile(path):
targets += [path]
else:
for root, dirs, files in os.walk(path):
targets += [os.path.join(root, f) for f in files]
umask = os.umask(0)
uid = os.getuid()
gid = os.getgid()
for f in targets:
try:
os.chmod(f, 0o666 - umask)
os.chown(f, uid, gid)
except Exception:
pass
os.umask(umask)
# get URL of queues config file
def get_queues_config_url():
try:
return os.environ['HARVESTER_QUEUE_CONFIG_URL']
except Exception:
return None
# get unique queue name
def get_unique_queue_name(queue_name, resource_type):
return '{0}:{1}'.format(queue_name, resource_type)
# capability to dynamically change plugins
def dynamic_plugin_change():
try:
return harvester_config.master.dynamic_plugin_change
except Exception:
return True
# replacement for slow namedtuple in python 2
class DictTupleHybrid(tuple):
def set_attributes(self, attributes):
self.attributes = attributes
def _asdict(self):
return dict(zip(self.attributes, self))
# Make a list of choice candidates according to permille weights
def make_choice_list(pdpm={}, default=None):
    weight_sum = sum(pdpm.values())
    weight_default = 1000
    ret_list = []
    for candidate, weight in iteritems(pdpm):
        if weight_sum > 1000:
            real_weight = int(weight * 1000 / weight_sum)
        else:
            real_weight = int(weight)
        ret_list.extend([candidate] * real_weight)
        weight_default -= real_weight
    ret_list.extend([default] * weight_default)
    return ret_list
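# Example (illustrative): weights are permille and the default soaks up the
# remainder.
#   choices = make_choice_list({'siteA': 700, 'siteB': 200}, default='siteC')
#   # -> 700 x 'siteA', 200 x 'siteB', 100 x 'siteC';
#   # random.choice(choices) then picks with 70/20/10 per-cent probability.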
|
|
#!/usr/bin/env python
import filecmp
import fileinput
import os
import re
import signal
import stat
import time
import glob
import shutil
import difflib
import yaml
from datetime import datetime
from gppylib.commands.base import Command, ExecutionError, REMOTE
from gppylib.commands.gp import chk_local_db_running
from gppylib.db import dbconn
from gppylib.gparray import GpArray, MODE_SYNCHRONIZED, MODE_RESYNCHRONIZATION
from pygresql import pg
PARTITION_START_DATE = '2010-01-01'
PARTITION_END_DATE = '2013-01-01'
GET_APPENDONLY_DATA_TABLE_INFO_SQL = """SELECT ALL_DATA_TABLES.oid, ALL_DATA_TABLES.schemaname, ALL_DATA_TABLES.tablename, OUTER_PG_CLASS.relname AS tupletable FROM(
SELECT ALLTABLES.oid, ALLTABLES.schemaname, ALLTABLES.tablename FROM
(SELECT c.oid, n.nspname AS schemaname, c.relname AS tablename FROM pg_class c, pg_namespace n
WHERE n.oid = c.relnamespace) AS ALLTABLES,
(SELECT n.nspname AS schemaname, c.relname AS tablename
FROM pg_class c LEFT JOIN pg_namespace n ON n.oid = c.relnamespace
LEFT JOIN pg_tablespace t ON t.oid = c.reltablespace
WHERE c.relkind = 'r'::CHAR AND c.oid > 16384 AND (c.relnamespace > 16384 OR n.nspname = 'public')
EXCEPT
((SELECT x.schemaname, x.partitiontablename FROM
(SELECT DISTINCT schemaname, tablename, partitiontablename, partitionlevel FROM pg_partitions) AS X,
(SELECT schemaname, tablename maxtable, max(partitionlevel) maxlevel FROM pg_partitions GROUP BY (tablename, schemaname))
AS Y
WHERE x.schemaname = y.schemaname AND x.tablename = Y.maxtable AND x.partitionlevel != Y.maxlevel)
UNION (SELECT DISTINCT schemaname, tablename FROM pg_partitions))) AS DATATABLES
WHERE ALLTABLES.schemaname = DATATABLES.schemaname AND ALLTABLES.tablename = DATATABLES.tablename AND ALLTABLES.oid NOT IN (SELECT reloid FROM pg_exttable)
) AS ALL_DATA_TABLES, pg_appendonly, pg_class OUTER_PG_CLASS
WHERE ALL_DATA_TABLES.oid = pg_appendonly.relid
AND OUTER_PG_CLASS.oid = pg_appendonly.segrelid
"""
GET_ALL_AO_DATATABLES_SQL = """
%s AND pg_appendonly.columnstore = 'f'
""" % GET_APPENDONLY_DATA_TABLE_INFO_SQL
GET_ALL_CO_DATATABLES_SQL = """
%s AND pg_appendonly.columnstore = 't'
""" % GET_APPENDONLY_DATA_TABLE_INFO_SQL
master_data_dir = os.environ.get('MASTER_DATA_DIRECTORY')
if master_data_dir is None:
raise Exception('MASTER_DATA_DIRECTORY is not set')
def execute_sql(dbname, sql):
with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
dbconn.execSQL(conn, sql)
conn.commit()
def execute_sql_singleton(dbname, sql):
result = None
with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
result = dbconn.execSQLForSingleton(conn, sql)
if result is None:
raise Exception("error running query: %s" % sql)
return result
def has_exception(context):
if not hasattr(context, 'exception'):
return False
if context.exception:
return True
else:
return False
def run_command(context, command):
context.exception = None
cmd = Command(name='run %s' % command, cmdStr='%s' % command)
try:
cmd.run(validateAfter=True)
except ExecutionError, e:
context.exception = e
result = cmd.get_results()
context.ret_code = result.rc
context.stdout_message = result.stdout
context.error_message = result.stderr
def run_async_command(context, command):
    context.exception = None
    proc = None
    cmd = Command(name='run %s' % command, cmdStr='%s' % command)
    try:
        proc = cmd.runNoWait()
    except ExecutionError, e:
        context.exception = e
    context.async_proc = proc
def run_cmd(command):
cmd = Command(name='run %s' % command, cmdStr='%s' % command)
try:
cmd.run(validateAfter=True)
except ExecutionError, e:
print 'caught exception %s' % e
result = cmd.get_results()
return (result.rc, result.stdout, result.stderr)
def run_command_remote(context, command, host, source_file, export_mdd):
cmd = Command(name='run command %s' % command,
cmdStr='gpssh -h %s -e \'source %s; %s; %s\'' % (host, source_file, export_mdd, command))
cmd.run(validateAfter=True)
result = cmd.get_results()
context.ret_code = result.rc
context.stdout_message = result.stdout
context.error_message = result.stderr
def run_gpcommand(context, command, cmd_prefix=''):
context.exception = None
cmd = Command(name='run %s' % command, cmdStr='$GPHOME/bin/%s' % (command))
if cmd_prefix:
cmd = Command(name='run %s' % command, cmdStr='%s;$GPHOME/bin/%s' % (cmd_prefix, command))
try:
cmd.run(validateAfter=True)
except ExecutionError, e:
context.exception = e
result = cmd.get_results()
context.ret_code = result.rc
context.stdout_message = result.stdout
context.error_message = result.stderr
def run_gpcommand_async(context, command):
cmd = Command(name='run %s' % command, cmdStr='$GPHOME/bin/%s' % (command))
context.asyncproc = cmd.runNoWait()
def check_stdout_msg(context, msg):
pat = re.compile(msg)
if not pat.search(context.stdout_message):
err_str = "Expected stdout string '%s' and found: '%s'" % (msg, context.stdout_message)
raise Exception(err_str)
def check_string_not_present_stdout(context, msg):
pat = re.compile(msg)
if pat.search(context.stdout_message):
err_str = "Did not expect stdout string '%s' but found: '%s'" % (msg, context.stdout_message)
raise Exception(err_str)
def check_err_msg(context, err_msg):
if not hasattr(context, 'exception'):
raise Exception('An exception was not raised and it was expected')
pat = re.compile(err_msg)
if not pat.search(context.error_message):
err_str = "Expected error string '%s' and found: '%s'" % (err_msg, context.error_message)
raise Exception(err_str)
def check_return_code(context, ret_code):
if context.ret_code != int(ret_code):
emsg = ""
if context.error_message:
emsg += "STDERR:\n%s\n" % context.error_message
if context.stdout_message:
emsg += "STDOUT:\n%s\n" % context.stdout_message
raise Exception("expected return code '%s' does not equal actual return code '%s' \n%s" % (ret_code, context.ret_code, emsg))
def check_not_return_code(context, ret_code):
if context.ret_code == int(ret_code):
emsg = ""
if context.error_message:
emsg += context.error_message
raise Exception("return code unexpectedly equals '%s' %s" % (ret_code, emsg))
def check_database_is_running(context):
    if 'PGPORT' not in os.environ:
raise Exception('PGPORT should be set')
pgport = int(os.environ['PGPORT'])
running_status = chk_local_db_running(master_data_dir, pgport)
gpdb_running = running_status[0] and running_status[1] and running_status[2] and running_status[3]
return gpdb_running
def start_database_if_not_started(context):
if not check_database_is_running(context):
start_database(context)
def start_database(context):
run_gpcommand(context, 'gpstart -a')
if context.exception:
raise context.exception
def stop_database_if_started(context):
if check_database_is_running(context):
stop_database(context)
def stop_database(context):
run_gpcommand(context, 'gpstop -M fast -a')
if context.exception:
raise context.exception
def getRows(dbname, exec_sql):
with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
curs = dbconn.execSQL(conn, exec_sql)
results = curs.fetchall()
return results
def getRow(dbname, exec_sql):
with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
curs = dbconn.execSQL(conn, exec_sql)
result = curs.fetchone()
return result
def check_db_exists(dbname, host=None, port=0, user=None):
LIST_DATABASE_SQL = 'SELECT datname FROM pg_database'
results = []
with dbconn.connect(dbconn.DbURL(hostname=host, username=user, port=port, dbname='template1')) as conn:
curs = dbconn.execSQL(conn, LIST_DATABASE_SQL)
results = curs.fetchall()
for result in results:
if result[0] == dbname:
return True
return False
def create_database_if_not_exists(context, dbname, host=None, port=0, user=None):
if not check_db_exists(dbname, host, port, user):
create_database(context, dbname, host, port, user)
def create_database(context, dbname=None, host=None, port=0, user=None):
LOOPS = 10
    if host is None or port == 0 or user is None:
createdb_cmd = 'createdb %s' % dbname
else:
createdb_cmd = 'psql -h %s -p %d -U %s -d template1 -c "create database %s"' % (host,
port, user, dbname)
for i in range(LOOPS):
context.exception = None
run_command(context, createdb_cmd)
if context.exception:
time.sleep(1)
continue
if check_db_exists(dbname, host, port, user):
return
time.sleep(1)
if context.exception:
raise context.exception
raise Exception("create database for '%s' failed after %d attempts" % (dbname, LOOPS))
def get_segment_hostnames(context, dbname):
sql = "SELECT DISTINCT(hostname) FROM gp_segment_configuration WHERE content != -1;"
return getRows(dbname, sql)
def check_partition_table_exists(context, dbname, schemaname, table_name, table_type=None, part_level=1, part_number=1):
partitions = get_partition_names(schemaname, table_name, dbname, part_level, part_number)
if not partitions:
return False
return check_table_exists(context, dbname, partitions[0][0].strip(), table_type)
def check_table_exists(context, dbname, table_name, table_type=None, host=None, port=0, user=None):
with dbconn.connect(dbconn.DbURL(hostname=host, port=port, username=user, dbname=dbname)) as conn:
if '.' in table_name:
schemaname, tablename = table_name.split('.')
SQL_format = """
SELECT c.oid, c.relkind, c.relstorage, c.reloptions
FROM pg_class c, pg_namespace n
WHERE c.relname = '%s' AND n.nspname = '%s' AND c.relnamespace = n.oid;
"""
SQL = SQL_format % (escape_string(tablename, conn=conn), escape_string(schemaname, conn=conn))
else:
SQL_format = """
SELECT oid, relkind, relstorage, reloptions \
FROM pg_class \
WHERE relname = E'%s';\
"""
SQL = SQL_format % (escape_string(table_name, conn=conn))
table_row = None
try:
table_row = dbconn.execSQLForSingletonRow(conn, SQL)
except Exception as e:
context.exception = e
return False
if table_type is None:
return True
if table_row[2] == 'a':
original_table_type = 'ao'
elif table_row[2] == 'c':
original_table_type = 'co'
elif table_row[2] == 'h':
original_table_type = 'heap'
elif table_row[2] == 'x':
original_table_type = 'external'
elif table_row[2] == 'v':
original_table_type = 'view'
else:
raise Exception('Unknown table type %s' % table_row[2])
if original_table_type != table_type.strip():
return False
return True
def drop_external_table_if_exists(context, table_name, dbname):
if check_table_exists(context, table_name=table_name, dbname=dbname, table_type='external'):
drop_external_table(context, table_name=table_name, dbname=dbname)
def drop_table_if_exists(context, table_name, dbname, host=None, port=0, user=None):
SQL = 'drop table if exists %s' % table_name
with dbconn.connect(dbconn.DbURL(hostname=host, port=port, username=user, dbname=dbname)) as conn:
dbconn.execSQL(conn, SQL)
conn.commit()
def drop_external_table(context, table_name, dbname, host=None, port=0, user=None):
SQL = 'drop external table %s' % table_name
with dbconn.connect(dbconn.DbURL(hostname=host, port=port, username=user, dbname=dbname)) as conn:
dbconn.execSQL(conn, SQL)
conn.commit()
if check_table_exists(context, table_name=table_name, dbname=dbname, table_type='external', host=host, port=port,
user=user):
raise Exception('Unable to successfully drop the table %s' % table_name)
def drop_table(context, table_name, dbname, host=None, port=0, user=None):
SQL = 'drop table %s' % table_name
with dbconn.connect(dbconn.DbURL(hostname=host, username=user, port=port, dbname=dbname)) as conn:
dbconn.execSQL(conn, SQL)
conn.commit()
if check_table_exists(context, table_name=table_name, dbname=dbname, host=host, port=port, user=user):
raise Exception('Unable to successfully drop the table %s' % table_name)
def check_schema_exists(context, schema_name, dbname):
schema_check_sql = "select * from pg_namespace where nspname='%s';" % schema_name
if len(getRows(dbname, schema_check_sql)) < 1:
return False
return True
def drop_schema_if_exists(context, schema_name, dbname):
if check_schema_exists(context, schema_name, dbname):
drop_schema(context, schema_name, dbname)
def drop_schema(context, schema_name, dbname):
SQL = 'drop schema %s cascade' % schema_name
with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
dbconn.execSQL(conn, SQL)
conn.commit()
if check_schema_exists(context, schema_name, dbname):
raise Exception('Unable to successfully drop the schema %s' % schema_name)
def get_table_names(dbname):
sql = """
SELECT n.nspname AS schemaname, c.relname AS tablename\
FROM pg_class c\
LEFT JOIN pg_namespace n ON n.oid = c.relnamespace\
LEFT JOIN pg_tablespace t ON t.oid = c.reltablespace\
WHERE c.relkind = 'r'::CHAR AND c.oid > 16384 AND (c.relnamespace > 16384 OR n.nspname = 'public')
AND n.nspname NOT LIKE 'pg_temp_%'
"""
return getRows(dbname, sql)
def get_partition_tablenames(tablename, dbname, part_level=1):
child_part_sql = "select partitiontablename from pg_partitions where tablename='%s' and partitionlevel=%s;" % (
tablename, part_level)
rows = getRows(dbname, child_part_sql)
return rows
def get_partition_names(schemaname, tablename, dbname, part_level, part_number):
part_num_sql = """select partitionschemaname || '.' || partitiontablename from pg_partitions
where schemaname='%s' and tablename='%s'
and partitionlevel=%s and partitionposition=%s;""" % (
schemaname, tablename, part_level, part_number)
rows = getRows(dbname, part_num_sql)
return rows
def validate_part_table_data_on_segments(context, tablename, part_level, dbname):
    part_tables = get_partition_tablenames(tablename, dbname, part_level)
    for part_tablename in part_tables:
        seg_data_sql = "select gp_segment_id, count(*) from gp_dist_random('%s') group by gp_segment_id;" % \
                       part_tablename[0]
        rows = getRows(dbname, seg_data_sql)
        for row in rows:
            if row[1] == '0':
                raise Exception('Data not present in segment %s' % row[0])
def create_external_partition(context, tablename, dbname, port, filename):
table_definition = 'Column1 int, Column2 varchar(20), Column3 date'
create_table_str = "Create table %s (%s) Distributed randomly \
Partition by range(Column3) ( \
partition p_1 start(date '2010-01-01') end(date '2011-01-01') with (appendonly=true, orientation=column, compresstype=zlib, compresslevel=1), \
partition p_2 start(date '2011-01-01') end(date '2012-01-01') with (appendonly=true, orientation=row, compresstype=zlib, compresslevel=1), \
partition s_3 start(date '2012-01-01') end(date '2013-01-01') with (appendonly=true, orientation=column), \
partition s_4 start(date '2013-01-01') end(date '2014-01-01') with (appendonly=true, orientation=row), \
partition s_5 start(date '2014-01-01') end(date '2015-01-01') ) \
;" % (tablename, table_definition)
    master_hostname = get_master_hostname()
create_ext_table_str = "Create readable external table %s_ret (%s) \
location ('gpfdist://%s:%s/%s') \
format 'csv' encoding 'utf-8' \
log errors segment reject limit 1000 \
;" % (tablename, table_definition, master_hostname[0][0].strip(), port, filename)
alter_table_str = "Alter table %s exchange partition p_2 \
with table %s_ret without validation \
;" % (tablename, tablename)
drop_table_str = "Drop table %s_ret;" % (tablename)
with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
dbconn.execSQL(conn, create_table_str)
dbconn.execSQL(conn, create_ext_table_str)
dbconn.execSQL(conn, alter_table_str)
dbconn.execSQL(conn, drop_table_str)
conn.commit()
populate_partition(tablename, '2010-01-01', dbname, 0, 100)
def create_partition(context, tablename, storage_type, dbname, compression_type=None, partition=True, rowcount=1094,
with_data=True, host=None, port=0, user=None):
interval = '1 year'
table_definition = 'Column1 int, Column2 varchar(20), Column3 date'
create_table_str = "Create table " + tablename + "(" + table_definition + ")"
storage_type_dict = {'ao': 'row', 'co': 'column'}
part_table = " Distributed Randomly Partition by list(Column2) \
Subpartition by range(Column3) Subpartition Template \
(start (date '%s') end (date '%s') every (interval '%s')) \
(Partition p1 values('backup') , Partition p2 values('restore')) " \
% (PARTITION_START_DATE, PARTITION_END_DATE, interval)
if storage_type == "heap":
create_table_str = create_table_str
if partition:
create_table_str = create_table_str + part_table
elif storage_type == "ao" or storage_type == "co":
create_table_str = create_table_str + " WITH(appendonly = true, orientation = %s) " % storage_type_dict[
storage_type]
if compression_type is not None:
create_table_str = create_table_str[:-2] + ", compresstype = " + compression_type + ") "
if partition:
create_table_str = create_table_str + part_table
create_table_str = create_table_str + ";"
with dbconn.connect(dbconn.DbURL(hostname=host, port=port, username=user, dbname=dbname)) as conn:
dbconn.execSQL(conn, create_table_str)
conn.commit()
if with_data:
populate_partition(tablename, PARTITION_START_DATE, dbname, 0, rowcount, host, port, user)
def populate_partition(tablename, start_date, dbname, data_offset, rowcount=1094, host=None, port=0, user=None):
insert_sql_str = "insert into %s select i+%d, 'backup', i + date '%s' from generate_series(0,%d) as i" % (
tablename, data_offset, start_date, rowcount)
insert_sql_str += "; insert into %s select i+%d, 'restore', i + date '%s' from generate_series(0,%d) as i" % (
tablename, data_offset, start_date, rowcount)
with dbconn.connect(dbconn.DbURL(hostname=host, port=port, username=user, dbname=dbname)) as conn:
dbconn.execSQL(conn, insert_sql_str)
conn.commit()
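# Illustrative sketch (not part of the original suite; table and database
# names are assumptions): create a non-partitioned append-optimized table and
# verify the default load. populate_partition above inserts two slices
# ('backup' and 'restore') of rowcount + 1 rows each, because
# generate_series(0, rowcount) is inclusive on both ends.
def _example_create_ao_table(context):
    create_partition(context, 'public.sales', 'ao', 'testdb', compression_type='zlib', partition=False)
    check_row_count('public.sales', 'testdb', 2 * (1094 + 1))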
def create_indexes(context, table_name, indexname, dbname):
btree_index_sql = "create index btree_%s on %s using btree(column1);" % (indexname, table_name)
bitmap_index_sql = "create index bitmap_%s on %s using bitmap(column3);" % (indexname, table_name)
index_sql = btree_index_sql + bitmap_index_sql
with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
dbconn.execSQL(conn, index_sql)
conn.commit()
validate_index(context, table_name, dbname)
def validate_index(context, table_name, dbname):
index_sql = "select count(indexrelid::regclass) from pg_index, pg_class where indrelid = '%s'::regclass group by indexrelid;" % table_name
rows = getRows(dbname, index_sql)
if len(rows) != 2:
        raise Exception('Index creation was not successful. Expected 2 indexes, found %d' % len(rows))
def create_schema(context, schema_name, dbname):
if not check_schema_exists(context, schema_name, dbname):
schema_sql = "create schema %s" % schema_name
with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
dbconn.execSQL(conn, schema_sql)
conn.commit()
def create_int_table(context, table_name, table_type='heap', dbname='testdb'):
CREATE_TABLE_SQL = None
NROW = 1000
table_type = table_type.upper()
if table_type == 'AO':
CREATE_TABLE_SQL = 'create table %s WITH(APPENDONLY=TRUE) as select generate_series(1,%d) as c1' % (
table_name, NROW)
elif table_type == 'CO':
CREATE_TABLE_SQL = 'create table %s WITH(APPENDONLY=TRUE, orientation=column) as select generate_series(1, %d) as c1' % (
table_name, NROW)
elif table_type == 'HEAP':
CREATE_TABLE_SQL = 'create table %s as select generate_series(1, %d) as c1' % (table_name, NROW)
if CREATE_TABLE_SQL is None:
raise Exception('Invalid table type specified')
SELECT_TABLE_SQL = 'select count(*) from %s' % table_name
with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
dbconn.execSQL(conn, CREATE_TABLE_SQL)
conn.commit()
result = dbconn.execSQLForSingleton(conn, SELECT_TABLE_SQL)
if result != NROW:
raise Exception('Integer table creation was not successful. Expected %d does not match %d' % (NROW, result))
def drop_database(context, dbname, host=None, port=0, user=None):
LOOPS = 10
    if host is None or port == 0 or user is None:
dropdb_cmd = 'dropdb %s' % dbname
else:
dropdb_cmd = 'psql -h %s -p %d -U %s -d template1 -c "drop database %s"' % (host,
port, user, dbname)
for i in range(LOOPS):
context.exception = None
run_gpcommand(context, dropdb_cmd)
if context.exception:
time.sleep(1)
continue
if not check_db_exists(dbname):
return
time.sleep(1)
if context.exception:
raise context.exception
raise Exception('db exists after dropping: %s' % dbname)
def drop_database_if_exists(context, dbname=None, host=None, port=0, user=None):
if check_db_exists(dbname, host=host, port=port, user=user):
drop_database(context, dbname, host=host, port=port, user=user)
def run_on_all_segs(context, dbname, query):
gparray = GpArray.initFromCatalog(dbconn.DbURL())
primary_segs = [seg for seg in gparray.getDbList() if seg.isSegmentPrimary()]
for seg in primary_segs:
with dbconn.connect(dbconn.DbURL(dbname=dbname, hostname=seg.getSegmentHostName(), port=seg.getSegmentPort()),
utility=True) as conn:
dbconn.execSQL(conn, query)
conn.commit()
def get_nic_up(hostname, nic):
address = hostname + '-cm'
cmd = Command(name='ifconfig nic', cmdStr='sudo /sbin/ifconfig %s' % nic, remoteHost=address, ctxt=REMOTE)
cmd.run(validateAfter=True)
return 'UP' in cmd.get_results().stdout
def bring_nic_down(hostname, nic):
address = hostname + '-cm'
cmd = Command(name='bring down nic', cmdStr='sudo /sbin/ifdown %s' % nic, remoteHost=address, ctxt=REMOTE)
cmd.run(validateAfter=True)
if get_nic_up(hostname, nic):
raise Exception('Unable to bring down nic %s on host %s' % (nic, hostname))
def bring_nic_up(hostname, nic):
address = hostname + '-cm'
cmd = Command(name='bring up nic', cmdStr='sudo /sbin/ifup %s' % nic, remoteHost=address, ctxt=REMOTE)
cmd.run(validateAfter=True)
if not get_nic_up(hostname, nic):
raise Exception('Unable to bring up nic %s on host %s' % (nic, hostname))
def are_segments_synchronized():
gparray = GpArray.initFromCatalog(dbconn.DbURL())
segments = gparray.getDbList()
for seg in segments:
if seg.mode != MODE_SYNCHRONIZED and not seg.isSegmentMaster(True):
return False
return True
def is_any_segment_resynchronized():
gparray = GpArray.initFromCatalog(dbconn.DbURL())
segments = gparray.getDbList()
for seg in segments:
if seg.mode == MODE_RESYNCHRONIZATION:
return True
return False
def check_row_count(tablename, dbname, nrows):
NUM_ROWS_QUERY = 'select count(*) from %s' % tablename
# We want to bubble up the exception so that if table does not exist, the test fails
with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
result = dbconn.execSQLForSingleton(conn, NUM_ROWS_QUERY)
if result != nrows:
raise Exception('%d rows in table %s.%s, expected row count = %d' % (result, dbname, tablename, nrows))
def match_table_select(context, src_tablename, src_dbname, dest_tablename, dest_dbname, orderby=None, options=''):
    if orderby is not None:
dest_tbl_qry = 'psql -d %s -c \'select * from %s order by %s\' %s' % (
dest_dbname, dest_tablename, orderby, options)
src_tbl_qry = '''psql -p %s -h %s -U %s -d %s -c \'select * from %s order by %s\' %s''' % (
os.environ.get('GPTRANSFER_SOURCE_PORT'),
os.environ.get('GPTRANSFER_SOURCE_HOST'),
os.environ.get('GPTRANSFER_SOURCE_USER'),
src_dbname, src_tablename, orderby, options)
else:
dest_tbl_qry = 'psql -d %s -c \'select * from %s\' %s' % (dest_dbname, dest_tablename, options)
src_tbl_qry = '''psql -p %s -h %s -U %s -d %s -c \'select * from %s\' %s''' % (
os.environ.get('GPTRANSFER_SOURCE_PORT'),
os.environ.get('GPTRANSFER_SOURCE_HOST'),
os.environ.get('GPTRANSFER_SOURCE_USER'),
src_dbname, src_tablename, options)
(_, dest_content, _) = run_cmd(dest_tbl_qry)
(_, src_content, _) = run_cmd(src_tbl_qry)
if src_content != dest_content:
raise Exception('''table %s in database %s of source system does not match rows with table %s in database %s of destination system.\n
destination table content:\n%s\n
source table content:\n%s\n''' % (
src_tablename, src_dbname, dest_tablename, dest_dbname, dest_content, src_content))
def get_master_hostname(dbname='template1'):
master_hostname_sql = "SELECT DISTINCT hostname FROM gp_segment_configuration WHERE content=-1 AND role='p'"
return getRows(dbname, master_hostname_sql)
def get_hosts_and_datadirs(dbname='template1'):
get_hosts_and_datadirs_sql = "SELECT hostname, fselocation FROM gp_segment_configuration, pg_filespace_entry WHERE fsedbid = dbid AND role='p';"
return getRows(dbname, get_hosts_and_datadirs_sql)
def get_hosts(dbname='template1'):
get_hosts_sql = "SELECT DISTINCT hostname FROM gp_segment_configuration WHERE role='p';"
return getRows(dbname, get_hosts_sql)
def truncate_table(dbname, tablename):
TRUNCATE_SQL = 'TRUNCATE %s' % tablename
execute_sql(dbname, TRUNCATE_SQL)
def get_table_oid(context, dbname, schema, tablename):
OID_SQL = """SELECT c.oid
FROM pg_class c, pg_namespace n
WHERE c.relnamespace = n.oid AND c.relname = '%s' AND n.nspname = '%s'""" % (tablename, schema)
with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
oid = dbconn.execSQLForSingleton(conn, OID_SQL)
return oid
def insert_row(context, row_values, table, dbname):
sql = """INSERT INTO %s values(%s)""" % (table, row_values)
execute_sql(dbname, sql)
def get_partition_list(partition_type, dbname):
    if partition_type == 'ao':
        sql = GET_ALL_AO_DATATABLES_SQL
    elif partition_type == 'co':
        sql = GET_ALL_CO_DATATABLES_SQL
    else:
        raise Exception('Invalid partition type specified: %s' % partition_type)
    partition_list = getRows(dbname, sql)
    for line in partition_list:
        if len(line) != 4:
            raise Exception('Invalid results from query to get all %s tables: [%s]' % (partition_type, ','.join(line)))
    return partition_list
def get_all_hostnames_as_list(context, dbname):
hosts = []
segs = get_segment_hostnames(context, dbname)
for seg in segs:
hosts.append(seg[0].strip())
masters = get_master_hostname(dbname)
for master in masters:
hosts.append(master[0].strip())
return hosts
def get_pid_for_segment(seg_data_dir, seg_host):
cmd = Command(name='get list of postmaster processes',
cmdStr='ps -eaf | grep %s' % seg_data_dir,
ctxt=REMOTE,
remoteHost=seg_host)
cmd.run(validateAfter=True)
pid = None
results = cmd.get_results().stdout.strip().split('\n')
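    # when several processes match, the pid from the last matching ps line wins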
for res in results:
if 'grep' not in res:
pid = res.split()[1]
if pid is None:
return None
return int(pid)
def install_gppkg(context):
if 'GPPKG_PATH' not in os.environ:
raise Exception('GPPKG_PATH needs to be set in the environment to install gppkg')
if 'GPPKG_NAME' not in os.environ:
raise Exception('GPPKG_NAME needs to be set in the environment to install gppkg')
gppkg_path = os.environ['GPPKG_PATH']
gppkg_name = os.environ['GPPKG_NAME']
command = "gppkg --install %s/%s.gppkg" % (gppkg_path, gppkg_name)
run_command(context, command)
print "Install gppkg command: '%s', stdout: '%s', stderr: '%s'" % (
command, context.stdout_message, context.error_message)
def kill_process(pid, host=None, sig=signal.SIGTERM):
if host is not None:
cmd = Command('kill process on a given host',
cmdStr='kill -%d %d' % (sig, pid),
ctxt=REMOTE,
remoteHost=host)
cmd.run(validateAfter=True)
else:
os.kill(pid, sig)
def has_process_eventually_stopped(proc, host=None):
start_time = current_time = datetime.now()
is_running = False
while (current_time - start_time).seconds < 120:
is_running = is_process_running(proc, host)
if not is_running:
break
time.sleep(2)
current_time = datetime.now()
return not is_running
def check_user_permissions(file_name, access_mode):
st = os.stat(file_name)
if access_mode == 'write':
return bool(st.st_mode & stat.S_IWUSR)
elif access_mode == 'read':
return bool(st.st_mode & stat.S_IRUSR)
elif access_mode == 'execute':
return bool(st.st_mode & stat.S_IXUSR)
else:
raise Exception('Invalid mode specified, should be read, write or execute only')
def are_segments_running():
gparray = GpArray.initFromCatalog(dbconn.DbURL())
segments = gparray.getDbList()
for seg in segments:
if seg.status != 'u':
return False
return True
def modify_sql_file(file, hostport):
if os.path.isfile(file):
for line in fileinput.FileInput(file, inplace=1):
if line.find("gpfdist") >= 0:
                line = re.sub(r'(\d+)\.(\d+)\.(\d+)\.(\d+):(\d+)', hostport, line)
            print re.sub('\n', '', line)
def remove_dir(host, directory):
cmd = 'gpssh -h %s -e \'rm -rf %s\'' % (host, directory)
run_cmd(cmd)
def create_dir(host, directory):
cmd = 'gpssh -h %s -e \'mkdir -p %s\'' % (host, directory)
run_cmd(cmd)
def check_count_for_specific_query(dbname, query, nrows):
    NUM_ROWS_QUERY = query
# We want to bubble up the exception so that if table does not exist, the test fails
with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
result = dbconn.execSQLForSingleton(conn, NUM_ROWS_QUERY)
if result != nrows:
raise Exception('%d rows in query: %s. Expected row count = %d' % (result, query, nrows))
def get_primary_segment_host_port():
"""
return host, port of primary segment (dbid 2)
"""
FIRST_PRIMARY_DBID = 2
get_psegment_sql = 'select hostname, port from gp_segment_configuration where dbid=%i;' % FIRST_PRIMARY_DBID
with dbconn.connect(dbconn.DbURL(dbname='template1')) as conn:
cur = dbconn.execSQL(conn, get_psegment_sql)
rows = cur.fetchall()
primary_seg_host = rows[0][0]
primary_seg_port = rows[0][1]
return primary_seg_host, primary_seg_port
def remove_local_path(dirname):
    paths = glob.glob(os.path.join(os.path.curdir, dirname))
    for path in paths:
        shutil.rmtree(path, ignore_errors=True)
def validate_local_path(path):
    matches = glob.glob(os.path.join(os.path.curdir, path))
    return len(matches)
def populate_regular_table_data(context, tabletype, table_name, compression_type, dbname, rowcount=1094,
with_data=False, host=None, port=0, user=None):
create_database_if_not_exists(context, dbname, host=host, port=port, user=user)
drop_table_if_exists(context, table_name=table_name, dbname=dbname, host=host, port=port, user=user)
if compression_type == "None":
create_partition(context, table_name, tabletype, dbname, compression_type=None, partition=False,
rowcount=rowcount, with_data=with_data, host=host, port=port, user=user)
else:
create_partition(context, table_name, tabletype, dbname, compression_type, partition=False,
rowcount=rowcount, with_data=with_data, host=host, port=port, user=user)
def is_process_running(proc_name, host=None):
if host is not None:
cmd = Command(name='pgrep for %s' % proc_name,
cmdStr="pgrep %s" % proc_name,
ctxt=REMOTE,
remoteHost=host)
else:
cmd = Command(name='pgrep for %s' % proc_name,
cmdStr="pgrep %s" % proc_name)
cmd.run()
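    # pgrep exits 0 when at least one process matched, 1 when none matched, and >1 on error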
if cmd.get_return_code() > 1:
raise Exception("unexpected problem with pgrep, return code: %s" % cmd.get_return_code())
return cmd.get_return_code() == 0
def file_contains_line(filepath, target_line):
with open(filepath, 'r') as myfile:
return target_line in myfile.read().splitlines()
def replace_special_char_env(text):
    for var in ["SP_CHAR_DB", "SP_CHAR_SCHEMA", "SP_CHAR_AO", "SP_CHAR_CO", "SP_CHAR_HEAP"]:
        if var in os.environ:
            text = text.replace("$%s" % var, os.environ[var])
    return text
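# For example (illustrative, assuming SP_CHAR_DB is set in the environment):
# replace_special_char_env('load into $SP_CHAR_DB') returns the string with
# '$SP_CHAR_DB' expanded to os.environ['SP_CHAR_DB'].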
def escape_string(string, conn):
return pg.DB(db=conn).escape_string(string)
# coding: utf-8
import setpath
import zlib
import subprocess
import functions
import time
import urllib2
import urllib
from lib import jopts
from functions.conf import domainExtraHeaders
import lib.gzip32 as gzip
try:
from collections import OrderedDict
except ImportError:
# Python 2.6
from lib.collections26 import OrderedDict
def gz(*args):
"""
.. function:: gz(text) -> gzip compressed blob
Function *gz* compresses its input with gzip's maximum compression level.
Examples:
>>> table1('''
... "qwerqewrqwerqwerqwerqwerqwer"
... "asdfasdfasdfasdfasdfasdfsadf"
... ''')
>>> sql("select length(a), length(gz(a)) from table1")
length(a) | length(gz(a))
-------------------------
28 | 20
28 | 18
"""
return buffer(zlib.compress(args[0], 9))
gz.registered=True
def ungz(*args):
"""
.. function:: ungz(blob) -> text
Function *ungz* decompresses gzip blobs. If the input blobs aren't gzip
compressed, then it just returns them as they are.
Examples:
>>> table1('''
... "qwerqwerqwer"
... "asdfasdfasdf"
... ''')
>>> sql("select ungz(gz(a)) from table1")
ungz(gz(a))
------------
qwerqwerqwer
asdfasdfasdf
>>> sql("select ungz('string'), ungz(123)")
ungz('string') | ungz(123)
--------------------------
string | 123
"""
try:
return zlib.decompress(args[0])
except KeyboardInterrupt:
raise
except:
return args[0]
ungz.registered=True
def urlrequest(*args):
"""
.. function:: urlrequest([null], url) -> response
    This function connects to the *url* (via the GET HTTP method) and returns the request's result. If the first
    parameter is *null*, then *null* is returned in case of errors.
Examples:
>>> sql("select urlrequest('http://www.google.com/not_existing')")
Traceback (most recent call last):
...
HTTPError: HTTP Error 404: Not Found
>>> sql("select urlrequest(null, 'http://www.google.com/not_existing') as result")
result
------
None
"""
    try:
        req = urllib2.Request(''.join((x for x in args if x is not None)), None, domainExtraHeaders)
        hreq = urllib2.urlopen(req)
        if [1 for x, y in hreq.headers.items() if x.lower() in ('content-encoding', 'content-type') and y.lower().find('gzip') != -1]:
            hreq = gzip.GzipFile(fileobj=hreq)
        return unicode(hreq.read(), 'utf-8', errors='replace')
    except urllib2.HTTPError as e:
        if args[0] is None:
            return None
        else:
            raise e
urlrequest.registered=True
def urlrequestpost(*args):
"""
.. function:: urlrequestpost(data_jdict, [null], url) -> response
    This function connects to the *url* (via the POST HTTP method), submits *data_jdict*, and returns the request's
    result. If the second parameter is *null*, then *null* is returned in case of errors.
Examples:
>>> sql('''select urlrequestpost('{"POST_param_name":"data"}', 'http://www.google.com/not_existing')''')
Traceback (most recent call last):
...
HTTPError: HTTP Error 404: Not Found
>>> sql('''select urlrequestpost('["POST_param_name","data"]', null, 'http://www.google.com/not_existing') as result''')
result
------
None
>>> sql("select urlrequestpost(jdict('param1','value1'), null, 'http://www.google.com/not_existing') as result")
result
------
None
>>> sql("select urlrequestpost(jpack('param1','value1'), null, 'http://www.google.com/not_existing') as result")
result
------
None
"""
    try:
        req = urllib2.Request(''.join((x for x in args[1:] if x is not None)), None, domainExtraHeaders)
        datain = jopts.fromjsingle(args[0])
        dataout = []
        if isinstance(datain, list):
            for i in xrange(0, len(datain), 2):
                dataout.append((datain[i].encode('utf_8'), datain[i + 1].encode('utf_8')))
        else:
            dataout = [(x.encode('utf_8'), y.encode('utf_8')) for x, y in datain.items()]
        if dataout == []:
            raise functions.OperatorError('urlrequestpost', "A list or dict should be provided")
        hreq = urllib2.urlopen(req, urllib.urlencode(dataout))
        if [1 for x, y in hreq.headers.items() if x.lower() in ('content-encoding', 'content-type') and y.lower().find('gzip') != -1]:
            hreq = gzip.GzipFile(fileobj=hreq)
        return unicode(hreq.read(), 'utf-8', errors='replace')
    except urllib2.HTTPError as e:
        if args[1] is None:
            return None
        else:
            raise e
urlrequestpost.registered=True
def failif(*args):
"""
    .. function:: failif(condition [, message])
    If the condition is true, an error is raised. If a message is provided, it is included in the
    raised error.
Examples:
>>> sql("select failif(1=1,'exception') as answer") #doctest:+ELLIPSIS +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
OperatorError: Madis SQLError:
Operator FAILIF: exception
>>> sql("select failif(1=0,'exception') as answer") #doctest:+ELLIPSIS +NORMALIZE_WHITESPACE
answer
------
0
>>> sql("select failif(1=1) as answer") #doctest:+ELLIPSIS +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
OperatorError: Madis SQLError:
Operator FAILIF: an error was found
"""
    if len(args) > 2:
        raise functions.OperatorError('failif', 'operator needs one or two inputs')
if args[0]:
if len(args)==2:
raise functions.OperatorError('failif', args[1])
else:
raise functions.OperatorError('failif', 'an error was found')
return args[0]
failif.registered=True
def execprogram(*args):
"""
.. function:: execprogram(stdin=null, program_name, parameters, [raise_error]) -> text or blob
    Function *execprogram* executes a shell command and returns its output. If the
    value of the first argument is not *null*, that value is written to the program's standard input.
If the program doesn't return a *0* return code, then a madIS error will be raised, containing
the contents of the program's error stream.
If the last argument of *execprogram* is set to *null*, then all program errors will be returned as *null*
(see "cat non_existent_file" examples below).
Every one of the program's parameters must be provided as different arguments of the *execprogram* call
(see "cat -n" example below).
.. note::
        Function *execprogram* tries by default to convert the program's output to UTF-8. If the conversion
        isn't successful, then it returns the output as a binary blob.
Examples:
>>> table1('''
... echo test
... echo 1
... ''')
>>> sql("select execprogram(null, a, b) from table1")
execprogram(null, a, b)
-----------------------
test
1
>>> sql("select execprogram(null, null, '-l')") #doctest:+ELLIPSIS +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
OperatorError: Madis SQLError:
Operator EXECPROGRAM: Second parameter should be the name of the program to run
>>> sql("select execprogram(null, null, '-l', null)") #doctest:+ELLIPSIS +NORMALIZE_WHITESPACE
execprogram(null, null, '-l', null)
-----------------------------------
None
>>> sql("select execprogram('test', 'cat')")
execprogram('test', 'cat')
--------------------------
test
>>> sql('''select execprogram('test', 'cat', '-n')''') #doctest:+ELLIPSIS +NORMALIZE_WHITESPACE
execprogram('test', 'cat', '-n')
--------------------------------
1 test
>>> sql("select execprogram(null, 'NON_EXISTENT_PROGRAM')") #doctest:+ELLIPSIS +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
OperatorError: Madis SQLError:
Operator EXECPROGRAM: [Errno 2] No such file or directory
>>> sql("select execprogram(null, 'cat', 'non_existent_file')") #doctest:+ELLIPSIS +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
OperatorError: Madis SQLError:
Operator EXECPROGRAM: cat: non_existent_file: No such file or directory
>>> sql("select execprogram(null, 'cat', 'non_existent_file', null)") #doctest:+ELLIPSIS +NORMALIZE_WHITESPACE
execprogram(null, 'cat', 'non_existent_file', null)
---------------------------------------------------
None
"""
if len(args)<2:
raise functions.OperatorError('execprogram', "First parameter should be data to provide to program's STDIN, or null")
    # A trailing null argument means "suppress errors": failures return null
    # instead of raising (see the docstring examples above).
    suppress_errors = False
    if len(args) > 2 and args[-1] is None:
        suppress_errors = True
    if args[1] is None:
        if suppress_errors:
            return None
        else:
            raise functions.OperatorError('execprogram', "Second parameter should be the name of the program to run")
    outtext = errtext = ''
    try:
        p = subprocess.Popen([unicode(x) for x in args[1:] if x is not None],
                             stdin=subprocess.PIPE if args[0] is not None else None,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        if args[0] is None:
            outtext, errtext = p.communicate()
        else:
            val = args[0]
            valtype = type(val)
            if valtype == unicode:
                val = val.encode('utf-8')
            if valtype in (int, float):
                val = str(val)
            outtext, errtext = p.communicate(val)
    except Exception as e:
        raise functions.OperatorError('execprogram', functions.mstr(e))
    if p.returncode != 0:
        if suppress_errors:
            return None
        else:
            raise functions.OperatorError('execprogram', functions.mstr(errtext).strip())
try:
outtext=unicode(outtext, 'utf-8')
except KeyboardInterrupt:
raise
except:
return buffer(outtext)
return outtext
execprogram.registered=True
def sleep(*args):
"""
.. function:: sleep(seconds)
    This function waits for the given number of seconds before returning. The *seconds* parameter can
    be fractional (e.g. *0.1* will sleep for 100 milliseconds).
Examples:
>>> sql("select sleep(0.1)")
sleep(0.1)
----------
0.1
"""
t = args[0]
if t<0:
t=0
time.sleep(t)
return t
sleep.registered=True
if not ('.' in __name__):
"""
This is needed to be able to test the function, put it at the end of every
new function you create
"""
import sys
import setpath
from functions import *
testfunction()
if __name__ == "__main__":
reload(sys)
sys.setdefaultencoding('utf-8')
import doctest
doctest.testmod()
# =============================================================================
# OWSLib. Copyright (C) 2005 Sean C. Gillies
#
# Contact email: sgillies@frii.com
#
# $Id: wfs.py 503 2006-02-01 17:09:12Z dokai $
# =============================================================================
from __future__ import (absolute_import, division, print_function)
import cgi
from cStringIO import StringIO
from urllib import urlencode
from urllib2 import urlopen
from bcube_owslib.util import openURL, testXMLValue, extract_xml_list, ServiceException, xmltag_split
from bcube_owslib.etree import etree
from bcube_owslib.fgdc import Metadata
from bcube_owslib.iso import MD_Metadata
from bcube_owslib.crs import Crs
from bcube_owslib.namespaces import Namespaces
from bcube_owslib.util import log
n = Namespaces()
WFS_NAMESPACE = n.get_namespace("wfs")
OGC_NAMESPACE = n.get_namespace("ogc")
# TODO: use nspath in util.py
def nspath(path, ns=WFS_NAMESPACE):
"""
Prefix the given path with the given namespace identifier.
Parameters
----------
path : string
ElementTree API Compatible path expression
ns : string
The XML namespace. Defaults to WFS namespace.
"""
components = []
for component in path.split("/"):
if component != '*':
component = "{%s}%s" % (ns, component)
components.append(component)
return "/".join(components)
class WebFeatureService_1_0_0(object):
"""Abstraction for OGC Web Feature Service (WFS).
Implements IWebFeatureService.
"""
    def __new__(cls, url, version, xml, parse_remote_metadata=False, timeout=30):
""" overridden __new__ method
@type url: string
@param url: url of WFS capabilities document
@type xml: string
@param xml: elementtree object
@type parse_remote_metadata: boolean
@param parse_remote_metadata: whether to fully process MetadataURL elements
@param timeout: time (in seconds) after which requests should timeout
@return: initialized WebFeatureService_1_0_0 object
"""
        obj = object.__new__(cls)
obj.__init__(url, version, xml, parse_remote_metadata, timeout)
return obj
def __getitem__(self, name):
''' check contents dictionary to allow dict like access to service layers'''
if name in self.__getattribute__('contents').keys():
return self.__getattribute__('contents')[name]
else:
raise KeyError("No content named %s" % name)
def __init__(self, url, version, xml=None, parse_remote_metadata=False, timeout=30):
"""Initialize."""
self.url = url
self.version = version
self.timeout = timeout
self._capabilities = None
reader = WFSCapabilitiesReader(self.version)
if xml:
self._capabilities = reader.readString(xml)
else:
self._capabilities = reader.read(self.url)
self._buildMetadata(parse_remote_metadata)
def _buildMetadata(self, parse_remote_metadata=False):
'''set up capabilities metadata objects: '''
# serviceIdentification metadata
serviceelem = self._capabilities.find(nspath('Service'))
self.identification = ServiceIdentification(serviceelem, self.version)
# serviceProvider metadata
self.provider = ServiceProvider(serviceelem)
# serviceOperations metadata
self.operations = []
for elem in self._capabilities.find(nspath('Capability/Request'))[:]:
self.operations.append(OperationMetadata(elem))
# serviceContents metadata: our assumption is that services use a top-level
# layer as a metadata organizer, nothing more.
self.contents = {}
featuretypelist = self._capabilities.find(nspath('FeatureTypeList'))
features = self._capabilities.findall(nspath('FeatureTypeList/FeatureType'))
for feature in features:
cm = ContentMetadata(feature, featuretypelist, parse_remote_metadata)
self.contents[cm.id] = cm
# exceptions
self.exceptions = [f.text for f
in self._capabilities.findall('Capability/Exception/Format')]
def getcapabilities(self):
"""Request and return capabilities document from the WFS as a
file-like object.
NOTE: this is effectively redundant now"""
reader = WFSCapabilitiesReader(self.version)
return urlopen(reader.capabilities_url(self.url), timeout=self.timeout)
def items(self):
'''supports dict-like items() access'''
items = []
for item in self.contents:
items.append((item, self.contents[item]))
return items
def getfeature(self, typename=None, filter=None, bbox=None, featureid=None,
featureversion=None, propertyname=['*'], maxfeatures=None,
srsname=None, outputFormat=None, method='{http://www.opengis.net/wfs}Get',
startindex=None):
"""Request and return feature data as a file-like object.
Parameters
----------
typename : list
List of typenames (string)
filter : string
XML-encoded OGC filter expression.
bbox : tuple
(left, bottom, right, top) in the feature type's coordinates.
featureid : list
List of unique feature ids (string)
featureversion : string
Default is most recent feature version.
propertyname : list
List of feature property names. '*' matches all.
maxfeatures : int
Maximum number of features to be returned.
method : string
Qualified name of the HTTP DCP method to use.
srsname: string
EPSG code to request the data in
outputFormat: string (optional)
Requested response format of the request.
startindex: int (optional)
Start position to return feature set (paging in combination with maxfeatures)
There are 3 different modes of use
1) typename and bbox (simple spatial query)
2) typename and filter (more expressive)
3) featureid (direct access to known features)
"""
try:
base_url = next(
(
m.get('url') for m in self.getOperationByName('GetFeature').methods
if m.get('type').lower() == method.lower()
)
)
except StopIteration:
base_url = self.url
request = {'service': 'WFS', 'version': self.version, 'request': 'GetFeature'}
# check featureid
if featureid:
request['featureid'] = ','.join(featureid)
elif bbox and typename:
request['bbox'] = ','.join([repr(x) for x in bbox])
elif filter and typename:
request['filter'] = str(filter)
if srsname:
request['srsname'] = str(srsname)
        if featureid is None:
            assert typename and len(typename) > 0
        if typename:
            request['typename'] = ','.join(typename)
if propertyname:
request['propertyname'] = ','.join(propertyname)
if featureversion:
request['featureversion'] = str(featureversion)
if maxfeatures:
request['maxfeatures'] = str(maxfeatures)
if startindex:
request['startindex'] = str(startindex)
if outputFormat is not None:
request["outputFormat"] = outputFormat
data = urlencode(request)
log.debug("Making request: %s?%s" % (base_url, data))
u = openURL(base_url, data, method, timeout=self.timeout)
# check for service exceptions, rewrap, and return
# We're going to assume that anything with a content-length > 32k
# is data. We'll check anything smaller.
try:
length = int(u.info()['Content-Length'])
have_read = False
except (KeyError, AttributeError):
data = u.read()
have_read = True
length = len(data)
if length < 32000:
if not have_read:
data = u.read()
try:
tree = etree.fromstring(data)
except BaseException:
# Not XML
return StringIO(data)
else:
if tree.tag == "{%s}ServiceExceptionReport" % OGC_NAMESPACE:
se = tree.find(nspath('ServiceException', OGC_NAMESPACE))
raise ServiceException(str(se.text).strip())
else:
return StringIO(data)
else:
if have_read:
return StringIO(data)
return u
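    # Illustrative sketch of the three modes of use listed in the docstring
    # (the service URL and typenames are assumptions for demonstration only):
    #   wfs = WebFeatureService_1_0_0('http://example.com/wfs', '1.0.0', None)
    #   wfs.getfeature(typename=['ns:roads'], bbox=(-105.0, 39.0, -104.0, 40.0))
    #   wfs.getfeature(typename=['ns:roads'], filter=filter_xml)
    #   wfs.getfeature(featureid=['roads.90'])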
def getOperationByName(self, name):
"""Return a named content item."""
for item in self.operations:
if item.name == name:
return item
raise KeyError("No operation named %s" % name)
class ServiceIdentification(object):
''' Implements IServiceIdentificationMetadata '''
def __init__(self, infoset, version):
self._root = infoset
self.type = testXMLValue(self._root.find(nspath('Name')))
self.version = version
self.title = testXMLValue(self._root.find(nspath('Title')))
self.abstract = testXMLValue(self._root.find(nspath('Abstract')))
self.keywords = [f.text for f in self._root.findall(nspath('Keywords'))]
self.fees = testXMLValue(self._root.find(nspath('Fees')))
self.accessconstraints = testXMLValue(self._root.find(nspath('AccessConstraints')))
class ServiceProvider(object):
''' Implements IServiceProviderMetatdata '''
def __init__(self, infoset):
self._root = infoset
self.name = testXMLValue(self._root.find(nspath('Name')))
self.url = testXMLValue(self._root.find(nspath('OnlineResource')))
self.keywords = extract_xml_list(self._root.find(nspath('Keywords')))
# no contact info in this version of the spec
self.contact = None
class ContentMetadata:
"""Abstraction for WFS metadata.
Implements IMetadata.
"""
def __init__(self, elem, parent, parse_remote_metadata=False, timeout=30):
"""."""
self.id = testXMLValue(elem.find(nspath('Name')))
self.title = testXMLValue(elem.find(nspath('Title')))
self.abstract = testXMLValue(elem.find(nspath('Abstract')))
self.keywords = [f.text for f in elem.findall(nspath('Keywords'))]
# bboxes
boxes = elem.findall('BoundingBox')
self.boundingBoxes = []
for b in boxes:
try:
# sometimes the SRS attribute is (wrongly) not provided
srs = b.attrib['SRS']
except KeyError:
srs = None
self.boundingBoxes.append((
float(b.attrib['minx']),
float(b.attrib['miny']),
float(b.attrib['maxx']),
float(b.attrib['maxy']),
srs,
))
self.boundingBoxes = list(set(self.boundingBoxes))
self.boundingBoxWGS84 = None
b = elem.find(nspath('LatLongBoundingBox'))
if b is not None:
self.boundingBoxWGS84 = (
float(b.attrib['minx']), float(b.attrib['miny']),
float(b.attrib['maxx']), float(b.attrib['maxy']),
)
# crs options
self.crsOptions = [str(Crs(srs.text)) for srs in elem.findall(nspath('SRS'))]
# verbs
self.verbOptions = [op.tag for op in parent.findall(nspath('Operations/*'))]
        self.verbOptions += [
            op.tag for op
            in elem.findall(nspath('Operations/*'))
            if op.tag not in self.verbOptions
        ]
# others not used but needed for iContentMetadata harmonisation
self.styles = None
self.timepositions = None
self.defaulttimeposition = None
self.attribution = None
# MetadataURLs
self.metadataUrls = []
for m in elem.findall(nspath('MetadataURL')):
metadataUrl = {
'type': testXMLValue(m.attrib['type'], attrib=True),
'format': testXMLValue(m.find('Format')),
'url': testXMLValue(m)
}
if metadataUrl['url'] is not None and parse_remote_metadata: # download URL
try:
content = urlopen(metadataUrl['url'], timeout=timeout)
doc = etree.parse(content)
if metadataUrl['type'] is not None:
if metadataUrl['type'] == 'FGDC':
metadataUrl['metadata'] = Metadata(doc)
if metadataUrl['type'] == 'TC211':
metadataUrl['metadata'] = MD_Metadata(doc)
except Exception:
metadataUrl['metadata'] = None
self.metadataUrls.append(metadataUrl)
class OperationMetadata:
"""Abstraction for WFS metadata.
Implements IMetadata.
"""
def __init__(self, elem):
"""."""
self.name = xmltag_split(elem.tag)
# formatOptions
self.formatOptions = [xmltag_split(f.tag) for f
in elem.findall(nspath('ResultFormat/*')) + elem.findall(nspath('SchemaDescriptionLanguage/*'))]
self.methods = []
for verb in elem.findall(nspath('DCPType/HTTP/*')):
url = verb.attrib['onlineResource']
self.methods.append({'type' : xmltag_split(verb.tag), 'url': url})
class WFSCapabilitiesReader(object):
"""Read and parse capabilities document into a lxml.etree infoset
"""
def __init__(self, version='1.0'):
"""Initialize"""
self.version = version
self._infoset = None
def capabilities_url(self, service_url):
"""Return a capabilities url
"""
qs = []
if service_url.find('?') != -1:
qs = cgi.parse_qsl(service_url.split('?')[1])
params = [x[0] for x in qs]
if 'service' not in params:
qs.append(('service', 'WFS'))
if 'request' not in params:
qs.append(('request', 'GetCapabilities'))
if 'version' not in params:
qs.append(('version', self.version))
urlqs = urlencode(tuple(qs))
return service_url.split('?')[0] + '?' + urlqs
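    # For example (the URL is an assumption for illustration):
    #   capabilities_url('http://example.com/wfs?service=WFS')
    #   -> 'http://example.com/wfs?service=WFS&request=GetCapabilities&version=1.0'
    # Existing query parameters are preserved; only missing ones are appended.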
def read(self, url, timeout=30):
"""Get and parse a WFS capabilities document, returning an
instance of WFSCapabilitiesInfoset
Parameters
----------
url : string
The URL to the WFS capabilities document.
timeout : number
A timeout value (in seconds) for the request.
"""
request = self.capabilities_url(url)
u = urlopen(request, timeout=timeout)
return etree.fromstring(u.read())
def readString(self, st):
"""Parse a WFS capabilities document, returning an
instance of WFSCapabilitiesInfoset
string should be an XML capabilities document
"""
if not isinstance(st, str):
raise ValueError("String must be of type string, not %s" % type(st))
return etree.fromstring(st)
"""Support to interface with universal remote control devices."""
from __future__ import annotations
from collections.abc import Iterable
from dataclasses import dataclass
from datetime import timedelta
import functools as ft
import logging
from typing import Any, cast, final
import voluptuous as vol
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
ATTR_COMMAND,
SERVICE_TOGGLE,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_ON,
)
from homeassistant.core import HomeAssistant
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.config_validation import ( # noqa: F401
PLATFORM_SCHEMA,
PLATFORM_SCHEMA_BASE,
make_entity_service_schema,
)
from homeassistant.helpers.entity import ToggleEntity, ToggleEntityDescription
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.typing import ConfigType
from homeassistant.loader import bind_hass
# mypy: allow-untyped-calls, allow-untyped-defs, no-check-untyped-defs
_LOGGER = logging.getLogger(__name__)
ATTR_ACTIVITY = "activity"
ATTR_ACTIVITY_LIST = "activity_list"
ATTR_CURRENT_ACTIVITY = "current_activity"
ATTR_COMMAND_TYPE = "command_type"
ATTR_DEVICE = "device"
ATTR_NUM_REPEATS = "num_repeats"
ATTR_DELAY_SECS = "delay_secs"
ATTR_HOLD_SECS = "hold_secs"
ATTR_ALTERNATIVE = "alternative"
ATTR_TIMEOUT = "timeout"
DOMAIN = "remote"
SCAN_INTERVAL = timedelta(seconds=30)
ENTITY_ID_FORMAT = DOMAIN + ".{}"
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=10)
SERVICE_SEND_COMMAND = "send_command"
SERVICE_LEARN_COMMAND = "learn_command"
SERVICE_DELETE_COMMAND = "delete_command"
SERVICE_SYNC = "sync"
DEFAULT_NUM_REPEATS = 1
DEFAULT_DELAY_SECS = 0.4
DEFAULT_HOLD_SECS = 0
SUPPORT_LEARN_COMMAND = 1
SUPPORT_DELETE_COMMAND = 2
SUPPORT_ACTIVITY = 4
REMOTE_SERVICE_ACTIVITY_SCHEMA = make_entity_service_schema(
{vol.Optional(ATTR_ACTIVITY): cv.string}
)
@bind_hass
def is_on(hass: HomeAssistant, entity_id: str) -> bool:
"""Return if the remote is on based on the statemachine."""
return hass.states.is_state(entity_id, STATE_ON)
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
"""Track states and offer events for remotes."""
component = hass.data[DOMAIN] = EntityComponent(
_LOGGER, DOMAIN, hass, SCAN_INTERVAL
)
await component.async_setup(config)
component.async_register_entity_service(
SERVICE_TURN_OFF, REMOTE_SERVICE_ACTIVITY_SCHEMA, "async_turn_off"
)
component.async_register_entity_service(
SERVICE_TURN_ON, REMOTE_SERVICE_ACTIVITY_SCHEMA, "async_turn_on"
)
component.async_register_entity_service(
SERVICE_TOGGLE, REMOTE_SERVICE_ACTIVITY_SCHEMA, "async_toggle"
)
component.async_register_entity_service(
SERVICE_SEND_COMMAND,
{
vol.Required(ATTR_COMMAND): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(ATTR_DEVICE): cv.string,
vol.Optional(
ATTR_NUM_REPEATS, default=DEFAULT_NUM_REPEATS
): cv.positive_int,
vol.Optional(ATTR_DELAY_SECS): vol.Coerce(float),
vol.Optional(ATTR_HOLD_SECS, default=DEFAULT_HOLD_SECS): vol.Coerce(float),
},
"async_send_command",
)
component.async_register_entity_service(
SERVICE_LEARN_COMMAND,
{
vol.Optional(ATTR_DEVICE): cv.string,
vol.Optional(ATTR_COMMAND): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(ATTR_COMMAND_TYPE): cv.string,
vol.Optional(ATTR_ALTERNATIVE): cv.boolean,
vol.Optional(ATTR_TIMEOUT): cv.positive_int,
},
"async_learn_command",
)
component.async_register_entity_service(
SERVICE_DELETE_COMMAND,
{
vol.Required(ATTR_COMMAND): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(ATTR_DEVICE): cv.string,
},
"async_delete_command",
)
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up a config entry."""
return await cast(EntityComponent, hass.data[DOMAIN]).async_setup_entry(entry)
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
return await cast(EntityComponent, hass.data[DOMAIN]).async_unload_entry(entry)
@dataclass
class RemoteEntityDescription(ToggleEntityDescription):
"""A class that describes remote entities."""
class RemoteEntity(ToggleEntity):
"""Base class for remote entities."""
entity_description: RemoteEntityDescription
_attr_activity_list: list[str] | None = None
_attr_current_activity: str | None = None
_attr_supported_features: int = 0
@property
def supported_features(self) -> int:
"""Flag supported features."""
return self._attr_supported_features
@property
def current_activity(self) -> str | None:
"""Active activity."""
return self._attr_current_activity
@property
def activity_list(self) -> list[str] | None:
"""List of available activities."""
return self._attr_activity_list
@final
@property
def state_attributes(self) -> dict[str, Any] | None:
"""Return optional state attributes."""
if not self.supported_features & SUPPORT_ACTIVITY:
return None
return {
ATTR_ACTIVITY_LIST: self.activity_list,
ATTR_CURRENT_ACTIVITY: self.current_activity,
}
def send_command(self, command: Iterable[str], **kwargs: Any) -> None:
"""Send commands to a device."""
raise NotImplementedError()
async def async_send_command(self, command: Iterable[str], **kwargs: Any) -> None:
"""Send commands to a device."""
await self.hass.async_add_executor_job(
ft.partial(self.send_command, command, **kwargs)
)
def learn_command(self, **kwargs: Any) -> None:
"""Learn a command from a device."""
raise NotImplementedError()
async def async_learn_command(self, **kwargs: Any) -> None:
"""Learn a command from a device."""
await self.hass.async_add_executor_job(ft.partial(self.learn_command, **kwargs))
def delete_command(self, **kwargs: Any) -> None:
"""Delete commands from the database."""
raise NotImplementedError()
async def async_delete_command(self, **kwargs: Any) -> None:
"""Delete commands from the database."""
await self.hass.async_add_executor_job(
ft.partial(self.delete_command, **kwargs)
)
class RemoteDevice(RemoteEntity):
"""Representation of a remote (for backwards compatibility)."""
def __init_subclass__(cls, **kwargs):
"""Print deprecation warning."""
super().__init_subclass__(**kwargs)
_LOGGER.warning(
"RemoteDevice is deprecated, modify %s to extend RemoteEntity",
cls.__name__,
)
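# Illustrative sketch only (not part of Home Assistant itself): the minimal
# surface an integration overrides when extending RemoteEntity. The entity
# name, activity list, and the logged "transmission" are assumptions for
# demonstration.
class ExampleRemote(RemoteEntity):
    """Minimal RemoteEntity subclass used only to illustrate the API."""

    _attr_name = "Example Remote"
    _attr_supported_features = SUPPORT_ACTIVITY
    _attr_activity_list = ["Watch TV", "Listen to Music"]

    def turn_on(self, activity: str | None = None, **kwargs: Any) -> None:
        """Turn the remote on, optionally starting a named activity."""
        self._attr_current_activity = activity or self._attr_activity_list[0]
        self._attr_is_on = True

    def turn_off(self, activity: str | None = None, **kwargs: Any) -> None:
        """Turn the remote off."""
        self._attr_current_activity = None
        self._attr_is_on = False

    def send_command(self, command: Iterable[str], **kwargs: Any) -> None:
        """Log each command instead of transmitting to real hardware."""
        for single_command in command:
            _LOGGER.debug("Would send command: %s", single_command)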
#!/usr/bin/env python
# Copyright (c) 2017 Ericsson and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
# pylint: disable=missing-docstring
import socket
import unittest
import logging
import mock
from functest.ci import check_deployment
__author__ = "Jose Lausuch <jose.lausuch@ericsson.com>"
class CheckDeploymentTesting(unittest.TestCase):
"""The super class which testing classes could inherit."""
# pylint: disable=missing-docstring,too-many-public-methods
logging.disable(logging.CRITICAL)
def setUp(self):
self.client_test = mock.Mock()
self.deployment = check_deployment.CheckDeployment()
self.service_test = 'compute'
self.rc_file = self.deployment.rc_file
self.endpoint_test = 'http://192.168.0.6:5000/v3'
creds_attr = {'auth_url': self.endpoint_test,
'proxy_settings': ''}
proxy_attr = {'host': '192.168.0.1', 'port': '5000'}
proxy_settings = mock.Mock()
proxy_settings.configure_mock(**proxy_attr)
self.os_creds = mock.Mock()
self.os_creds.configure_mock(**creds_attr)
self.os_creds.proxy_settings = proxy_settings
self.deployment.os_creds = self.os_creds
@mock.patch('socket.socket.connect', side_effect=TypeError)
def test_verify_connectivity_ko1(self, *args):
self.assertFalse(check_deployment.verify_connectivity("127.0.0.1"))
args[0].assert_called_once_with((None, 80))
@mock.patch('socket.socket.connect', side_effect=socket.error)
def test_verify_connectivity_ko2(self, *args):
self.assertFalse(
check_deployment.verify_connectivity("http://127.0.0.1"))
args[0].assert_called_once_with(("127.0.0.1", 80))
@mock.patch('socket.socket.connect', side_effect=socket.error)
def test_verify_connectivity_ko3(self, *args):
self.assertFalse(
check_deployment.verify_connectivity("https://127.0.0.1"))
args[0].assert_called_once_with(("127.0.0.1", 443))
@mock.patch('socket.socket.connect')
def test_verify_connectivity(self, *args):
self.assertTrue(
check_deployment.verify_connectivity("https://127.0.0.1"))
args[0].assert_called_once_with(("127.0.0.1", 443))
@mock.patch('snaps.openstack.utils.keystone_utils.keystone_session',
return_value=mock.Mock(
get_token=mock.Mock(side_effect=Exception)))
def test_get_auth_token_ko(self, *args):
with self.assertRaises(Exception):
check_deployment.get_auth_token(self.os_creds)
args[0].assert_called_once_with(self.os_creds)
@mock.patch('snaps.openstack.utils.keystone_utils.keystone_session',
return_value=mock.Mock(
get_token=mock.Mock(return_value="foo")))
def test_get_auth_token(self, *args):
self.assertEqual(check_deployment.get_auth_token(self.os_creds), "foo")
args[0].assert_called_once_with(self.os_creds)
@mock.patch('six.moves.builtins.open',
mock.mock_open(read_data='OS_AUTH_URL'))
    @mock.patch('functest.ci.check_deployment.os.path.isfile', return_value=True)
def test_check_rc(self, *args):
self.deployment.check_rc()
args[0].assert_called_once_with(self.rc_file)
@mock.patch('functest.ci.check_deployment.os.path.isfile',
return_value=False)
def test_check_rc_missing_file(self, *args):
with self.assertRaises(Exception) as context:
self.deployment.check_rc()
args[0].assert_called_once_with(self.rc_file)
msg = 'RC file {} does not exist!'.format(self.rc_file)
self.assertTrue(msg in str(context.exception))
@mock.patch('six.moves.builtins.open',
mock.mock_open(read_data='test'))
@mock.patch('functest.ci.check_deployment.os.path.isfile',
return_value=True)
def test_check_rc_missing_os_auth(self, *args):
with self.assertRaises(Exception) as context:
self.deployment.check_rc()
args[0].assert_called_once_with(self.rc_file)
msg = 'OS_AUTH_URL not defined in {}.'.format(self.rc_file)
self.assertTrue(msg in str(context.exception))
@mock.patch('functest.ci.check_deployment.get_auth_token',
return_value='gAAAAABaOhXGS')
@mock.patch('functest.ci.check_deployment.verify_connectivity',
return_value=True)
def test_check_auth_endpoint(self, *args):
self.deployment.check_auth_endpoint()
args[0].assert_called_once_with(self.endpoint_test)
args[1].assert_called_once_with(mock.ANY)
@mock.patch('functest.ci.check_deployment.verify_connectivity',
return_value=False)
def test_check_auth_endpoint_ko(self, *args):
with self.assertRaises(Exception) as context:
self.deployment.check_auth_endpoint()
msg = "OS_AUTH_URL {} is not reachable.".format(self.os_creds.auth_url)
args[0].assert_called_once_with(self.os_creds.auth_url)
self.assertTrue(msg in str(context.exception))
@mock.patch('functest.ci.check_deployment.verify_connectivity',
return_value=True)
@mock.patch('functest.ci.check_deployment.keystone_utils.get_endpoint')
def test_check_public_endpoint(self, *args):
args[0].return_value = self.endpoint_test
self.deployment.check_public_endpoint()
args[0].assert_called_once_with(
mock.ANY, 'identity', interface='public')
args[1].assert_called_once_with(self.endpoint_test)
@mock.patch('functest.ci.check_deployment.verify_connectivity',
return_value=False)
@mock.patch('functest.ci.check_deployment.keystone_utils.get_endpoint')
def test_check_public_endpoint_ko(self, *args):
args[0].return_value = self.endpoint_test
with self.assertRaises(Exception) as context:
self.deployment.check_public_endpoint()
args[0].assert_called_once_with(
mock.ANY, 'identity', interface='public')
args[1].assert_called_once_with(self.endpoint_test)
msg = "Public endpoint {} is not reachable.".format(self.endpoint_test)
self.assertTrue(msg in str(context.exception))
@mock.patch('functest.ci.check_deployment.verify_connectivity',
return_value=True)
@mock.patch('functest.ci.check_deployment.keystone_utils.get_endpoint')
def test_check_service_endpoint(self, *args):
self.deployment.check_service_endpoint(self.service_test)
args[0].assert_called_once_with(
mock.ANY, self.service_test, interface='public')
args[1].assert_called_once_with(args[0].return_value)
@mock.patch('functest.ci.check_deployment.verify_connectivity',
return_value=False)
@mock.patch('functest.ci.check_deployment.keystone_utils.get_endpoint')
def test_check_service_endpoint_ko(self, *args):
args[0].return_value = self.endpoint_test
with self.assertRaises(Exception) as context:
self.deployment.check_service_endpoint(self.service_test)
msg = "{} endpoint {} is not reachable.".format(
self.service_test, self.endpoint_test)
self.assertTrue(msg in str(context.exception))
args[0].assert_called_once_with(
mock.ANY, self.service_test, interface='public')
args[1].assert_called_once_with(args[0].return_value)
@mock.patch('functest.ci.check_deployment.nova_utils.nova_client')
def test_check_nova(self, mock_method):
self.deployment.check_nova()
mock_method.assert_called_once_with(mock.ANY)
@mock.patch('functest.ci.check_deployment.nova_utils.nova_client',
return_value=mock.Mock(
servers=mock.Mock(list=mock.Mock(side_effect=Exception))))
def test_check_nova_fail(self, mock_method):
with self.assertRaises(Exception):
self.deployment.check_nova()
mock_method.assert_called_once_with(mock.ANY)
@mock.patch('functest.ci.check_deployment.neutron_utils.neutron_client')
def test_check_neutron(self, mock_method):
self.deployment.check_neutron()
mock_method.assert_called_once_with(mock.ANY)
@mock.patch('functest.ci.check_deployment.neutron_utils.neutron_client',
return_value=mock.Mock(
list_networks=mock.Mock(side_effect=Exception)))
def test_check_neutron_fail(self, mock_method):
with self.assertRaises(Exception):
self.deployment.check_neutron()
mock_method.assert_called_once_with(mock.ANY)
@mock.patch('functest.ci.check_deployment.glance_utils.glance_client')
def test_check_glance(self, mock_method):
self.deployment.check_glance()
mock_method.assert_called_once_with(mock.ANY)
@mock.patch('functest.ci.check_deployment.glance_utils.glance_client',
return_value=mock.Mock(
images=mock.Mock(list=mock.Mock(side_effect=Exception))))
def test_check_glance_fail(self, mock_method):
with self.assertRaises(Exception):
self.deployment.check_glance()
mock_method.assert_called_once_with(mock.ANY)
@mock.patch('functest.ci.check_deployment.LOGGER.info')
@mock.patch('functest.opnfv_tests.openstack.snaps.snaps_utils.'
'get_ext_net_name', return_value='ext-net')
def test_check_extnet(self, *args):
self.deployment.check_ext_net()
args[0].assert_called_once_with(mock.ANY)
args[1].assert_called_once_with(
"External network found: %s", "ext-net")
@mock.patch('functest.opnfv_tests.openstack.snaps.snaps_utils.'
'get_ext_net_name', return_value='')
def test_check_extnet_none(self, mock_getext):
with self.assertRaises(Exception) as context:
self.deployment.check_ext_net()
self.assertTrue(mock_getext.called)
msg = 'ERROR: No external networks in the deployment.'
self.assertTrue(msg in str(context.exception))
@mock.patch('functest.ci.check_deployment.CheckDeployment.check_rc',
side_effect=Exception)
def test_check_all_exc1(self, *args):
with self.assertRaises(Exception):
self.deployment.check_all()
args[0].assert_called_once_with()
@mock.patch('snaps.openstack.tests.openstack_tests.get_credentials',
side_effect=Exception)
@mock.patch('functest.ci.check_deployment.CheckDeployment.check_rc')
def test_check_all_exc2(self, *args):
with self.assertRaises(Exception):
self.deployment.check_all()
args[0].assert_called_once_with()
args[1].assert_called_once_with(
os_env_file=self.rc_file, proxy_settings_str=None,
ssh_proxy_cmd=None)
@mock.patch('snaps.openstack.tests.openstack_tests.get_credentials',
return_value=None)
@mock.patch('functest.ci.check_deployment.CheckDeployment.check_rc')
def test_check_all_exc3(self, *args):
with self.assertRaises(Exception):
self.deployment.check_all()
args[0].assert_called_once_with()
args[1].assert_called_once_with(
os_env_file=self.rc_file, proxy_settings_str=None,
ssh_proxy_cmd=None)
@mock.patch('functest.ci.check_deployment.CheckDeployment.check_ext_net')
@mock.patch('functest.ci.check_deployment.CheckDeployment.check_glance')
@mock.patch('functest.ci.check_deployment.CheckDeployment.check_neutron')
@mock.patch('functest.ci.check_deployment.CheckDeployment.check_nova')
@mock.patch(
'functest.ci.check_deployment.CheckDeployment.check_service_endpoint')
@mock.patch(
'functest.ci.check_deployment.CheckDeployment.check_public_endpoint')
@mock.patch(
'functest.ci.check_deployment.CheckDeployment.check_auth_endpoint')
@mock.patch('snaps.openstack.tests.openstack_tests.get_credentials')
@mock.patch('functest.ci.check_deployment.CheckDeployment.check_rc')
def test_check_all(self, *args):
self.assertEqual(self.deployment.check_all(), 0)
for i in [0, 2, 3, 5, 6, 7, 8]:
args[i].assert_called_once_with()
args[1].assert_called_once_with(
os_env_file=self.rc_file, proxy_settings_str=None,
ssh_proxy_cmd=None)
calls = [mock.call('compute'), mock.call('network'),
mock.call('image')]
args[4].assert_has_calls(calls)
if __name__ == "__main__":
logging.disable(logging.CRITICAL)
unittest.main(verbosity=2)
|
|
from typing import Any, Dict, List, Optional, Text
import logging
import re
from email.header import decode_header
import email.message as message
from django.conf import settings
from zerver.lib.actions import decode_email_address, get_email_gateway_message_string_from_address, \
internal_send_message, internal_send_private_message, \
internal_send_stream_message, internal_send_huddle_message
from zerver.lib.notifications import convert_html_to_markdown
from zerver.lib.queue import queue_json_publish
from zerver.lib.redis_utils import get_redis_client
from zerver.lib.upload import upload_message_image
from zerver.lib.utils import generate_random_token
from zerver.lib.str_utils import force_text
from zerver.lib.send_email import FromAddress
from zerver.models import Stream, Recipient, \
get_user_profile_by_id, get_display_recipient, get_personal_recipient, \
Message, Realm, UserProfile, get_system_bot, get_user
import talon
from talon import quotations
talon.init()
logger = logging.getLogger(__name__)
def redact_stream(error_message: Text) -> Text:
domain = settings.EMAIL_GATEWAY_PATTERN.rsplit('@')[-1]
    stream_match = re.search(r'\b(.*?)@' + domain, error_message)
if stream_match:
stream_name = stream_match.groups()[0]
return error_message.replace(stream_name, "X" * len(stream_name))
return error_message
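# Illustration (hypothetical setting): with EMAIL_GATEWAY_PATTERN set to
# '%s@zulip.example.com', redact_stream('denmark@zulip.example.com bounced')
# returns 'XXXXXXX@zulip.example.com bounced'.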
def report_to_zulip(error_message: Text) -> None:
if settings.ERROR_BOT is None:
return
error_bot = get_system_bot(settings.ERROR_BOT)
error_stream = Stream.objects.get(name="errors", realm=error_bot.realm)
send_zulip(settings.ERROR_BOT, error_stream, u"email mirror error",
u"""~~~\n%s\n~~~""" % (error_message,))
def log_and_report(email_message: message.Message, error_message: Text, debug_info: Dict[str, Any]) -> None:
scrubbed_error = u"Sender: %s\n%s" % (email_message.get("From"),
redact_stream(error_message))
if "to" in debug_info:
scrubbed_error = u"Stream: %s\n%s" % (redact_stream(debug_info["to"]),
scrubbed_error)
if "stream" in debug_info:
scrubbed_error = u"Realm: %s\n%s" % (debug_info["stream"].realm.string_id,
scrubbed_error)
logger.error(scrubbed_error)
report_to_zulip(scrubbed_error)
# Temporary missed message addresses
redis_client = get_redis_client()
def missed_message_redis_key(token: Text) -> Text:
return 'missed_message:' + token
def is_missed_message_address(address: Text) -> bool:
msg_string = get_email_gateway_message_string_from_address(address)
return is_mm_32_format(msg_string)
def is_mm_32_format(msg_string: Optional[Text]) -> bool:
'''
Missed message strings are formatted with a little "mm" prefix
followed by a randomly generated 32-character string.
'''
return msg_string is not None and msg_string.startswith('mm') and len(msg_string) == 34
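# For illustration: 'mm' plus a 32-character token passes the check, so
# is_mm_32_format('mm' + 'a' * 32) is True, while a 31-character token
# (total length 33) is rejected.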
def get_missed_message_token_from_address(address: Text) -> Text:
msg_string = get_email_gateway_message_string_from_address(address)
if msg_string is None:
raise ZulipEmailForwardError('Address not recognized by gateway.')
if not is_mm_32_format(msg_string):
raise ZulipEmailForwardError('Could not parse missed message address')
# strip off the 'mm' before returning the redis key
return msg_string[2:]
def create_missed_message_address(user_profile: UserProfile, message: Message) -> str:
if settings.EMAIL_GATEWAY_PATTERN == '':
logger.warning("EMAIL_GATEWAY_PATTERN is an empty string, using "
"NOREPLY_EMAIL_ADDRESS in the 'from' field.")
return FromAddress.NOREPLY
if message.recipient.type == Recipient.PERSONAL:
# We need to reply to the sender so look up their personal recipient_id
recipient_id = get_personal_recipient(message.sender_id).id
else:
recipient_id = message.recipient_id
data = {
'user_profile_id': user_profile.id,
'recipient_id': recipient_id,
'subject': message.subject.encode('utf-8'),
}
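    # generate_random_token can, in principle, collide with a live address;
    # hsetnx succeeds only while the key is absent, so retry until we
    # atomically claim a fresh token.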
while True:
token = generate_random_token(32)
key = missed_message_redis_key(token)
if redis_client.hsetnx(key, 'uses_left', 1):
break
with redis_client.pipeline() as pipeline:
pipeline.hmset(key, data)
pipeline.expire(key, 60 * 60 * 24 * 5)
pipeline.execute()
address = 'mm' + token
return settings.EMAIL_GATEWAY_PATTERN % (address,)
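# With a hypothetical EMAIL_GATEWAY_PATTERN of '%s@zulip.example.com', the
# address above becomes 'mm<32-char token>@zulip.example.com'.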
def mark_missed_message_address_as_used(address: Text) -> None:
token = get_missed_message_token_from_address(address)
key = missed_message_redis_key(token)
with redis_client.pipeline() as pipeline:
pipeline.hincrby(key, 'uses_left', -1)
pipeline.expire(key, 60 * 60 * 24 * 5)
new_value = pipeline.execute()[0]
if new_value < 0:
redis_client.delete(key)
raise ZulipEmailForwardError('Missed message address has already been used')
def construct_zulip_body(message: message.Message, realm: Realm) -> Text:
body = extract_body(message)
    # Remove null characters, since Zulip will reject them
body = body.replace("\x00", "")
body = filter_footer(body)
body += extract_and_upload_attachments(message, realm)
body = body.strip()
if not body:
body = '(No email body)'
return body
def send_to_missed_message_address(address: Text, message: message.Message) -> None:
token = get_missed_message_token_from_address(address)
key = missed_message_redis_key(token)
result = redis_client.hmget(key, 'user_profile_id', 'recipient_id', 'subject')
if not all(val is not None for val in result):
raise ZulipEmailForwardError('Missing missed message address data')
user_profile_id, recipient_id, subject_b = result # type: (bytes, bytes, bytes)
user_profile = get_user_profile_by_id(user_profile_id)
recipient = Recipient.objects.get(id=recipient_id)
display_recipient = get_display_recipient(recipient)
body = construct_zulip_body(message, user_profile.realm)
if recipient.type == Recipient.STREAM:
assert isinstance(display_recipient, str)
recipient_str = display_recipient
internal_send_stream_message(user_profile.realm, user_profile, recipient_str,
subject_b.decode('utf-8'), body)
elif recipient.type == Recipient.PERSONAL:
assert not isinstance(display_recipient, str)
recipient_str = display_recipient[0]['email']
recipient_user = get_user(recipient_str, user_profile.realm)
internal_send_private_message(user_profile.realm, user_profile,
recipient_user, body)
elif recipient.type == Recipient.HUDDLE:
assert not isinstance(display_recipient, str)
emails = [user_dict['email'] for user_dict in display_recipient]
recipient_str = ', '.join(emails)
internal_send_huddle_message(user_profile.realm, user_profile,
emails, body)
else:
raise AssertionError("Invalid recipient type!")
logger.info("Successfully processed email from %s to %s" % (
user_profile.email, recipient_str))
## Sending the Zulip ##
class ZulipEmailForwardError(Exception):
pass
def send_zulip(sender: Text, stream: Stream, topic: Text, content: Text) -> None:
internal_send_message(
stream.realm,
sender,
"stream",
stream.name,
topic[:60],
content[:2000],
email_gateway=True)
def valid_stream(stream_name: Text, token: Text) -> bool:
try:
stream = Stream.objects.get(email_token=token)
return stream.name.lower() == stream_name.lower()
except Stream.DoesNotExist:
return False
def get_message_part_by_type(message: message.Message, content_type: Text) -> Optional[Text]:
charsets = message.get_charsets()
for idx, part in enumerate(message.walk()):
if part.get_content_type() == content_type:
content = part.get_payload(decode=True)
assert isinstance(content, bytes)
if charsets[idx]:
return content.decode(charsets[idx], errors="ignore")
return None
def extract_body(message: message.Message) -> Text:
# If the message contains a plaintext version of the body, use
# that.
plaintext_content = get_message_part_by_type(message, "text/plain")
if plaintext_content:
return quotations.extract_from_plain(plaintext_content)
# If we only have an HTML version, try to make that look nice.
html_content = get_message_part_by_type(message, "text/html")
if html_content:
return convert_html_to_markdown(quotations.extract_from_html(html_content))
raise ZulipEmailForwardError("Unable to find plaintext or HTML message body")
def filter_footer(text: Text) -> Text:
# Try to filter out obvious footers.
possible_footers = [line for line in text.split("\n") if line.strip().startswith("--")]
if len(possible_footers) != 1:
# Be conservative and don't try to scrub content if there
# isn't a trivial footer structure.
return text
return text.partition("--")[0].strip()
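# Sketch of the intended behaviour: a body such as 'text\n-- \nsignature'
# is trimmed to 'text', while text containing several '--' lines is left
# untouched by the conservative check above.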
def extract_and_upload_attachments(message: message.Message, realm: Realm) -> Text:
user_profile = get_system_bot(settings.EMAIL_GATEWAY_BOT)
attachment_links = []
payload = message.get_payload()
if not isinstance(payload, list):
# This is not a multipart message, so it can't contain attachments.
return ""
for part in payload:
content_type = part.get_content_type()
filename = part.get_filename()
if filename:
attachment = part.get_payload(decode=True)
if isinstance(attachment, bytes):
s3_url = upload_message_image(filename, len(attachment), content_type,
attachment,
user_profile,
target_realm=realm)
formatted_link = u"[%s](%s)" % (filename, s3_url)
attachment_links.append(formatted_link)
else:
logger.warning("Payload is not bytes (invalid attachment %s in message from %s)." %
(filename, message.get("From")))
return u"\n".join(attachment_links)
def extract_and_validate(email: Text) -> Stream:
temp = decode_email_address(email)
if temp is None:
raise ZulipEmailForwardError("Malformed email recipient " + email)
stream_name, token = temp
if not valid_stream(stream_name, token):
raise ZulipEmailForwardError("Bad stream token from email recipient " + email)
return Stream.objects.get(email_token=token)
def find_emailgateway_recipient(message: message.Message) -> Text:
    # Delivered-To alone isn't reliable; an X-Gm-Original-To header, when
    # present, is more accurate, so try the candidate headers in
    # descending priority order.
recipient_headers = ["X-Gm-Original-To", "Delivered-To", "To"]
recipients = [] # type: List[Text]
for recipient_header in recipient_headers:
r = message.get_all(recipient_header, None)
if r:
recipients = r
break
pattern_parts = [re.escape(part) for part in settings.EMAIL_GATEWAY_PATTERN.split('%s')]
match_email_re = re.compile(".*?".join(pattern_parts))
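    # For example (hypothetical setting): EMAIL_GATEWAY_PATTERN =
    # '%s@zulip.example.com' splits into ['', '@zulip.example.com'], which
    # after escaping joins into a regex like '.*?@zulip\\.example\\.com'.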
for recipient_email in recipients:
if match_email_re.match(recipient_email):
return recipient_email
raise ZulipEmailForwardError("Missing recipient in mirror email")
def process_stream_message(to: Text, subject: Text, message: message.Message,
debug_info: Dict[str, Any]) -> None:
stream = extract_and_validate(to)
body = construct_zulip_body(message, stream.realm)
debug_info["stream"] = stream
send_zulip(settings.EMAIL_GATEWAY_BOT, stream, subject, body)
logger.info("Successfully processed email to %s (%s)" % (
stream.name, stream.realm.string_id))
def process_missed_message(to: Text, message: message.Message, pre_checked: bool) -> None:
if not pre_checked:
mark_missed_message_address_as_used(to)
send_to_missed_message_address(to, message)
def process_message(message: message.Message, rcpt_to: Optional[Text]=None, pre_checked: bool=False) -> None:
subject_header = message.get("Subject", "(no subject)")
encoded_subject, encoding = decode_header(subject_header)[0]
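    # decode_header typically yields (str, None) for plain ASCII subjects and
    # (bytes, charset) for RFC 2047-encoded ones, hence the branching below.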
if encoding is None:
subject = force_text(encoded_subject) # encoded_subject has type str when encoding is None
else:
try:
subject = encoded_subject.decode(encoding)
except (UnicodeDecodeError, LookupError):
subject = u"(unreadable subject)"
debug_info = {}
try:
if rcpt_to is not None:
to = rcpt_to
else:
to = find_emailgateway_recipient(message)
debug_info["to"] = to
if is_missed_message_address(to):
process_missed_message(to, message, pre_checked)
else:
process_stream_message(to, subject, message, debug_info)
except ZulipEmailForwardError as e:
# TODO: notify sender of error, retry if appropriate.
log_and_report(message, str(e), debug_info)
def mirror_email_message(data: Dict[Text, Text]) -> Dict[str, str]:
rcpt_to = data['recipient']
if is_missed_message_address(rcpt_to):
try:
mark_missed_message_address_as_used(rcpt_to)
except ZulipEmailForwardError:
return {
"status": "error",
"msg": "5.1.1 Bad destination mailbox address: "
"Bad or expired missed message address."
}
else:
try:
extract_and_validate(rcpt_to)
except ZulipEmailForwardError:
return {
"status": "error",
"msg": "5.1.1 Bad destination mailbox address: "
"Please use the address specified in your Streams page."
}
queue_json_publish(
"email_mirror",
{
"message": data['msg_text'],
"rcpt_to": rcpt_to
}
)
return {"status": "success"}
|
|
#!/usr/bin/env python3
VERSION="0.5.0" # MAJOR.MINOR.PATCH | http://semver.org
from collections import defaultdict
from collections import OrderedDict
from io import open
import zipfile
import wave
import math
import re
import os
import sys
import operator
import struct
import argparse
def parse_commandline():
parser = argparse.ArgumentParser(prog='sfz2bitwig', description='Convert an sfz instrument into a Bitwig multisample instrument.')
parser.add_argument('-v', '--version', action='version', version='%(prog)s v{0}'.format(VERSION))
parser.add_argument('--noloop', default=False, action='store_true', help='disable wav loop point extraction')
parser.add_argument('--category', default='', help='set category field of generated multisample')
parser.add_argument('--creator', default='sfz2bitwig', help='set creator field of generated multisample')
parser.add_argument('--description', default='', help='set description field of generated multisample')
parser.add_argument('--keywords', default='', nargs='*', help='set keywords field of generated multisample')
parser.add_argument('sfzfile', nargs='+', help='sfz file(s) to convert')
return parser.parse_args()
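# Example invocation (hypothetical file name):
#   python sfz2bitwig.py --creator "Me" piano.sfz
# which converts piano.sfz and writes 'piano.multisample'.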
def main():
args = parse_commandline()
for fn in args.sfzfile:
# Convert file
multisamp = Multisample(category=args.category, creator=args.creator, description=args.description, keywords=args.keywords )
multisamp.initFromSFZ(fn,args.noloop)
multisamp.write()
return
class Multisample(object):
def __init__(self, name='default', category='', creator='', description='', keywords=None ):
self.name = name
self.category = category
self.creator = creator
self.description = description
self.keywords = keywords
self.samples = []
def initFromSFZ(self, sfzfile, noloop=False):
cur_global_defaults = {}
cur_control_defaults = {}
cur_group_defaults = {}
sfz_opcodes_ignored = defaultdict(int)
region_count = 0
print("\nConverting {} to multisample".format(sfzfile))
sfz = SFZParser(sfzfile)
#print("Finished parsing {}".format(sfzfile))
self.name = "{}".format(os.path.splitext(sfzfile)[0])
for section in sfz.sections:
sectionName = section[0]
#print("start section <{}>".format(sectionName))
if sectionName == "control":
cur_control_defaults = {}
for k, v in section[1].items():
cur_control_defaults[k] = v
if k == "default_path":
cur_control_defaults["default_path"] = os.path.join(os.path.dirname(os.path.abspath(sfzfile)),os.path.normpath(v.replace('\\','/')))
#print("Set control default: {}={}".format(k,cur_control_defaults[k]))
elif sectionName == "group":
cur_group_defaults = {}
for k, v in section[1].items():
cur_group_defaults[k] = v
#print("Set group default: {}={}".format(k,v))
elif sectionName == "global":
cur_global_defaults = {}
for k, v in section[1].items():
cur_global_defaults[k] = v
#print("Set global default: {}={}".format(k,v))
elif sectionName == "region":
region_count += 1
newsample = {}
# Apply settings with priority global < group < region
opcodes = dict(cur_global_defaults)
opcodes.update(cur_group_defaults)
opcodes.update(section[1])
for k, v in opcodes.items():
#print(" {}={}".format(k,v))
if k == "sample":
newsample['file'] = os.path.normpath(v.replace('\\','/'))
if newsample['file'][0] == '/': # relative path should not contain leading slash
newsample['file'] = newsample['file'][1:]
elif k == "lokey":
newsample['keylow'] = self.sfz_note_to_midi_key(v)
elif k == "hikey":
newsample['keyhigh'] = self.sfz_note_to_midi_key(v)
elif k == "pitch_keycenter":
newsample['root'] = self.sfz_note_to_midi_key(v)
elif k == "key":
newsample['keylow'] = self.sfz_note_to_midi_key(v)
newsample['keyhigh'] = self.sfz_note_to_midi_key(v)
newsample['root'] = self.sfz_note_to_midi_key(v)
elif k == "pitch_keytrack":
newsample['track'] = v
elif k == "lovel":
newsample['velocitylow'] = v
elif k == "hivel":
newsample['velocityhigh'] = v
elif k == "volume":
newsample['gain'] = v
elif k == "tune":
newsample['tune'] = int(v) * 0.01
elif k == "loop_mode":
if v != 'one_shot':
newsample['loopmode'] = 'sustain' # bitwig currently supports off or sustain
elif k == "loop_start":
newsample['loopstart'] = v
elif k == "loop_end":
newsample['loopstop'] = v
elif k == "trigger":
newsample['trigger'] = v
elif k == "lorand":
if float(v) > 0.0:
newsample['playlogic'] = "conditional"
elif k == "hirand":
if float(v) < 1.0:
newsample['playlogic'] = "conditional"
elif k == "seq_length":
if int(v) > 1:
newsample['playlogic'] = "conditional"
else:
sfz_opcodes_ignored["{}={}".format(k,v)] += 1
defaultPath = cur_control_defaults.get('default_path',os.path.dirname(os.path.abspath(sfzfile)))
newsampleFullPath = os.path.join(defaultPath,newsample['file'])
newsample['filepath'] = newsampleFullPath
newsample['sample-start'] = '0.000'
newsample['sample-stop'] = self.getsamplecount(newsampleFullPath)
if not noloop:
                    # Check for loop points embedded in the wav file, and specify them in the multisample xml, as Bitwig won't load them from the wav automatically
if not newsample.get('loopstart',None) and not newsample.get('loopstop',None):
metadata = self.readwavmetadata(newsampleFullPath,readloops=True)
if metadata[0] and metadata[0][0]:
newsample['loopmode'] = 'sustain'
newsample['loopstart'] = metadata[0][0][0]
newsample['loopstop'] = metadata[0][0][1]
print("Extracted loop point ({},{}) from {}".format(newsample['loopstart'],newsample['loopstop'],newsample['file']))
if 'root' not in newsample and newsample.get('track','true') == 'true':
print("ERROR: No pitch_keycenter for sample {}, root of sample will need to be manually adjusted in Bitwig".format(newsample['file']))
newsample['root'] = 0 # bitwig defaults to c4 when root is not given, make the issue more obvious with a more extreme value
if newsample['filepath'] in [s['filepath'] for s in self.samples]:
print("WARNING: Skipping duplicate sample: {} ({})".format(os.path.basename(newsample.get('file','')),newsample.get('filepath','')))
elif 'trigger' in newsample:
# bitwig multisample only supports note-on events
print("WARNING: Skipping sample with unhandled trigger event: trigger={}".format(newsample['trigger']))
else:
self.samples.append(newsample)
#print("Converted sample {}".format(newsample['file']))
elif sectionName == "curve":
sfz_opcodes_ignored["{}={}".format(k,v)] += 1
#print("WARNING: Ignoring SFZ opcode {}={}".format(k,v))
elif sectionName == "effect":
sfz_opcodes_ignored["{}={}".format(k,v)] += 1
#print("WARNING: Ignoring SFZ opcode {}={}".format(k,v))
elif sectionName == "comment":
pass
else:
print("WARNING: Unhandled section {}".format(sectionName))
sfz_opcodes_ignored["{}={}".format(k,v)] += 1
print("Finished converting {} to multisample".format(sfzfile))
print("\nConversion Results:")
print(" {} samples mapped from {} regions".format(len(self.samples),region_count))
if sfz_opcodes_ignored:
sfz_opcodes_ignored_count = 0
for k, v in sfz_opcodes_ignored.items():
sfz_opcodes_ignored_count += v
print("\n {} SFZ opcodes were lost in translation:".format(sfz_opcodes_ignored_count))
sorted_sfz_opcodes_ignored = sorted(sfz_opcodes_ignored.items(), key=operator.itemgetter(1), reverse=True)
for v in sorted_sfz_opcodes_ignored:
print(" ({}) {}".format(v[1],v[0]))
sfz_ahdsr_opcodes = ['ampeg_release', 'ampeg_sustain', 'ampeg_hold', 'ampeg_decay', 'ampeg_attack']
suggest_ahdsr = { k: v for k, v in sfz_opcodes_ignored.items() if k.split('=')[0] in sfz_ahdsr_opcodes }
if suggest_ahdsr:
print("\n Suggested Bitwig sampler AHDSR settings:")
ahdsr = self.getbestahdsr(suggest_ahdsr)
if ahdsr['attack'][0]:
print(" ({}) A = {} s".format(ahdsr['attack'][1],ahdsr['attack'][0]))
if ahdsr['hold'][0]:
print(" ({}) H = {} %".format(ahdsr['hold'][1],ahdsr['hold'][0]))
if ahdsr['decay'][0]:
print(" ({}) D = {} s".format(ahdsr['decay'][1],ahdsr['decay'][0]))
if ahdsr['sustain'][0]:
print(" ({}) S = {} %".format(ahdsr['sustain'][1],ahdsr['sustain'][0]))
if ahdsr['release'][0]:
print(" ({}) R = {} s".format(ahdsr['release'][1],ahdsr['release'][0]))
def makexml(self):
xml = ''
xml += '<?xml version="1.0" encoding="UTF-8"?>\n'
xml += '<multisample name="{}">\n'.format(self.name)
xml += ' <generator>Bitwig Studio</generator>\n'
if self.category:
xml += ' <category>{}</category>\n'.format(self.category)
else:
xml += ' <category/>\n'
if self.creator:
xml += ' <creator>{}</creator>\n'.format(self.creator)
else:
xml += ' <creator/>\n'
if self.description:
xml += ' <description>{}</description>\n'.format(self.description)
else:
xml += ' <description/>\n'
if self.keywords:
xml += ' <keywords>\n'
for keyword in self.keywords:
xml += ' <keyword>{}</keyword>\n'.format(keyword)
xml += ' </keywords>\n'
else:
xml += ' <keywords/>\n'
xml += ' <layer name="Default">\n'
for sample in self.samples:
zonelogic = 'round-robin' if sample.get('playlogic') == "conditional" else 'always-play'
xml += ' <sample file="{}" gain="{}" sample-start="{}" sample-stop="{}" zone-logic="{}">\n'.format(os.path.basename(sample.get('file','')),sample.get('gain','0.00'),sample.get('sample-start','0.000'),sample.get('sample-stop','0.000'),zonelogic)
xml += ' <key high="{}" low="{}" root="{}" track="{}" tune="{}"/>\n'.format(sample.get('keyhigh',''),sample.get('keylow',''),sample.get('root',''),sample.get('track','true'),sample.get('tune','0.0'))
vhigh = int(sample.get('velocityhigh','127'))
vlow = int(sample.get('velocitylow','0'))
if vhigh == 127 and vlow == 0:
xml += ' <velocity/>\n'
elif vlow == 0:
xml += ' <velocity high="{}"/>\n'.format(vhigh)
elif vhigh == 127:
xml += ' <velocity low="{}"/>\n'.format(vlow)
else:
xml += ' <velocity high="{}" low="{}"/>\n'.format(vhigh,vlow)
xml += ' <loop mode="{}" start="{}" stop="{}"/>\n'.format(sample.get('loopmode','off'),sample.get('loopstart','0.000'),sample.get('loopstop',sample.get('sample-stop','0.000')))
xml += ' </sample>\n'
xml += ' </layer>\n'
xml += '</multisample>\n'
return xml
def write(self, outpath=None):
xml = self.makexml()
if not outpath:
outpath = "{}.multisample".format(self.name)
print("\nWriting multisample {}".format(outpath))
# Build zip containing multisample.xml and sample files
zf = zipfile.ZipFile(outpath,mode='w',compression=zipfile.ZIP_DEFLATED)
try:
#print("Adding multisample.xml")
zf.writestr('multisample.xml',xml)
for sample in self.samples:
#print("Adding sample: {} ({})".format(os.path.basename(sample.get('file','')),sample.get('filepath','')))
zf.write(sample.get('filepath',''),os.path.basename(sample.get('file','')))
finally:
            zf.close()
print("Finished writing multisample {}".format(outpath))
def getbestahdsr(self, histogram):
ahdsr = { 'attack':[None,0], 'hold':[None,0], 'decay':[None,0], 'sustain':[None,0], 'release':[None,0] }
for k, v in histogram.items():
settingName, settingValue = k.split('=')
settingName = settingName.split('_')[1]
confidence = v
if confidence > ahdsr[settingName][1]:
ahdsr[settingName][0] = settingValue
ahdsr[settingName][1] = confidence
return ahdsr
def getsamplecount(self, path):
ifile = wave.open(path)
sampcount = ifile.getnframes()
return sampcount
# based on https://gist.github.com/josephernest/3f22c5ed5dabf1815f16efa8fa53d476
def readwavmetadata(self, file, readmarkers=False, readmarkerlabels=False, readmarkerslist=False, readloops=False, readpitch=False):
if hasattr(file,'read'):
fid = file
else:
fid = open(file, 'rb')
def _read_riff_chunk(fid):
str1 = fid.read(4)
if str1 != b'RIFF':
raise ValueError("Not a WAV file.")
fsize = struct.unpack('<I', fid.read(4))[0] + 8
str2 = fid.read(4)
if (str2 != b'WAVE'):
raise ValueError("Not a WAV file.")
return fsize
fsize = _read_riff_chunk(fid)
noc = 1
bits = 8
#_cue = []
#_cuelabels = []
_markersdict = defaultdict(lambda: {'position': -1, 'label': ''})
loops = []
pitch = 0.0
while (fid.tell() < fsize):
# read the next chunk
chunk_id = fid.read(4)
if chunk_id == b'fmt ':
pass
elif chunk_id == b'data':
pass
elif chunk_id == b'cue ':
str1 = fid.read(8)
size, numcue = struct.unpack('<ii',str1)
for c in range(numcue):
str1 = fid.read(24)
id, position, datachunkid, chunkstart, blockstart, sampleoffset = struct.unpack('<iiiiii', str1)
#_cue.append(position)
_markersdict[id]['position'] = position # needed to match labels and markers
elif chunk_id == b'LIST':
str1 = fid.read(8)
size, type = struct.unpack('<ii', str1)
elif chunk_id in [b'ICRD', b'IENG', b'ISFT', b'ISTJ']: # see http://www.pjb.com.au/midi/sfspec21.html#i5
pass
elif chunk_id == b'labl':
str1 = fid.read(8)
size, id = struct.unpack('<ii',str1)
                size = size + (size % 2) # the size should be even, see WAV specification, e.g. 16=>16, 23=>24
label = fid.read(size-4).rstrip(b'\x00') # remove the trailing null characters
#_cuelabels.append(label)
_markersdict[id]['label'] = label # needed to match labels and markers
elif chunk_id == b'smpl':
str1 = fid.read(40)
size, manuf, prod, sampleperiod, midiunitynote, midipitchfraction, smptefmt, smpteoffs, numsampleloops, samplerdata = struct.unpack('<iiiiiIiiii', str1)
cents = midipitchfraction * 1./(2**32-1)
pitch = 440. * 2 ** ((midiunitynote + cents - 69.)/12)
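                # e.g. midiunitynote=69 with zero pitch fraction gives
                # 440.0 Hz (A4); midiunitynote=60 gives ~261.63 Hz (middle C).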
for i in range(numsampleloops):
str1 = fid.read(24)
cuepointid, type, start, end, fraction, playcount = struct.unpack('<iiiiii', str1)
loops.append([start, end])
else:
pass
fid.close()
_markerslist = sorted([_markersdict[l] for l in _markersdict], key=lambda k: k['position']) # sort by position
_cue = [m['position'] for m in _markerslist]
_cuelabels = [m['label'] for m in _markerslist]
return ((_cue,) if readmarkers else ()) \
+ ((_cuelabels,) if readmarkerlabels else ()) \
+ ((_markerslist,) if readmarkerslist else ()) \
+ ((loops,) if readloops else ()) \
+ ((pitch,) if readpitch else ())
def sfz_note_to_midi_key(self, sfz_note):
SFZ_NOTE_LETTER_OFFSET = {'a': 9, 'b': 11, 'c': 0, 'd': 2, 'e': 4, 'f': 5, 'g': 7}
letter = sfz_note[0].lower()
        if letter not in SFZ_NOTE_LETTER_OFFSET:
return sfz_note
sharp = '#' in sfz_note
octave = int(sfz_note[-1])
        # Bitwig multisample note numbers are an octave off from standard MIDI naming: with the +2 octave offset below, 'c3' maps to 60 rather than 'c4'
return SFZ_NOTE_LETTER_OFFSET[letter] + ((octave + 2) * 12) + (1 if sharp else 0)
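        # Worked examples of the mapping above: 'c4' -> 0 + (4+2)*12 = 72,
        # 'a#3' -> 9 + (3+2)*12 + 1 = 70.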
#SFZParser code taken from https://github.com/SpotlightKid/sfzparser/blob/master/sfzparser.py
class SFZParser(object):
    rx_section = re.compile(r'^<([^>]+)>\s?')
def __init__(self, sfz_path, encoding=None, **kwargs):
self.encoding = encoding
self.sfz_path = sfz_path
self.groups = []
self.sections = []
with open(sfz_path, encoding=self.encoding or 'utf-8-sig') as sfz:
self.parse(sfz)
def parse(self, sfz):
sections = self.sections
cur_section = []
value = None
for line in sfz:
line = line.strip()
if not line:
continue
if line.startswith('//'):
sections.append(('comment', line))
continue
while line:
match = self.rx_section.search(line)
if match:
if cur_section:
sections.append((section_name, OrderedDict(reversed(cur_section))))
cur_section = []
section_name = match.group(1).strip()
line = line[match.end():].lstrip()
elif "=" in line:
line, _, value = line.rpartition('=')
if '=' in line:
line, key = line.rsplit(None, 1)
cur_section.append((key, value))
value = None
elif value:
line, key = None, line
cur_section.append((key, value))
else:
if line.startswith('//'):
print("Warning: inline comment")
sections.append(('comment', line))
# ignore garbage
break
if cur_section:
sections.append((section_name, OrderedDict(reversed(cur_section))))
return sections
if __name__ == "__main__":
main()
|
|
# Copyright (C) 2011 Sam Rushing
# Copyright (C) 2012-2015 The python-bitcoinlib developers
#
# This file is part of python-bitcoinlib.
#
# It is subject to the license terms in the LICENSE file found in the top-level
# directory of this distribution.
#
# No part of python-bitcoinlib, including this file, may be copied, modified,
# propagated, or distributed except according to the terms contained in the
# LICENSE file.
"""ECC secp256k1 crypto routines
WARNING: This module does not mlock() secrets; your private keys may end up on
disk in swap! Use with caution!
This is a replacement module for bitcoin.core.key that properly specifies
ctypes calls and avoids occasional segfaults. It is based on posita's PR
petertodd/python-bitcoinlib#79
"""
import ctypes
import ctypes.util
import hashlib
import sys
# replace the bitcoin.core.key module with this module
sys.modules['bitcoin.core.key'] = sys.modules[__name__]
import bitcoin.core
bitcoin.core.key = sys.modules[__name__]
import bitcoin.core.script
_ssl = ctypes.cdll.LoadLibrary(ctypes.util.find_library('ssl') or 'libeay32')
_ssl.BN_add.restype = ctypes.c_int
_ssl.BN_add.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
_ssl.BN_bin2bn.restype = ctypes.c_void_p
_ssl.BN_bin2bn.argtypes = [ctypes.c_char_p, ctypes.c_int, ctypes.c_void_p]
_ssl.BN_cmp.restype = ctypes.c_int
_ssl.BN_cmp.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
_ssl.BN_copy.restype = ctypes.c_void_p
_ssl.BN_copy.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
_ssl.BN_free.restype = None
_ssl.BN_free.argtypes = [ctypes.c_void_p]
_ssl.BN_mod_inverse.restype = ctypes.c_void_p
_ssl.BN_mod_inverse.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
_ssl.BN_mod_mul.restype = ctypes.c_int
_ssl.BN_mod_mul.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
_ssl.BN_mod_sub.restype = ctypes.c_int
_ssl.BN_mod_sub.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
_ssl.BN_mul_word.restype = ctypes.c_int
_ssl.BN_mul_word.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
_ssl.BN_new.restype = ctypes.c_void_p
_ssl.BN_new.argtypes = []
_ssl.BN_rshift.restype = ctypes.c_int
_ssl.BN_rshift.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int]
_ssl.BN_rshift1.restype = ctypes.c_int
_ssl.BN_rshift1.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
_ssl.BN_sub.restype = ctypes.c_int
_ssl.BN_sub.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
# _ssl.BN_zero.restype = ctypes.c_int
# _ssl.BN_zero.argtypes = [ctypes.c_void_p]
_ssl.BN_CTX_free.restype = None
_ssl.BN_CTX_free.argtypes = [ctypes.c_void_p]
_ssl.BN_CTX_get.restype = ctypes.c_void_p
_ssl.BN_CTX_get.argtypes = [ctypes.c_void_p]
_ssl.BN_CTX_new.restype = ctypes.c_void_p
_ssl.BN_CTX_new.argtypes = []
_ssl.EC_GROUP_get_curve_GFp.restype = ctypes.c_int
_ssl.EC_GROUP_get_curve_GFp.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
_ssl.EC_GROUP_get_degree.restype = ctypes.c_int
_ssl.EC_GROUP_get_degree.argtypes = [ctypes.c_void_p]
_ssl.EC_GROUP_get_order.restype = ctypes.c_int
_ssl.EC_GROUP_get_order.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
_ssl.EC_KEY_free.restype = None
_ssl.EC_KEY_free.argtypes = [ctypes.c_void_p]
_ssl.EC_KEY_new_by_curve_name.restype = ctypes.c_void_p
_ssl.EC_KEY_new_by_curve_name.argtypes = [ctypes.c_int]
_ssl.EC_KEY_get0_group.restype = ctypes.c_void_p
_ssl.EC_KEY_get0_group.argtypes = [ctypes.c_void_p]
_ssl.EC_KEY_get0_public_key.restype = ctypes.c_void_p
_ssl.EC_KEY_get0_public_key.argtypes = [ctypes.c_void_p]
_ssl.EC_KEY_set_conv_form.restype = None
_ssl.EC_KEY_set_conv_form.argtypes = [ctypes.c_void_p, ctypes.c_int]
_ssl.EC_KEY_set_private_key.restype = ctypes.c_int
_ssl.EC_KEY_set_private_key.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
_ssl.EC_KEY_set_public_key.restype = ctypes.c_int
_ssl.EC_KEY_set_public_key.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
_ssl.EC_POINT_free.restype = None
_ssl.EC_POINT_free.argtypes = [ctypes.c_void_p]
_ssl.EC_POINT_is_at_infinity.restype = ctypes.c_int
_ssl.EC_POINT_is_at_infinity.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
_ssl.EC_POINT_new.restype = ctypes.c_void_p
_ssl.EC_POINT_new.argtypes = [ctypes.c_void_p]
_ssl.EC_POINT_mul.restype = ctypes.c_int
_ssl.EC_POINT_mul.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
_ssl.EC_POINT_set_compressed_coordinates_GFp.restype = ctypes.c_int
_ssl.EC_POINT_set_compressed_coordinates_GFp.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p]
_ssl.ECDSA_sign.restype = ctypes.c_int
_ssl.ECDSA_sign.argtypes = [ctypes.c_int, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
_ssl.ECDSA_size.restype = ctypes.c_int
_ssl.ECDSA_size.argtypes = [ctypes.c_void_p]
_ssl.ECDSA_verify.restype = ctypes.c_int
_ssl.ECDSA_verify.argtypes = [ctypes.c_int, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p]
_ssl.ECDSA_SIG_free.restype = None
_ssl.ECDSA_SIG_free.argtypes = [ctypes.c_void_p]
_ssl.ECDH_compute_key.restype = ctypes.c_int
_ssl.ECDH_compute_key.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p]
_ssl.ERR_error_string_n.restype = None
_ssl.ERR_error_string_n.argtypes = [ctypes.c_ulong, ctypes.c_char_p, ctypes.c_size_t]
_ssl.ERR_get_error.restype = ctypes.c_ulong
_ssl.ERR_get_error.argtypes = []
_ssl.d2i_ECDSA_SIG.restype = ctypes.c_void_p
_ssl.d2i_ECDSA_SIG.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_long]
_ssl.d2i_ECPrivateKey.restype = ctypes.c_void_p
_ssl.d2i_ECPrivateKey.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_long]
_ssl.i2d_ECDSA_SIG.restype = ctypes.c_int
_ssl.i2d_ECDSA_SIG.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
_ssl.i2d_ECPrivateKey.restype = ctypes.c_int
_ssl.i2d_ECPrivateKey.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
_ssl.i2o_ECPublicKey.restype = ctypes.c_void_p
_ssl.i2o_ECPublicKey.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
_ssl.o2i_ECPublicKey.restype = ctypes.c_void_p
_ssl.o2i_ECPublicKey.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_long]
# Thx to Sam Devlin for the ctypes magic 64-bit fix.
def _check_result(val, func, args):
if val == 0:
raise ValueError
else:
return ctypes.c_void_p(val)
_ssl.EC_KEY_new_by_curve_name.errcheck = _check_result
# this specifies the curve used with ECDSA.
_NID_secp256k1 = 714 # from openssl/obj_mac.h
# test that openssl supports secp256k1
if _ssl.EC_KEY_new_by_curve_name(_NID_secp256k1) == 0:
errno = _ssl.ERR_get_error()
errmsg = ctypes.create_string_buffer(120)
_ssl.ERR_error_string_n(errno, errmsg, 120)
raise RuntimeError('openssl error: %s' % errmsg.value)
# From openssl/ecdsa.h
class ECDSA_SIG_st(ctypes.Structure):
_fields_ = [("r", ctypes.c_void_p),
("s", ctypes.c_void_p)]
class CECKey:
"""Wrapper around OpenSSL's EC_KEY"""
POINT_CONVERSION_COMPRESSED = 2
POINT_CONVERSION_UNCOMPRESSED = 4
def __init__(self):
self.k = _ssl.EC_KEY_new_by_curve_name(_NID_secp256k1)
def __del__(self):
if _ssl:
_ssl.EC_KEY_free(self.k)
self.k = None
def set_secretbytes(self, secret):
priv_key = _ssl.BN_bin2bn(secret, 32, _ssl.BN_new())
group = _ssl.EC_KEY_get0_group(self.k)
pub_key = _ssl.EC_POINT_new(group)
ctx = _ssl.BN_CTX_new()
        if not _ssl.EC_POINT_mul(group, pub_key, priv_key, None, None, ctx):
            raise ValueError("Could not derive public key from the supplied secret.")
_ssl.EC_KEY_set_private_key(self.k, priv_key)
_ssl.EC_KEY_set_public_key(self.k, pub_key)
_ssl.EC_POINT_free(pub_key)
_ssl.BN_CTX_free(ctx)
return self.k
def set_privkey(self, key):
self.mb = ctypes.create_string_buffer(key)
return _ssl.d2i_ECPrivateKey(ctypes.byref(self.k), ctypes.byref(ctypes.pointer(self.mb)), len(key))
def set_pubkey(self, key):
self.mb = ctypes.create_string_buffer(key)
return _ssl.o2i_ECPublicKey(ctypes.byref(self.k), ctypes.byref(ctypes.pointer(self.mb)), len(key))
def get_privkey(self):
size = _ssl.i2d_ECPrivateKey(self.k, 0)
mb_pri = ctypes.create_string_buffer(size)
_ssl.i2d_ECPrivateKey(self.k, ctypes.byref(ctypes.pointer(mb_pri)))
return mb_pri.raw
def get_pubkey(self):
size = _ssl.i2o_ECPublicKey(self.k, 0)
mb = ctypes.create_string_buffer(size)
_ssl.i2o_ECPublicKey(self.k, ctypes.byref(ctypes.pointer(mb)))
return mb.raw
def get_raw_ecdh_key(self, other_pubkey):
ecdh_keybuffer = ctypes.create_string_buffer(32)
r = _ssl.ECDH_compute_key(ctypes.pointer(ecdh_keybuffer), 32,
_ssl.EC_KEY_get0_public_key(other_pubkey.k),
self.k, 0)
if r != 32:
raise Exception('CKey.get_ecdh_key(): ECDH_compute_key() failed')
return ecdh_keybuffer.raw
def get_ecdh_key(self, other_pubkey, kdf=lambda k: hashlib.sha256(k).digest()):
# FIXME: be warned it's not clear what the kdf should be as a default
r = self.get_raw_ecdh_key(other_pubkey)
return kdf(r)
def sign(self, hash):
if not isinstance(hash, bytes):
raise TypeError('Hash must be bytes instance; got %r' % hash.__class__)
if len(hash) != 32:
raise ValueError('Hash must be exactly 32 bytes long')
sig_size0 = ctypes.c_uint32()
sig_size0.value = _ssl.ECDSA_size(self.k)
mb_sig = ctypes.create_string_buffer(sig_size0.value)
result = _ssl.ECDSA_sign(0, hash, len(hash), mb_sig, ctypes.byref(sig_size0), self.k)
assert 1 == result
if bitcoin.core.script.IsLowDERSignature(mb_sig.raw[:sig_size0.value]):
return mb_sig.raw[:sig_size0.value]
else:
return self.signature_to_low_s(mb_sig.raw[:sig_size0.value])
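    # Background for the method below: for curve order n, the signatures
    # (r, s) and (r, n - s) verify identically, so when s > n/2 it is
    # replaced by n - s to obtain the canonical low-S form used to curb
    # signature malleability.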
def signature_to_low_s(self, sig):
der_sig = ECDSA_SIG_st()
_ssl.d2i_ECDSA_SIG(ctypes.byref(ctypes.pointer(der_sig)), ctypes.byref(ctypes.c_char_p(sig)), len(sig))
group = _ssl.EC_KEY_get0_group(self.k)
order = _ssl.BN_new()
halforder = _ssl.BN_new()
ctx = _ssl.BN_CTX_new()
_ssl.EC_GROUP_get_order(group, order, ctx)
_ssl.BN_rshift1(halforder, order)
# Verify that s is over half the order of the curve before we actually subtract anything from it
if _ssl.BN_cmp(der_sig.s, halforder) > 0:
_ssl.BN_sub(der_sig.s, order, der_sig.s)
_ssl.BN_free(halforder)
_ssl.BN_free(order)
_ssl.BN_CTX_free(ctx)
derlen = _ssl.i2d_ECDSA_SIG(ctypes.pointer(der_sig), 0)
if derlen == 0:
_ssl.ECDSA_SIG_free(der_sig)
return None
new_sig = ctypes.create_string_buffer(derlen)
_ssl.i2d_ECDSA_SIG(ctypes.pointer(der_sig), ctypes.byref(ctypes.pointer(new_sig)))
_ssl.BN_free(der_sig.r)
_ssl.BN_free(der_sig.s)
return new_sig.raw
def verify(self, hash, sig):
"""Verify a DER signature"""
if not sig:
            return False
# New versions of OpenSSL will reject non-canonical DER signatures. de/re-serialize first.
norm_sig = ctypes.c_void_p(0)
_ssl.d2i_ECDSA_SIG(ctypes.byref(norm_sig), ctypes.byref(ctypes.c_char_p(sig)), len(sig))
derlen = _ssl.i2d_ECDSA_SIG(norm_sig, 0)
if derlen == 0:
_ssl.ECDSA_SIG_free(norm_sig)
            return False
norm_der = ctypes.create_string_buffer(derlen)
_ssl.i2d_ECDSA_SIG(norm_sig, ctypes.byref(ctypes.pointer(norm_der)))
_ssl.ECDSA_SIG_free(norm_sig)
# -1 = error, 0 = bad sig, 1 = good
return _ssl.ECDSA_verify(0, hash, len(hash), norm_der, derlen, self.k) == 1
def set_compressed(self, compressed):
if compressed:
form = self.POINT_CONVERSION_COMPRESSED
else:
form = self.POINT_CONVERSION_UNCOMPRESSED
_ssl.EC_KEY_set_conv_form(self.k, form)
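# Minimal usage sketch (illustrative only, with a hypothetical 32-byte secret):
#   k = CECKey()
#   k.set_compressed(True)
#   k.set_secretbytes(b'\x01' * 32)
#   digest = hashlib.sha256(b'msg').digest()
#   sig = k.sign(digest)
#   assert CPubKey(k.get_pubkey()).verify(digest, sig)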
class CPubKey(bytes):
"""An encapsulated public key
Attributes:
is_valid - Corresponds to CPubKey.IsValid()
is_fullyvalid - Corresponds to CPubKey.IsFullyValid()
is_compressed - Corresponds to CPubKey.IsCompressed()
"""
def __new__(cls, buf, _cec_key=None):
self = super(CPubKey, cls).__new__(cls, buf)
if _cec_key is None:
_cec_key = CECKey()
self._cec_key = _cec_key
self.is_fullyvalid = bool(_cec_key.set_pubkey(self))
return self
@property
def is_valid(self):
return len(self) > 0
@property
def is_compressed(self):
return len(self) == 33
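    # SEC1 encoding: a compressed public key is 33 bytes (0x02/0x03 prefix
    # plus the 32-byte x coordinate); an uncompressed key is 65 bytes.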
def verify(self, hash, sig):
return self._cec_key.verify(hash, sig)
def __str__(self):
return repr(self)
def __repr__(self):
        # Always represent as b'<secret>' so test cases don't have to
# change for py2/3
if sys.version > '3':
return '%s(%s)' % (self.__class__.__name__, super(CPubKey, self).__repr__())
else:
return '%s(b%s)' % (self.__class__.__name__, super(CPubKey, self).__repr__())
__all__ = (
'CECKey',
'CPubKey',
)
|
|
#!/usr/bin/env python
"""A Skeleton HTML page template, that provides basic structure and utility methods.
"""
##################################################
## DEPENDENCIES
import sys
import os
import os.path
from os.path import getmtime, exists
import time
import types
import __builtin__
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import DummyTransaction
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
from Cheetah.Templates._SkeletonPage import _SkeletonPage
##################################################
## MODULE CONSTANTS
try:
True, False
except NameError:
True, False = (1==1), (1==0)
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
__CHEETAH_version__ = '2.0rc6'
__CHEETAH_versionTuple__ = (2, 0, 0, 'candidate', 6)
__CHEETAH_genTime__ = 1139107954.3640411
__CHEETAH_genTimestamp__ = 'Sat Feb 4 18:52:34 2006'
__CHEETAH_src__ = 'src/Templates/SkeletonPage.tmpl'
__CHEETAH_srcLastModified__ = 'Mon Oct 7 11:37:30 2002'
__CHEETAH_docstring__ = 'Autogenerated by CHEETAH: The Python-Powered Template Engine'
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
raise AssertionError(
'This template was compiled with Cheetah version'
' %s. Templates compiled before version %s must be recompiled.'%(
__CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class SkeletonPage(_SkeletonPage):
##################################################
## CHEETAH GENERATED METHODS
def __init__(self, *args, **KWs):
_SkeletonPage.__init__(self, *args, **KWs)
if not self._CHEETAH__instanceInitialized:
cheetahKWArgs = {}
allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
for k,v in KWs.items():
if k in allowedKWs: cheetahKWArgs[k] = v
self._initCheetahInstance(**cheetahKWArgs)
def writeHeadTag(self, **KWS):
## CHEETAH: generated from #block writeHeadTag at line 22, col 1.
trans = KWS.get("trans")
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
write('<head>\n<title>')
_v = VFFSL(SL,"title",True) # '$title' on line 24, col 8
if _v is not None: write(_filter(_v, rawExpr='$title')) # from line 24, col 8.
write('</title>\n')
_v = VFFSL(SL,"metaTags",True) # '$metaTags' on line 25, col 1
if _v is not None: write(_filter(_v, rawExpr='$metaTags')) # from line 25, col 1.
write(' \n')
_v = VFFSL(SL,"stylesheetTags",True) # '$stylesheetTags' on line 26, col 1
if _v is not None: write(_filter(_v, rawExpr='$stylesheetTags')) # from line 26, col 1.
write(' \n')
_v = VFFSL(SL,"javascriptTags",True) # '$javascriptTags' on line 27, col 1
if _v is not None: write(_filter(_v, rawExpr='$javascriptTags')) # from line 27, col 1.
write('\n</head>\n')
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
def writeBody(self, **KWS):
## CHEETAH: generated from #block writeBody at line 36, col 1.
trans = KWS.get("trans")
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
write('This skeleton page has no flesh. Its body needs to be implemented.\n')
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
def respond(self, trans=None):
## CHEETAH: main method generated for this template
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
## START CACHE REGION: ID=header. line 6, col 1 in the source.
_RECACHE_header = False
_cacheRegion_header = self.getCacheRegion(regionID='header', cacheInfo={'type': 2, 'id': 'header'})
if _cacheRegion_header.isNew():
_RECACHE_header = True
_cacheItem_header = _cacheRegion_header.getCacheItem('header')
if _cacheItem_header.hasExpired():
_RECACHE_header = True
if (not _RECACHE_header) and _cacheItem_header.getRefreshTime():
try:
_output = _cacheItem_header.renderOutput()
except KeyError:
_RECACHE_header = True
else:
write(_output)
del _output
if _RECACHE_header or not _cacheItem_header.getRefreshTime():
_orig_transheader = trans
trans = _cacheCollector_header = DummyTransaction()
write = _cacheCollector_header.response().write
_v = VFFSL(SL,"docType",True) # '$docType' on line 7, col 1
if _v is not None: write(_filter(_v, rawExpr='$docType')) # from line 7, col 1.
write('\n')
_v = VFFSL(SL,"htmlTag",True) # '$htmlTag' on line 8, col 1
if _v is not None: write(_filter(_v, rawExpr='$htmlTag')) # from line 8, col 1.
write('''
<!-- This document was autogenerated by Cheetah(http://CheetahTemplate.org).
Do not edit it directly!
Copyright ''')
_v = VFFSL(SL,"currentYr",True) # '$currentYr' on line 12, col 11
if _v is not None: write(_filter(_v, rawExpr='$currentYr')) # from line 12, col 11.
write(' - ')
_v = VFFSL(SL,"siteCopyrightName",True) # '$siteCopyrightName' on line 12, col 24
if _v is not None: write(_filter(_v, rawExpr='$siteCopyrightName')) # from line 12, col 24.
write(' - All Rights Reserved.\nFeel free to copy any javascript or html you like on this site,\nprovided you remove all links and/or references to ')
_v = VFFSL(SL,"siteDomainName",True) # '$siteDomainName' on line 14, col 52
if _v is not None: write(_filter(_v, rawExpr='$siteDomainName')) # from line 14, col 52.
write('''
However, please do not copy any content or images without permission.
''')
_v = VFFSL(SL,"siteCredits",True) # '$siteCredits' on line 17, col 1
if _v is not None: write(_filter(_v, rawExpr='$siteCredits')) # from line 17, col 1.
write('''
-->
''')
self.writeHeadTag(trans=trans)
write('\n')
trans = _orig_transheader
write = trans.response().write
_cacheData = _cacheCollector_header.response().getvalue()
_cacheItem_header.setData(_cacheData)
write(_cacheData)
del _cacheData
del _cacheCollector_header
del _orig_transheader
## END CACHE REGION: header
write('\n')
_v = VFFSL(SL,"bodyTag",True) # '$bodyTag' on line 34, col 1
if _v is not None: write(_filter(_v, rawExpr='$bodyTag')) # from line 34, col 1.
write('\n\n')
self.writeBody(trans=trans)
write('''
</body>
</html>
''')
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
##################################################
## CHEETAH GENERATED ATTRIBUTES
_CHEETAH__instanceInitialized = False
_CHEETAH_version = __CHEETAH_version__
_CHEETAH_versionTuple = __CHEETAH_versionTuple__
_CHEETAH_genTime = __CHEETAH_genTime__
_CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
_CHEETAH_src = __CHEETAH_src__
_CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
_mainCheetahMethod_for_SkeletonPage= 'respond'
## END CLASS DEFINITION
if not hasattr(SkeletonPage, '_initCheetahAttributes'):
templateAPIClass = getattr(SkeletonPage, '_CHEETAH_templateClass', Template)
templateAPIClass._addCheetahPlumbingCodeToClass(SkeletonPage)
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/
##################################################
## if run from command line:
if __name__ == '__main__':
from Cheetah.TemplateCmdLineIface import CmdLineIface
CmdLineIface(templateObj=SkeletonPage()).run()
|
|
import numpy as np
from jax import (ShapedArray, abstract_arrays, dtypes, core as jc,
linear_util as lu)
from jax.util import safe_map, safe_zip
from jax.core import Literal, Jaxpr, JaxprEqn, Var, TypedJaxpr
from jax.interpreters import xla, partial_eval as pe
from functools import partial
map = safe_map
zip = safe_zip
class InfShapeError(Exception): pass
class InfType:
neg: bool
def __init__(self, neg=False):
self.neg = neg
def __add__(self, other):
if isinstance(other, InfType) and self.neg != other.neg:
raise InfShapeError
else:
return _neginf if self.neg else inf
def __sub__(self, other):
if isinstance(other, InfType) and self.neg == other.neg:
raise InfShapeError
else:
return _neginf if self.neg else inf
def __neg__(self):
return inf if self.neg else _neginf
def __mul__(self, other):
if not isinstance(other, InfType) and other == 0:
raise InfShapeError
other_neg = other.neg if isinstance(other, InfType) else other < 0
return inf if other_neg == self.neg else _neginf
def __rmul__(self, other):
return self * other # multiplication commutes
def __radd__(self, other):
return self + other # addition commutes
def __rsub__(self, other):
if isinstance(other, InfType) and self.neg == other.neg:
raise InfShapeError
else:
return inf if self.neg else _neginf
    def __floordiv__(self, divisor):
        if isinstance(divisor, InfType):
            raise InfShapeError
        else:
            # divisor is a plain number here, so only its sign matters
            divisor_neg = divisor < 0
            return inf if self.neg == divisor_neg else _neginf
def __eq__(self, other):
if isinstance(other, InfType):
return self.neg == other.neg
else:
return False
def __ne__(self, other):
if isinstance(other, InfType):
return self.neg != other.neg
else:
return True
def __ge__(self, other):
return not self.neg
def __le__(self, other):
return self.neg
def __gt__(self, other):
return not (self.neg or (isinstance(other, InfType) and not other.neg))
def __lt__(self, other):
return self.neg and not (isinstance(other, InfType) and other.neg)
def __str__(self):
return '-inf' if self.neg else 'inf'
def __repr__(self):
return self.__str__()
abstract_arrays._DIMENSION_TYPES.add(InfType)
inf = InfType()
_neginf = InfType(neg=True)
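# Sanity sketch of the arithmetic defined above (illustrative only):
#   inf + 1 -> inf        inf - inf -> InfShapeError
#   -1 * inf -> -inf      inf // 2 -> inf
#   inf > 5 -> True       _neginf < inf -> True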
def abstractify(x):
return ShapedArray(np.shape(x), dtypes.result_type(x))
def fastar_jaxpr(flat_fun, *args_flat):
in_avals = map(abstractify, args_flat)
in_pvals = map(pe.PartialVal.unknown, in_avals)
jaxpr, out_pvals, consts = pe.trace_to_jaxpr(
flat_fun, in_pvals, instantiate=True)
out_avals = [v.get_aval() for v in out_pvals]
return TypedJaxpr(refresh_names(inline_calls(submerge_consts(jaxpr, consts))),
[], in_avals, out_avals)
def tie_the_knot(typed_jaxpr):
jaxpr, _, in_avals, out_avals = typed_jaxpr
assert all(i == o for i, o in zip(in_avals, out_avals))
in2out = dict(zip(jaxpr.invars, jaxpr.outvars))
def replace(eqn):
invars = [in2out[i] if (isinstance(i, jc.Var) and i in in2out) else i
for i in eqn.invars]
return jc.JaxprEqn(invars, eqn.outvars, eqn.primitive, eqn.params,
eqn.source_info)
eqns = [replace(eqn) for eqn in jaxpr.eqns]
new_jaxpr = jc.Jaxpr(jaxpr.constvars, [], jaxpr.outvars, eqns)
return jc.TypedJaxpr(new_jaxpr, typed_jaxpr.literals, [],
typed_jaxpr.out_avals)
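# tie_the_knot rewires each equation to read the jaxpr's own outputs wherever
# it previously read the matching inputs, yielding a closed, self-referential
# jaxpr; the in_avals == out_avals assertion above is what makes this sound.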
# Move constants inside jaxpr, i.e. make them into 'literals'
# Need a custom Literal subclass because jax.core.Literal only supports scalars
class Literal_(Literal):
__slots__ = ["val"]
def __init__(self, val):
self.val = val
@property
def aval(self):
return jc.raise_to_shaped(jc.get_aval(self.val))
def __hash__(self):
return id(self.val)
def __eq__(self, other):
return self.val is other.val
def __repr__(self):
return '{}'.format(self.val)
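# Design note: hashing by id() and comparing with 'is' lets arbitrary (and
# unhashable) array values act as literals, at the cost of value-equal arrays
# comparing unequal.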
def inline_calls(jaxpr):
new_eqns = []
def inline_call(jaxpr, invars, outvars):
inmap = dict(zip(jaxpr.invars, invars))
outmap = dict(zip(jaxpr.outvars, outvars))
for eqn in jaxpr.eqns:
new_invars = [v if isinstance(v, Literal) else inmap.get(v, v)
for v in eqn.invars]
new_outvars = [outmap.get(v, v) for v in eqn.outvars]
call_jaxpr, params = jc.extract_call_jaxpr(eqn.primitive, eqn.params)
if call_jaxpr:
if eqn.primitive not in {jc.call_p, xla.xla_call_p}:
raise NotImplementedError
inline_call(call_jaxpr, new_invars, new_outvars)
else:
new_eqns.append(
JaxprEqn(new_invars, new_outvars, eqn.primitive, eqn.params,
eqn.source_info))
for eqn in jaxpr.eqns:
call_jaxpr, params = jc.extract_call_jaxpr(eqn.primitive, eqn.params)
if call_jaxpr:
if eqn.primitive not in {jc.call_p, xla.xla_call_p}:
raise NotImplementedError
inline_call(call_jaxpr, eqn.invars, eqn.outvars)
else:
new_eqns.append(eqn)
return Jaxpr(jaxpr.constvars, jaxpr.invars, jaxpr.outvars, new_eqns)
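# Illustrative effect of inline_calls: equations from call primitives
# (jc.call_p / xla.xla_call_p, e.g. those introduced by jit) are spliced into
# the parent jaxpr, so the result contains only flat primitive equations:
#
#   assert all(not e.primitive.call_primitive
#              for e in inline_calls(jaxpr).eqns)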
def refresh_names(jaxpr):
vs = {}
g = jc.gensym()
varmap = lambda v: vs[v] if v in vs else vs.setdefault(v, g(v.aval))
jaxpr_constvars = map(varmap, jaxpr.constvars)
jaxpr_invars = map(varmap, jaxpr.invars)
new_eqns = []
for eqn in jaxpr.eqns:
invars = [v if isinstance(v, Literal) else varmap(v) for v in eqn.invars]
outvars = map(varmap, eqn.outvars)
new_eqns.append(
JaxprEqn(invars, outvars, eqn.primitive, eqn.params, eqn.source_info))
jaxpr_outvars = map(varmap, jaxpr.outvars)
return Jaxpr(jaxpr_constvars, jaxpr_invars, jaxpr_outvars, new_eqns)
def submerge_consts(jaxpr, consts, invals=None):
"""
Replace constvars with literals in jaxpr and its sub-jaxprs.
"""
# TODO(j-towns): check that consts are in jax.core.literalable_types
consts = dict(zip(jaxpr.constvars, consts))
if invals is not None:
# We're in a call_jaxpr
new_jaxpr_invars = []
for var, val in zip(jaxpr.invars, invals):
if isinstance(val, Var):
new_jaxpr_invars.append(var)
else:
consts[var] = val
else:
new_jaxpr_invars = jaxpr.invars
new_eqns = []
for eqn in jaxpr.eqns:
if all(isinstance(var, Literal) or var in consts for var in eqn.invars):
# Perform constant folding if all inputs to an eqn are known
in_vals = [var.val if isinstance(var, Literal) else consts[var]
for var in eqn.invars]
call_jaxpr, params = jc.extract_call_jaxpr(eqn.primitive, eqn.params)
if call_jaxpr:
subfuns = [lu.wrap_init(partial(jc.eval_jaxpr, call_jaxpr, ()))]
else:
subfuns = []
ans = eqn.primitive.bind(*(subfuns + in_vals), **params)
if eqn.primitive.multiple_results:
for outvar, out in zip(eqn.outvars, ans):
consts[outvar] = out
else:
outvar, = eqn.outvars
consts[outvar] = ans
else:
new_invars = [consts[var] if (isinstance(var, Var) and var in consts)
else var for var in eqn.invars]
new_params = dict(eqn.params)
if eqn.primitive.call_primitive or eqn.primitive.map_primitive:
new_params['call_jaxpr'] = submerge_consts(eqn.params['call_jaxpr'], [],
new_invars)
new_invars = [var for var in new_invars if isinstance(var, Var)]
else:
new_invars = [var if isinstance(var, (Var, Literal)) else Literal_(var)
for var in new_invars]
new_eqns.append(JaxprEqn(invars=new_invars, outvars=eqn.outvars,
primitive=eqn.primitive, params=new_params,
source_info=eqn.source_info))
return Jaxpr([], new_jaxpr_invars, jaxpr.outvars, new_eqns)
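# Putting the pipeline together (hypothetical sketch): fastar_jaxpr above
# inlines calls, refreshes names and submerges constants, so the resulting
# TypedJaxpr carries no constvars and constants appear as Literal_ objects:
#
#   typed = fastar_jaxpr(flat_fun, np.zeros(3))
#   assert not typed.jaxpr.constvars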
|
|
# -*- coding: utf-8 -*-
"""
Helios Django Views
Ben Adida (ben@adida.net)
"""
from django.core.urlresolvers import reverse
from django.core.mail import send_mail
from django.core.paginator import Paginator
from django.core.exceptions import PermissionDenied
from django.http import *
from django.db import transaction, IntegrityError
from mimetypes import guess_type
from validate_email import validate_email
import csv, urllib, os, base64, logging
from crypto import algs, electionalgs, elgamal
from crypto import utils as cryptoutils
from workflows import homomorphic
from helios import utils as helios_utils
from view_utils import *
from helios_auth.security import *
from helios_auth.auth_systems import AUTH_SYSTEMS, can_list_categories
from helios_auth.models import AuthenticationExpired
from helios import security
from helios_auth import views as auth_views
import tasks
from security import *
from helios_auth.security import get_user, save_in_session_across_logouts
import uuid, datetime
from models import *
import forms, signals
# Parameters for everything
ELGAMAL_PARAMS = elgamal.Cryptosystem()
# trying new ones from OlivierP
ELGAMAL_PARAMS.p = 16328632084933010002384055033805457329601614771185955389739167309086214800406465799038583634953752941675645562182498120750264980492381375579367675648771293800310370964745767014243638518442553823973482995267304044326777047662957480269391322789378384619428596446446984694306187644767462460965622580087564339212631775817895958409016676398975671266179637898557687317076177218843233150695157881061257053019133078545928983562221396313169622475509818442661047018436264806901023966236718367204710755935899013750306107738002364137917426595737403871114187750804346564731250609196846638183903982387884578266136503697493474682071L
ELGAMAL_PARAMS.q = 61329566248342901292543872769978950870633559608669337131139375508370458778917L
ELGAMAL_PARAMS.g = 14887492224963187634282421537186040801304008017743492304481737382571933937568724473847106029915040150784031882206090286938661464458896494215273989547889201144857352611058572236578734319505128042602372864570426550855201448111746579871811249114781674309062693442442368697449970648232621880001709535143047913661432883287150003429802392229361583608686643243349727791976247247948618930423866180410558458272606627111270040091203073580238905303994472202930783207472394578498507764703191288249547659899997131166130259700604433891232298182348403175947450284433411265966789131024573629546048637848902243503970966798589660808533L
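# These are the standard Helios group parameters: p is a 2048-bit prime,
# q a 256-bit prime dividing p - 1, and g a generator of the order-q
# subgroup in which El Gamal operates.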
# object ready for serialization
ELGAMAL_PARAMS_LD_OBJECT = datatypes.LDObject.instantiate(ELGAMAL_PARAMS, datatype='legacy/EGParams')
# single election server? Load the single election
from models import Election
from django.conf import settings
def get_election_url(election):
return settings.URL_HOST + reverse(election_shortcut, args=[election.short_name])
def get_election_badge_url(election):
return settings.URL_HOST + reverse(election_badge, args=[election.uuid])
def get_election_govote_url(election):
return settings.URL_HOST + reverse(election_vote_shortcut, args=[election.short_name])
def get_castvote_url(cast_vote):
return settings.URL_HOST + reverse(castvote_shortcut, args=[cast_vote.vote_tinyhash])
##
## remote auth utils
def user_reauth(request, user):
# FIXME: should we be wary of infinite redirects here, and
# add a parameter to prevent it? Maybe.
login_url = "%s%s?%s" % (settings.SECURE_URL_HOST,
reverse(auth_views.start, args=[user.user_type]),
urllib.urlencode({'return_url':
request.get_full_path()}))
return HttpResponseRedirect(login_url)
##
## simple admin for development
##
def admin_autologin(request):
if "localhost" not in settings.URL_HOST and "127.0.0.1" not in settings.URL_HOST:
raise Http404
users = User.objects.filter(admin_p=True)
if len(users) == 0:
return HttpResponse("no admin users!")
user = users[0]
request.session['user'] = {'type' : user.user_type, 'user_id' : user.user_id}
return HttpResponseRedirect("/")
##
## General election features
##
@return_json
def election_params(request):
return ELGAMAL_PARAMS_LD_OBJECT.toJSONDict()
def election_verifier(request):
return render_template(request, "tally_verifier")
def election_single_ballot_verifier(request):
return render_template(request, "ballot_verifier")
def election_shortcut(request, election_short_name):
election = Election.get_by_short_name(election_short_name)
if election:
return HttpResponseRedirect(settings.SECURE_URL_HOST + reverse(one_election_view, args=[election.uuid]))
else:
raise Http404
# a hidden view behind the shortcut that performs the actual perm check
@election_view()
def _election_vote_shortcut(request, election):
vote_url = "%s/booth/vote.html?%s" % (settings.SECURE_URL_HOST, urllib.urlencode({'election_url' : reverse(one_election, args=[election.uuid])}))
test_cookie_url = "%s?%s" % (reverse(test_cookie), urllib.urlencode({'continue_url' : vote_url}))
return HttpResponseRedirect(test_cookie_url)
def election_vote_shortcut(request, election_short_name):
election = Election.get_by_short_name(election_short_name)
if election:
return _election_vote_shortcut(request, election_uuid=election.uuid)
else:
raise Http404
@election_view()
def _castvote_shortcut_by_election(request, election, cast_vote):
return render_template(request, 'castvote', {'cast_vote' : cast_vote, 'vote_content': cast_vote.vote.toJSON(), 'the_voter': cast_vote.voter, 'election': election})
def castvote_shortcut(request, vote_tinyhash):
try:
cast_vote = CastVote.objects.get(vote_tinyhash = vote_tinyhash)
except CastVote.DoesNotExist:
raise Http404
return _castvote_shortcut_by_election(request, election_uuid = cast_vote.voter.election.uuid, cast_vote=cast_vote)
@trustee_check
def trustee_keygenerator(request, election, trustee):
"""
A key generator with the current params, like the trustee home but without a specific election.
"""
eg_params_json = utils.to_json(ELGAMAL_PARAMS_LD_OBJECT.toJSONDict())
return render_template(request, "election_keygenerator", {'eg_params_json': eg_params_json, 'election': election, 'trustee': trustee})
@login_required
def elections_administered(request):
if not can_create_election(request):
return HttpResponseForbidden('only an administrator has elections to administer')
user = get_user(request)
elections = Election.get_by_user_as_admin(user)
return render_template(request, "elections_administered", {'elections': elections})
@login_required
def elections_voted(request):
user = get_user(request)
elections = Election.get_by_user_as_voter(user)
return render_template(request, "elections_voted", {'elections': elections})
@login_required
def election_new(request):
if not can_create_election(request):
return HttpResponseForbidden('only an administrator can create an election')
error = None
user = get_user(request)
if request.method == "GET":
election_form = forms.ElectionForm(initial={'private_p': settings.HELIOS_PRIVATE_DEFAULT,
'help_email': user.info.get("email", '')})
else:
check_csrf(request)
election_form = forms.ElectionForm(request.POST)
if election_form.is_valid():
# create the election obj
election_params = dict(election_form.cleaned_data)
# is the short name valid
if helios_utils.urlencode(election_params['short_name']) == election_params['short_name']:
election_params['uuid'] = str(uuid.uuid1())
election_params['cast_url'] = settings.SECURE_URL_HOST + reverse(one_election_cast, args=[election_params['uuid']])
# registration starts closed
election_params['openreg'] = False
user = get_user(request)
election_params['admin'] = user
try:
election = Election.objects.create(**election_params)
election.generate_trustee(ELGAMAL_PARAMS)
return HttpResponseRedirect(settings.SECURE_URL_HOST + reverse(one_election_view, args=[election.uuid]))
except IntegrityError:
error = "An election with short name %s already exists" % election_params['short_name']
else:
error = "No special characters allowed in the short name."
return render_template(request, "election_new", {'election_form': election_form, 'error': error})
@election_admin(frozen=False)
def one_election_edit(request, election):
error = None
RELEVANT_FIELDS = ['short_name', 'name', 'description', 'use_voter_aliases', 'election_type', 'private_p', 'help_email', 'randomize_answer_order', 'voting_starts_at', 'voting_ends_at']
# RELEVANT_FIELDS += ['use_advanced_audit_features']
if settings.ALLOW_ELECTION_INFO_URL:
RELEVANT_FIELDS += ['election_info_url']
if request.method == "GET":
values = {}
for attr_name in RELEVANT_FIELDS:
values[attr_name] = getattr(election, attr_name)
election_form = forms.ElectionForm(values)
else:
check_csrf(request)
election_form = forms.ElectionForm(request.POST)
if election_form.is_valid():
clean_data = election_form.cleaned_data
for attr_name in RELEVANT_FIELDS:
setattr(election, attr_name, clean_data[attr_name])
try:
election.save()
return HttpResponseRedirect(settings.SECURE_URL_HOST + reverse(one_election_view, args=[election.uuid]))
except IntegrityError:
error = "An election with short name %s already exists" % clean_data['short_name']
return render_template(request, "election_edit", {'election_form' : election_form, 'election' : election, 'error': error})
@election_admin(frozen=False)
def one_election_schedule(request, election):
return HttpResponse("foo")
@election_admin()
def one_election_extend(request, election):
if request.method == "GET":
election_form = forms.ElectionTimeExtensionForm({'voting_extended_until': election.voting_extended_until})
else:
check_csrf(request)
election_form = forms.ElectionTimeExtensionForm(request.POST)
if election_form.is_valid():
clean_data = election_form.cleaned_data
election.voting_extended_until = clean_data['voting_extended_until']
election.save()
return HttpResponseRedirect(settings.SECURE_URL_HOST + reverse(one_election_view, args=[election.uuid]))
return render_template(request, "election_extend", {'election_form' : election_form, 'election' : election})
@election_view()
@return_json
def one_election(request, election):
if not election:
raise Http404
return election.toJSONDict(complete=True)
@election_view()
@return_json
def one_election_meta(request, election):
if not election:
raise Http404
return election.metadata
@election_view()
def election_badge(request, election):
election_url = get_election_url(election)
params = {'election': election, 'election_url': election_url}
for option_name in ['show_title', 'show_vote_link']:
params[option_name] = (request.GET.get(option_name, '1') == '1')
return render_template(request, "election_badge", params)
@election_view()
def one_election_view(request, election):
user = get_user(request)
admin_p = security.user_can_admin_election(user, election)
can_feature_p = security.user_can_feature_election(user, election)
notregistered = False
eligible_p = True
election_url = get_election_url(election)
election_badge_url = get_election_badge_url(election)
status_update_message = None
vote_url = "%s/booth/vote.html?%s" % (settings.SECURE_URL_HOST, urllib.urlencode({'election_url' : reverse(one_election, args=[election.uuid])}))
test_cookie_url = "%s?%s" % (reverse(test_cookie), urllib.urlencode({'continue_url' : vote_url}))
if user:
voter = Voter.get_by_election_and_user(election, user)
if not voter:
try:
eligible_p = _check_eligibility(election, user)
except AuthenticationExpired:
return user_reauth(request, user)
notregistered = True
else:
voter = get_voter(request, user, election)
if voter:
# cast any votes?
votes = CastVote.get_by_voter(voter)
else:
votes = None
# status update message?
if election.openreg:
if election.voting_has_started:
status_update_message = u"Vote in %s" % election.name
else:
status_update_message = u"Register to vote in %s" % election.name
# result!
if election.result:
status_update_message = u"Results are in for %s" % election.name
trustees = Trustee.get_by_election(election)
# should we show the result?
show_result = election.result_released_at or (election.result and admin_p)
return render_template(request, 'election_view',
{'election' : election, 'trustees': trustees, 'admin_p': admin_p, 'user': user,
'voter': voter, 'votes': votes, 'notregistered': notregistered, 'eligible_p': eligible_p,
'can_feature_p': can_feature_p, 'election_url' : election_url,
'vote_url': vote_url, 'election_badge_url' : election_badge_url,
'show_result': show_result,
'test_cookie_url': test_cookie_url})
def test_cookie(request):
continue_url = request.GET['continue_url']
request.session.set_test_cookie()
next_url = "%s?%s" % (reverse(test_cookie_2), urllib.urlencode({'continue_url': continue_url}))
return HttpResponseRedirect(settings.SECURE_URL_HOST + next_url)
def test_cookie_2(request):
continue_url = request.GET['continue_url']
if not request.session.test_cookie_worked():
return HttpResponseRedirect(settings.SECURE_URL_HOST + ("%s?%s" % (reverse(nocookies), urllib.urlencode({'continue_url': continue_url}))))
request.session.delete_test_cookie()
return HttpResponseRedirect(continue_url)
def nocookies(request):
retest_url = "%s?%s" % (reverse(test_cookie), urllib.urlencode({'continue_url' : request.GET['continue_url']}))
return render_template(request, 'nocookies', {'retest_url': retest_url})
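# The three views above form a redirect chain: test_cookie sets Django's test
# cookie and bounces to test_cookie_2, which deletes it and continues to
# continue_url if the cookie came back, or sends the browser to nocookies,
# whose retest_url restarts the whole check.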
##
## Trustees and Public Key
##
## As of July 2009, there are always trustees for a Helios election: one trustee is acceptable, for simple elections.
##
@election_view()
@return_json
def list_trustees(request, election):
trustees = Trustee.get_by_election(election)
return [t.toJSONDict(complete=True) for t in trustees]
@election_view()
def list_trustees_view(request, election):
trustees = Trustee.get_by_election(election)
user = get_user(request)
admin_p = security.user_can_admin_election(user, election)
return render_template(request, 'list_trustees', {'election': election, 'trustees': trustees, 'admin_p':admin_p})
@election_admin(frozen=False)
def new_trustee(request, election):
if request.method == "GET":
return render_template(request, 'new_trustee', {'election' : election})
else:
check_csrf(request)
# get the public key and the hash, and add it
name = request.POST['name']
email = request.POST['email']
trustee = Trustee(uuid = str(uuid.uuid1()), election = election, name=name, email=email)
trustee.save()
return HttpResponseRedirect(settings.SECURE_URL_HOST + reverse(list_trustees_view, args=[election.uuid]))
@election_admin(frozen=False)
def new_trustee_helios(request, election):
"""
Make Helios a trustee of the election
"""
election.generate_trustee(ELGAMAL_PARAMS)
return HttpResponseRedirect(settings.SECURE_URL_HOST + reverse(list_trustees_view, args=[election.uuid]))
@election_admin(frozen=False)
def delete_trustee(request, election):
trustee = Trustee.get_by_election_and_uuid(election, request.GET['uuid'])
trustee.delete()
return HttpResponseRedirect(settings.SECURE_URL_HOST + reverse(list_trustees_view, args=[election.uuid]))
def trustee_login(request, election_short_name, trustee_email, trustee_secret):
election = Election.get_by_short_name(election_short_name)
if election:
trustee = Trustee.get_by_election_and_email(election, trustee_email)
if trustee:
if trustee.secret == trustee_secret:
set_logged_in_trustee(request, trustee)
return HttpResponseRedirect(settings.SECURE_URL_HOST + reverse(trustee_home, args=[election.uuid, trustee.uuid]))
else:
# bad secret, we'll let that redirect to the front page
pass
else:
# no such trustee
raise Http404
return HttpResponseRedirect(settings.SECURE_URL_HOST + "/")
@election_admin()
def trustee_send_url(request, election, trustee_uuid):
trustee = Trustee.get_by_election_and_uuid(election, trustee_uuid)
url = settings.SECURE_URL_HOST + reverse(trustee_login, args=[election.short_name, trustee.email, trustee.secret])
body = """
You are a trustee for %s.
Your trustee dashboard is at
%s
--
Helios
""" % (election.name, url)
helios_utils.send_email(settings.SERVER_EMAIL, ["%s <%s>" % (trustee.name, trustee.email)], 'your trustee homepage for %s' % election.name, body)
logging.info("URL %s " % url)
return HttpResponseRedirect(settings.SECURE_URL_HOST + reverse(list_trustees_view, args = [election.uuid]))
@trustee_check
def trustee_home(request, election, trustee):
return render_template(request, 'trustee_home', {'election': election, 'trustee':trustee})
@trustee_check
def trustee_check_sk(request, election, trustee):
return render_template(request, 'trustee_check_sk', {'election': election, 'trustee':trustee})
@trustee_check
def trustee_upload_pk(request, election, trustee):
if request.method == "POST":
# get the public key and the hash, and add it
public_key_and_proof = utils.from_json(request.POST['public_key_json'])
trustee.public_key = algs.EGPublicKey.fromJSONDict(public_key_and_proof['public_key'])
trustee.pok = algs.DLogProof.fromJSONDict(public_key_and_proof['pok'])
# verify the pok
if not trustee.public_key.verify_sk_proof(trustee.pok, algs.DLog_challenge_generator):
raise Exception("bad pok for this public key")
trustee.public_key_hash = utils.hash_b64(utils.to_json(trustee.public_key.toJSONDict()))
trustee.save()
# send a note to admin
try:
election.admin.send_message("%s - trustee pk upload" % election.name, "trustee %s (%s) uploaded a pk." % (trustee.name, trustee.email))
except Exception:
# oh well, no message sent
pass
return HttpResponseRedirect(settings.SECURE_URL_HOST + reverse(trustee_home, args=[election.uuid, trustee.uuid]))
##
## Ballot Management
##
@election_view()
@return_json
def get_randomness(request, election):
"""
get some randomness to sprinkle into the sjcl entropy pool
"""
return {
# back to urandom, it's fine
"randomness" : base64.b64encode(os.urandom(32))
#"randomness" : base64.b64encode(uuid.uuid4().bytes + uuid.uuid4().bytes)
}
@election_view(frozen=True)
@return_json
def encrypt_ballot(request, election):
"""
perform the ballot encryption given answers_json, a JSON'ified list of list of answers
(list of list because each question could have a list of answers if more than one.)
"""
# FIXME: maybe make this just request.POST at some point?
answers = utils.from_json(request.REQUEST['answers_json'])
ev = homomorphic.EncryptedVote.fromElectionAndAnswers(election, answers)
return ev.ld_object.includeRandomness().toJSONDict()
@election_view(frozen=True)
def post_audited_ballot(request, election):
if request.method == "POST":
raw_vote = request.POST['audited_ballot']
encrypted_vote = electionalgs.EncryptedVote.fromJSONDict(utils.from_json(raw_vote))
vote_hash = encrypted_vote.get_hash()
audited_ballot = AuditedBallot(raw_vote = raw_vote, vote_hash = vote_hash, election = election)
audited_ballot.save()
return SUCCESS
# we don't require frozen election to allow for ballot preview
@election_view()
def one_election_cast(request, election):
"""
on a GET, this is a cancellation, on a POST it's a cast
"""
if request.method == "GET":
return HttpResponseRedirect("%s%s" % (settings.SECURE_URL_HOST, reverse(one_election_view, args = [election.uuid])))
user = get_user(request)
encrypted_vote = request.POST['encrypted_vote']
save_in_session_across_logouts(request, 'encrypted_vote', encrypted_vote)
return HttpResponseRedirect("%s%s" % (settings.SECURE_URL_HOST, reverse(one_election_cast_confirm, args=[election.uuid])))
@election_view(allow_logins=True)
def password_voter_login(request, election):
"""
This is used to log in as a voter for a particular election
"""
# the URL to send the user to after they've logged in
return_url = request.REQUEST.get('return_url', reverse(one_election_cast_confirm, args=[election.uuid]))
bad_voter_login = (request.GET.get('bad_voter_login', "0") == "1")
if request.method == "GET":
# if user logged in somehow in the interim, e.g. using the login link for administration,
# then go!
if user_can_see_election(request, election):
return HttpResponseRedirect(settings.SECURE_URL_HOST + reverse(one_election_view, args = [election.uuid]))
password_login_form = forms.VoterPasswordForm()
return render_template(request, 'password_voter_login',
{'election': election,
'return_url' : return_url,
'password_login_form': password_login_form,
'bad_voter_login' : bad_voter_login})
login_url = request.REQUEST.get('login_url', None)
if not login_url:
# login depending on whether this is a private election
# cause if it's private the login is happening on the front page
if election.private_p:
login_url = reverse(password_voter_login, args=[election.uuid])
else:
login_url = reverse(one_election_cast_confirm, args=[election.uuid])
password_login_form = forms.VoterPasswordForm(request.POST)
if password_login_form.is_valid():
try:
voter = election.voter_set.get(voter_login_id = password_login_form.cleaned_data['voter_id'].strip(),
voter_password = password_login_form.cleaned_data['password'].strip())
request.session['CURRENT_VOTER_ID'] = voter.id
# if we're asked to cast, let's do it
if request.POST.get('cast_ballot') == "1":
return one_election_cast_confirm(request, election.uuid)
except Voter.DoesNotExist:
redirect_url = login_url + "?" + urllib.urlencode({
'bad_voter_login' : '1',
'return_url' : return_url
})
return HttpResponseRedirect(settings.SECURE_URL_HOST + redirect_url)
else:
# bad form, bad voter login
redirect_url = login_url + "?" + urllib.urlencode({
'bad_voter_login' : '1',
'return_url' : return_url
})
return HttpResponseRedirect(settings.SECURE_URL_HOST + redirect_url)
return HttpResponseRedirect(settings.SECURE_URL_HOST + return_url)
@election_view()
def one_election_cast_confirm(request, election):
user = get_user(request)
# if no encrypted vote, the user is reloading this page or otherwise getting here in a bad way
if (not request.session.has_key('encrypted_vote')) or request.session['encrypted_vote'] == None:
return HttpResponseRedirect(settings.URL_HOST)
# election not frozen or started
if not election.voting_has_started():
return render_template(request, 'election_not_started', {'election': election})
voter = get_voter(request, user, election)
# auto-register this person if the election is openreg
if user and not voter and election.openreg:
voter = _register_voter(election, user)
# tallied election, no vote casting
if election.encrypted_tally or election.result:
return render_template(request, 'election_tallied', {'election': election})
encrypted_vote = request.session['encrypted_vote']
vote_fingerprint = cryptoutils.hash_b64(encrypted_vote)
# if this user is a voter, prepare some stuff
if voter:
vote = datatypes.LDObject.fromDict(utils.from_json(encrypted_vote), type_hint='legacy/EncryptedVote').wrapped_obj
if 'HTTP_X_FORWARDED_FOR' in request.META:
# HTTP_X_FORWARDED_FOR sometimes has a comma-delimited list of IP addresses
# Here we want the originating IP address
# See http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/x-forwarded-headers.html
# and https://en.wikipedia.org/wiki/X-Forwarded-For
cast_ip = request.META.get('HTTP_X_FORWARDED_FOR').split(',')[0].strip() or None
else:
cast_ip = request.META.get('REMOTE_ADDR', None)
# prepare the vote to cast
cast_vote_params = {
'vote' : vote,
'voter' : voter,
'vote_hash': vote_fingerprint,
'cast_at': datetime.datetime.utcnow(),
'cast_ip': cast_ip
}
cast_vote = CastVote(**cast_vote_params)
else:
cast_vote = None
if request.method == "GET":
if voter:
past_votes = CastVote.get_by_voter(voter)
if len(past_votes) == 0:
past_votes = None
else:
past_votes = None
if cast_vote:
# check for issues
issues = cast_vote.issues(election)
else:
issues = None
bad_voter_login = (request.GET.get('bad_voter_login', "0") == "1")
# status update this vote
if voter and voter.can_update_status():
status_update_label = voter.user.update_status_template() % "your smart ballot tracker"
status_update_message = "I voted in %s - my smart tracker is %s.. #heliosvoting" % (get_election_url(election),cast_vote.vote_hash[:10])
else:
status_update_label = None
status_update_message = None
# do we need to constrain the auth_systems?
if election.eligibility:
auth_systems = [e['auth_system'] for e in election.eligibility]
else:
auth_systems = None
password_only = False
if auth_systems == None or 'password' in auth_systems:
show_password = True
password_login_form = forms.VoterPasswordForm()
if auth_systems == ['password']:
password_only = True
else:
show_password = False
password_login_form = None
return_url = reverse(one_election_cast_confirm, args=[election.uuid])
login_box = auth_views.login_box_raw(request, return_url=return_url, auth_systems = auth_systems)
return render_template(request, 'election_cast_confirm', {
'login_box': login_box, 'election' : election, 'vote_fingerprint': vote_fingerprint,
'past_votes': past_votes, 'issues': issues, 'voter' : voter,
'return_url': return_url,
'status_update_label': status_update_label, 'status_update_message': status_update_message,
'show_password': show_password, 'password_only': password_only, 'password_login_form': password_login_form,
'bad_voter_login': bad_voter_login})
if request.method == "POST":
check_csrf(request)
# voting has not started or has ended
if (not election.voting_has_started()) or election.voting_has_stopped():
return HttpResponseRedirect(settings.URL_HOST)
# if user is not logged in
# bring back to the confirmation page to let him know
if not voter:
return HttpResponseRedirect(settings.SECURE_URL_HOST + reverse(one_election_cast_confirm, args=[election.uuid]))
# don't store the vote in the voter's data structure until verification
cast_vote.save()
# status update?
if request.POST.get('status_update', False):
status_update_message = request.POST.get('status_update_message')
else:
status_update_message = None
# launch the verification task
tasks.cast_vote_verify_and_store.delay(
cast_vote_id = cast_vote.id,
status_update_message = status_update_message)
# remove the vote from the store
del request.session['encrypted_vote']
return HttpResponseRedirect("%s%s" % (settings.URL_HOST, reverse(one_election_cast_done, args=[election.uuid])))
@election_view()
def one_election_cast_done(request, election):
"""
This view needs to be loaded because of the IFRAME, but then this causes
problems if someone clicks "reload". So we need a strategy.
We store the ballot hash in the session
"""
user = get_user(request)
voter = get_voter(request, user, election)
if voter:
votes = CastVote.get_by_voter(voter)
vote_hash = votes[0].vote_hash
cv_url = get_castvote_url(votes[0])
# only log out if the setting says so *and* we're dealing
# with a site-wide voter. Definitely remove current_voter
# checking that voter.user != None is needed because voter.user may now be None if voter is password only
if voter.user == user and voter.user != None:
logout = settings.LOGOUT_ON_CONFIRMATION
else:
logout = False
del request.session['CURRENT_VOTER_ID']
save_in_session_across_logouts(request, 'last_vote_hash', vote_hash)
save_in_session_across_logouts(request, 'last_vote_cv_url', cv_url)
else:
vote_hash = request.session['last_vote_hash']
cv_url = request.session['last_vote_cv_url']
logout = False
# local logout ensures that there's no more
# user locally
# WHY DO WE COMMENT THIS OUT? because we want to force a full logout via the iframe, including
# from remote systems, just in case, i.e. CAS
# if logout:
# auth_views.do_local_logout(request)
# remote logout is happening asynchronously in an iframe to be modular given the logout mechanism
# include_user is set to False if logout is happening
return render_template(request, 'cast_done', {'election': election,
'vote_hash': vote_hash, 'logout': logout},
include_user=(not logout))
@election_view()
@return_json
def one_election_result(request, election):
if not election.result_released_at:
raise PermissionDenied
return election.result
@election_view()
@return_json
def one_election_result_proof(request, election):
if not election.result_released_at:
raise PermissionDenied
return election.result_proof
@election_view(frozen=True)
def one_election_bboard(request, election):
"""
UI to show election bboard
"""
after = request.GET.get('after', None)
offset= int(request.GET.get('offset', 0))
limit = int(request.GET.get('limit', 50))
order_by = 'voter_id'
# unless it's by alias, in which case we better go by UUID
if election.use_voter_aliases:
order_by = 'alias'
# if there's a specific voter
if request.GET.has_key('q'):
# FIXME: figure out the voter by voter_id
voters = []
else:
# load a bunch of voters
voters = Voter.get_by_election(election, after=after, limit=limit+1, order_by=order_by)
more_p = len(voters) > limit
if more_p:
voters = voters[0:limit]
next_after = getattr(voters[limit-1], order_by)
else:
next_after = None
return render_template(request, 'election_bboard', {'election': election, 'voters': voters, 'next_after': next_after,
'offset': offset, 'limit': limit, 'offset_plus_one': offset+1, 'offset_plus_limit': offset+limit,
'voter_id': request.GET.get('voter_id', '')})
@election_view(frozen=True)
def one_election_audited_ballots(request, election):
"""
UI to show election audited ballots
"""
if request.GET.has_key('vote_hash'):
b = AuditedBallot.get(election, request.GET['vote_hash'])
return HttpResponse(b.raw_vote, content_type="text/plain")
after = request.GET.get('after', None)
offset= int(request.GET.get('offset', 0))
limit = int(request.GET.get('limit', 50))
audited_ballots = AuditedBallot.get_by_election(election, after=after, limit=limit+1)
more_p = len(audited_ballots) > limit
if more_p:
audited_ballots = audited_ballots[0:limit]
next_after = audited_ballots[limit-1].vote_hash
else:
next_after = None
return render_template(request, 'election_audited_ballots', {'election': election, 'audited_ballots': audited_ballots, 'next_after': next_after,
'offset': offset, 'limit': limit, 'offset_plus_one': offset+1, 'offset_plus_limit': offset+limit})
@election_admin()
def voter_delete(request, election, voter_uuid):
"""
Two conditions under which a voter can be deleted:
- election is not frozen or
- election is open reg
"""
## FOR NOW we allow this to see if we can redefine the meaning of "closed reg" to be more flexible
# if election is frozen and has closed registration
#if election.frozen_at and (not election.openreg):
# raise PermissionDenied()
if election.encrypted_tally:
raise PermissionDenied()
voter = Voter.get_by_election_and_uuid(election, voter_uuid)
if voter:
if voter.vote_hash:
# send email to voter
subject = "Vote removed"
body = """
Your vote was removed from the election "%s".
--
Helios
""" % (election.name)
voter.user.send_message(subject, body)
# log it
election.append_log("Voter %s/%s and their vote were removed after election frozen" % (voter.voter_type,voter.voter_id))
elif election.frozen_at:
# log it
election.append_log("Voter %s/%s removed after election frozen" % (voter.voter_type,voter.voter_id))
voter.delete()
return HttpResponseRedirect(settings.SECURE_URL_HOST + reverse(voters_list_pretty, args=[election.uuid]))
@election_admin(frozen=False)
def one_election_set_reg(request, election):
"""
Set whether this is open registration or not
"""
# only allow this for public elections
if not election.private_p:
open_p = bool(int(request.GET['open_p']))
election.openreg = open_p
election.save()
return HttpResponseRedirect(settings.SECURE_URL_HOST + reverse(voters_list_pretty, args=[election.uuid]))
@election_admin()
def one_election_set_featured(request, election):
"""
Set whether this is a featured election or not
"""
user = get_user(request)
if not security.user_can_feature_election(user, election):
raise PermissionDenied()
featured_p = bool(int(request.GET['featured_p']))
election.featured_p = featured_p
election.save()
return HttpResponseRedirect(settings.SECURE_URL_HOST + reverse(one_election_view, args=[election.uuid]))
@election_admin()
def one_election_archive(request, election):
archive_p = request.GET.get('archive_p', True)
if bool(int(archive_p)):
election.archived_at = datetime.datetime.utcnow()
else:
election.archived_at = None
election.save()
return HttpResponseRedirect(settings.SECURE_URL_HOST + reverse(one_election_view, args=[election.uuid]))
@election_admin()
def one_election_copy(request, election):
# FIXME: make this a POST and CSRF protect it
# check_csrf(request)
# new short name by uuid, because it's easier and the user can change it.
new_uuid = uuid.uuid4()
new_short_name = new_uuid
new_election = Election.objects.create(
admin = election.admin,
uuid = new_uuid,
datatype = election.datatype,
short_name = new_short_name,
name = "Copy of " + election.name,
election_type = election.election_type,
private_p = election.private_p,
description = election.description,
questions = election.questions,
eligibility = election.eligibility,
openreg = election.openreg,
use_voter_aliases = election.use_voter_aliases,
use_advanced_audit_features = election.use_advanced_audit_features,
randomize_answer_order = election.randomize_answer_order,
registration_starts_at = election.registration_starts_at,
voting_starts_at = election.voting_starts_at,
voting_ends_at = election.voting_ends_at,
cast_url = settings.SECURE_URL_HOST + reverse(one_election_cast, args=[new_uuid])
)
new_election.generate_trustee(ELGAMAL_PARAMS)
return HttpResponseRedirect(settings.SECURE_URL_HOST + reverse(one_election_view, args=[new_election.uuid]))
# changed from admin to view because
# anyone can see the questions, the administration aspect is now
# built into the page
@election_view()
def one_election_questions(request, election):
questions_json = utils.to_json(election.questions)
user = get_user(request)
admin_p = security.user_can_admin_election(user, election)
return render_template(request, 'election_questions', {'election': election, 'questions_json' : questions_json, 'admin_p': admin_p})
def _check_eligibility(election, user):
# prevent password-users from signing up willy-nilly for other elections, doesn't make sense
if user.user_type == 'password':
return False
return election.user_eligible_p(user)
def _register_voter(election, user):
if not _check_eligibility(election, user):
return None
return Voter.register_user_in_election(user, election)
@election_view()
def one_election_register(request, election):
if not election.openreg:
return HttpResponseForbidden('registration is closed for this election')
check_csrf(request)
user = get_user(request)
voter = Voter.get_by_election_and_user(election, user)
if not voter:
voter = _register_voter(election, user)
return HttpResponseRedirect(settings.SECURE_URL_HOST + reverse(one_election_view, args=[election.uuid]))
@election_admin(frozen=False)
def one_election_save_questions(request, election):
check_csrf(request)
questions = utils.from_json(request.POST['questions_json'])
questions_saved = election.save_questions_safely(questions)
if questions_saved:
election.save()
return SUCCESS
else:
return FAILURE
@transaction.atomic
@election_admin(frozen=False)
def one_election_freeze(request, election):
# figure out the number of questions and trustees
issues = election.issues_before_freeze
if request.method == "GET":
return render_template(request, 'election_freeze', {'election': election, 'issues' : issues, 'issues_p' : len(issues) > 0})
else:
check_csrf(request)
election.freeze()
if get_user(request):
return HttpResponseRedirect(settings.SECURE_URL_HOST + reverse(one_election_view, args=[election.uuid]))
else:
return SUCCESS
def _check_election_tally_type(election):
for q in election.questions:
if q['tally_type'] != "homomorphic":
return False
return True
@election_admin(frozen=True)
def one_election_compute_tally(request, election):
"""
tallying is now done all at once
"""
if not _check_election_tally_type(election):
return HttpResponseRedirect(settings.SECURE_URL_HOST + reverse(one_election_view,args=[election.election_id]))
if request.method == "GET":
return render_template(request, 'election_compute_tally', {'election': election})
check_csrf(request)
if not election.voting_ended_at:
election.voting_ended_at = datetime.datetime.utcnow()
election.tallying_started_at = datetime.datetime.utcnow()
election.save()
tasks.election_compute_tally.delay(election_id = election.id)
return HttpResponseRedirect(settings.SECURE_URL_HOST + reverse(one_election_view,args=[election.uuid]))
@trustee_check
def trustee_decrypt_and_prove(request, election, trustee):
if not _check_election_tally_type(election) or election.encrypted_tally == None:
return HttpResponseRedirect(settings.SECURE_URL_HOST + reverse(one_election_view,args=[election.uuid]))
return render_template(request, 'trustee_decrypt_and_prove', {'election': election, 'trustee': trustee})
@election_view(frozen=True)
def trustee_upload_decryption(request, election, trustee_uuid):
if not _check_election_tally_type(election) or election.encrypted_tally == None:
return FAILURE
trustee = Trustee.get_by_election_and_uuid(election, trustee_uuid)
factors_and_proofs = utils.from_json(request.POST['factors_and_proofs'])
# verify the decryption factors
trustee.decryption_factors = [[datatypes.LDObject.fromDict(factor, type_hint='core/BigInteger').wrapped_obj for factor in one_q_factors] for one_q_factors in factors_and_proofs['decryption_factors']]
# each proof needs to be deserialized
trustee.decryption_proofs = [[datatypes.LDObject.fromDict(proof, type_hint='legacy/EGZKProof').wrapped_obj for proof in one_q_proofs] for one_q_proofs in factors_and_proofs['decryption_proofs']]
if trustee.verify_decryption_proofs():
trustee.save()
try:
# send a note to admin
election.admin.send_message("%s - trustee partial decryption" % election.name, "trustee %s (%s) did their partial decryption." % (trustee.name, trustee.email))
except Exception:
# ah well
pass
return SUCCESS
else:
return FAILURE
@election_admin(frozen=True)
def release_result(request, election):
"""
result is computed and now it's time to release the result
"""
election_url = get_election_url(election)
if request.method == "POST":
check_csrf(request)
election.release_result()
election.save()
if request.POST.get('send_email', ''):
return HttpResponseRedirect("%s?%s" % (settings.SECURE_URL_HOST + reverse(voters_email, args=[election.uuid]),urllib.urlencode({'template': 'result'})))
else:
return HttpResponseRedirect("%s" % (settings.SECURE_URL_HOST + reverse(one_election_view, args=[election.uuid])))
# if just viewing the form or the form is not valid
return render_template(request, 'release_result', {'election': election})
@election_admin(frozen=True)
def combine_decryptions(request, election):
"""
combine trustee decryptions
"""
election_url = get_election_url(election)
if request.method == "POST":
check_csrf(request)
election.combine_decryptions()
election.save()
return HttpResponseRedirect("%s" % (settings.SECURE_URL_HOST + reverse(one_election_view, args=[election.uuid])))
# if just viewing the form or the form is not valid
return render_template(request, 'combine_decryptions', {'election': election})
@election_admin(frozen=True)
def one_election_set_result_and_proof(request, election):
if election.tally_type != "homomorphic" or election.encrypted_tally == None:
return HttpResponseRedirect(settings.SECURE_URL_HOST + reverse(one_election_view,args=[election.election_id]))
# FIXME: check csrf
election.result = utils.from_json(request.POST['result'])
election.result_proof = utils.from_json(request.POST['result_proof'])
election.save()
if get_user(request):
return HttpResponseRedirect(settings.SECURE_URL_HOST + reverse(one_election_view, args=[election.uuid]))
else:
return SUCCESS
@election_view()
def voters_list_pretty(request, election):
"""
Show the list of voters
now using Django pagination
"""
# for django pagination support
page = int(request.GET.get('page', 1))
limit = int(request.GET.get('limit', 50))
q = request.GET.get('q','')
order_by = 'user__user_id'
# unless it's by alias, in which case we better go by UUID
if election.use_voter_aliases:
order_by = 'alias'
user = get_user(request)
admin_p = security.user_can_admin_election(user, election)
categories = None
eligibility_category_id = None
try:
if admin_p and can_list_categories(user.user_type):
categories = AUTH_SYSTEMS[user.user_type].list_categories(user)
eligibility_category_id = election.eligibility_category_id(user.user_type)
except AuthenticationExpired:
return user_reauth(request, user)
# files being processed
voter_files = election.voterfile_set.all()
# load a bunch of voters
# voters = Voter.get_by_election(election, order_by=order_by)
voters = Voter.objects.filter(election = election).order_by(order_by).defer('vote')
if q != '':
if election.use_voter_aliases:
voters = voters.filter(alias__icontains = q)
else:
voters = voters.filter(voter_name__icontains = q)
voter_paginator = Paginator(voters, limit)
voters_page = voter_paginator.page(page)
total_voters = voter_paginator.count
return render_template(request, 'voters_list',
{'election': election, 'voters_page': voters_page,
'voters': voters_page.object_list, 'admin_p': admin_p,
'email_voters': helios.VOTERS_EMAIL,
'limit': limit, 'total_voters': total_voters,
'upload_p': helios.VOTERS_UPLOAD, 'q' : q,
'voter_files': voter_files,
'categories': categories,
'eligibility_category_id' : eligibility_category_id})
@election_admin()
def voters_eligibility(request, election):
"""
set eligibility for voters
"""
user = get_user(request)
if request.method == "GET":
# this shouldn't happen, only POSTs
return HttpResponseRedirect("/")
# for now, private elections cannot change eligibility
if election.private_p:
return HttpResponseRedirect(settings.SECURE_URL_HOST + reverse(voters_list_pretty, args=[election.uuid]))
# eligibility
eligibility = request.POST['eligibility']
if eligibility in ['openreg', 'limitedreg']:
election.openreg= True
if eligibility == 'closedreg':
election.openreg= False
if eligibility == 'limitedreg':
# now process the constraint
category_id = request.POST['category_id']
constraint = AUTH_SYSTEMS[user.user_type].generate_constraint(category_id, user)
election.eligibility = [{'auth_system': user.user_type, 'constraint': [constraint]}]
else:
election.eligibility = None
election.save()
return HttpResponseRedirect(settings.SECURE_URL_HOST + reverse(voters_list_pretty, args=[election.uuid]))
@election_admin()
def voters_upload(request, election):
"""
Upload a CSV of password-based voters with
voter_id, email, name
name and email are needed only if voter_type is static
"""
## TRYING this: allowing voters upload by admin when election is frozen
#if election.frozen_at and not election.openreg:
# raise PermissionDenied()
if request.method == "GET":
return render_template(request, 'voters_upload', {'election': election, 'error': request.GET.get('e',None)})
if request.method == "POST":
if bool(request.POST.get('confirm_p', 0)):
# launch the background task to parse that file
tasks.voter_file_process.delay(voter_file_id = request.session['voter_file_id'])
del request.session['voter_file_id']
return HttpResponseRedirect(settings.SECURE_URL_HOST + reverse(voters_list_pretty, args=[election.uuid]))
else:
# we need to confirm
if request.FILES.has_key('voters_file'):
voters_file = request.FILES['voters_file']
voter_file_obj = election.add_voters_file(voters_file)
request.session['voter_file_id'] = voter_file_obj.id
problems = []
# import the first few lines to check
try:
voters = [v for v in voter_file_obj.itervoters()][:5]
except Exception:
voters = []
problems.append("your CSV file could not be processed. Please check that it is a proper CSV file.")
# check if voter emails look like emails
if False in [validate_email(v['email']) for v in voters]:
problems.append("those don't look like correct email addresses. Are you sure you uploaded a file with email address as second field?")
return render_template(request, 'voters_upload_confirm', {'election': election, 'voters': voters, 'problems': problems})
else:
return HttpResponseRedirect("%s?%s" % (settings.SECURE_URL_HOST + reverse(voters_upload, args=[election.uuid]), urllib.urlencode({'e':'no voter file specified, try again'})))
@election_admin()
def voters_upload_cancel(request, election):
"""
cancel upload of CSV file
"""
voter_file_id = request.session.get('voter_file_id', None)
if voter_file_id:
vf = VoterFile.objects.get(id = voter_file_id)
vf.delete()
del request.session['voter_file_id']
return HttpResponseRedirect(settings.SECURE_URL_HOST + reverse(one_election_view, args=[election.uuid]))
@election_admin(frozen=True)
def voters_email(request, election):
if not helios.VOTERS_EMAIL:
return HttpResponseRedirect(settings.SECURE_URL_HOST + reverse(one_election_view, args=[election.uuid]))
TEMPLATES = [
('vote', 'Time to Vote'),
('simple', 'Simple'),
('info', 'Additional Info'),
('result', 'Election Result')
]
template = request.REQUEST.get('template', 'vote')
if template not in [t[0] for t in TEMPLATES]:
raise Exception("bad template")
voter_id = request.REQUEST.get('voter_id', None)
if voter_id:
voter = Voter.get_by_election_and_voter_id(election, voter_id)
else:
voter = None
election_url = get_election_url(election)
election_vote_url = get_election_govote_url(election)
default_subject = render_template_raw(None, 'email/%s_subject.txt' % template, {
'custom_subject': "<SUBJECT>"
})
default_body = render_template_raw(None, 'email/%s_body.txt' % template, {
'election' : election,
'election_url' : election_url,
'election_vote_url' : election_vote_url,
'custom_subject' : default_subject,
'custom_message': '<BODY>',
'voter': {'vote_hash' : '<SMART_TRACKER>',
'name': '<VOTER_NAME>',
'voter_login_id': '<VOTER_LOGIN_ID>',
'voter_password': '<VOTER_PASSWORD>',
'voter_type' : election.voter_set.all()[0].voter_type,
'election' : election}
})
if request.method == "GET":
email_form = forms.EmailVotersForm(initial={'subject': election.name, 'body': ' '})
if voter:
email_form.fields['send_to'].widget = email_form.fields['send_to'].hidden_widget()
else:
email_form = forms.EmailVotersForm(request.POST)
if email_form.is_valid():
# the client knows to submit only once with a specific voter_id
subject_template = 'email/%s_subject.txt' % template
body_template = 'email/%s_body.txt' % template
extra_vars = {
'custom_subject' : email_form.cleaned_data['subject'],
'custom_message' : email_form.cleaned_data['body'],
'election_vote_url' : election_vote_url,
'election_url' : election_url,
'election' : election
}
voter_constraints_include = None
voter_constraints_exclude = None
if voter:
tasks.single_voter_email.delay(voter_uuid = voter.uuid, subject_template = subject_template, body_template = body_template, extra_vars = extra_vars)
else:
# exclude those who have not voted
if email_form.cleaned_data['send_to'] == 'voted':
voter_constraints_exclude = {'vote_hash' : None}
# include only those who have not voted
if email_form.cleaned_data['send_to'] == 'not-voted':
voter_constraints_include = {'vote_hash': None}
tasks.voters_email.delay(election_id = election.id, subject_template = subject_template, body_template = body_template, extra_vars = extra_vars, voter_constraints_include = voter_constraints_include, voter_constraints_exclude = voter_constraints_exclude)
# this batch process is all async, so we can return a nice note
return HttpResponseRedirect(settings.SECURE_URL_HOST + reverse(one_election_view, args=[election.uuid]))
return render_template(request, "voters_email", {
'email_form': email_form, 'election': election,
'voter': voter,
'default_subject': default_subject,
'default_body' : default_body,
'template' : template,
'templates' : TEMPLATES})
# Individual Voters
@election_view()
@return_json
def voter_list(request, election):
# normalize limit
limit = int(request.GET.get('limit', 500))
if limit > 500: limit = 500
voters = Voter.get_by_election(election, order_by='uuid', after=request.GET.get('after',None), limit= limit)
return [v.ld_object.toDict() for v in voters]
@election_view()
@return_json
def one_voter(request, election, voter_uuid):
"""
View a single voter's info as JSON.
"""
voter = Voter.get_by_election_and_uuid(election, voter_uuid)
if not voter:
raise Http404
return voter.toJSONDict()
@election_view()
@return_json
def voter_votes(request, election, voter_uuid):
"""
all cast votes by a voter
"""
voter = Voter.get_by_election_and_uuid(election, voter_uuid)
votes = CastVote.get_by_voter(voter)
return [v.toJSONDict() for v in votes]
@election_view()
@return_json
def voter_last_vote(request, election, voter_uuid):
"""
all cast votes by a voter
"""
voter = Voter.get_by_election_and_uuid(election, voter_uuid)
return voter.last_cast_vote().toJSONDict()
##
## cast ballots
##
@election_view()
@return_json
def ballot_list(request, election):
"""
This will order the ballots from most recent to oldest,
and optionally takes an 'after' parameter.
"""
limit = after = None
if request.GET.has_key('limit'):
limit = int(request.GET['limit'])
if request.GET.has_key('after'):
after = datetime.datetime.strptime(request.GET['after'], '%Y-%m-%d %H:%M:%S')
voters = Voter.get_by_election(election, cast=True, order_by='cast_at', limit=limit, after=after)
# we explicitly cast this to a short cast vote
return [v.last_cast_vote().ld_object.short.toDict(complete=True) for v in voters]
|
|
import proxy
import urllib
import re
import requests
from django.conf import settings
from django.http import HttpResponse
from lxml import etree as ET
from django.contrib.auth.models import User
from geoprisma.utils import isAuthorized
from geoprisma.models import Datastore
class WFSProxyFactory(object):
"""
Un proxy factory pour le WFS qui retourne le bon proxy WFS selon l'operation demande.
"""
WFS_OP_GETCAPABILITIES = 1
WFS_OP_DESCRIBEFEATURETYPE = 2
WFS_OP_GETFEATURE = 3
WFS_OP_TRANSACTION = 4
WFS_TRANSACTION_INSERT = 1
WFS_TRANSACTION_UPDATE = 2
WFS_TRANSACTION_DELETE = 3
def getWFSProxy(self, pobjService, prequest):
"""
Recupere le proxy selon l'operation
Args:
pobjService: Object service
prequest: La requete
Returns:
un proxy WFS
"""
strPostRequest = None
strContentType = None
iRequestType = None
iOPType = None
if prequest.method == 'POST':
if prequest and prequest.POST.get('data'):
strPostRequest = prequest.POST.get('data')
iRequestType = WFSProxy.REQUEST_TYPE_POSTXML
else:
strPostRequest = prequest.body
strContentType = prequest.META.get('CONTENT_TYPE')
if strContentType is not None:
strTok = strContentType.split(';')
if strTok[0] and (strTok[0] == 'text/xml' or strTok[0] == 'application/xml'):
iRequestType = WFSProxy.REQUEST_TYPE_POSTXML
else:
iRequestType = WFSProxy.REQUEST_TYPE_POST
else:
iRequestType = WFSProxy.REQUEST_TYPE_GET
if iRequestType == WFSProxy.REQUEST_TYPE_POSTXML:
iOPType = self.getOperationFromXml(strPostRequest)
if iRequestType == WFSProxy.REQUEST_TYPE_GET:
iOPType = self.getOperationFromGET(prequest)
objWFSProxy = None
if iOPType == self.WFS_OP_GETCAPABILITIES:
objWFSProxy = WFSGetCapabilityProxy(pobjService, prequest, strContentType, iRequestType, strPostRequest)
elif iOPType == self.WFS_OP_DESCRIBEFEATURETYPE:
objWFSProxy = WFSReadProxy(pobjService, prequest, strContentType, iRequestType, strPostRequest)
elif iOPType == self.WFS_OP_GETFEATURE:
objWFSProxy = WFSReadProxy(pobjService, prequest, strContentType, iRequestType, strPostRequest)
elif iOPType == self.WFS_OP_TRANSACTION:
pass
if objWFSProxy is None:
raise Exception("Proxy method not handled.")
return objWFSProxy
def getOperationFromGET(self, prequest):
"""
Recupere l'operation dans l'url
Args:
prequest: La requete
Returns:
Operation WFS
"""
strRequest = ''
for (strKey, strValue) in prequest.GET.iteritems():
if strKey.upper() == 'REQUEST':
strRequest = strValue
break
if strRequest == 'GetCapabilities':
return self.WFS_OP_GETCAPABILITIES
elif strRequest == 'DescribeFeatureType':
return self.WFS_OP_DESCRIBEFEATURETYPE
elif strRequest == 'GetFeature':
return self.WFS_OP_GETFEATURE
return None
def getOperationFromXml(self, pstrXMLRequest):
"""
Recupere l'operation dans l'XML
Args:
pstrXMLRequest: le xml contenant l'operation
Returns:
Operation WFS
"""
objDomDoc = ET.fromstring(pstrXMLRequest)
rootTag = ET.QName(objDomDoc.tag).localname
if rootTag == 'GetCapabilities':
return self.WFS_OP_GETCAPABILITIES
elif rootTag == 'DescribeFeatureType':
return self.WFS_OP_DESCRIBEFEATURETYPE
elif rootTag == 'GetFeature':
return self.WFS_OP_GETFEATURE
return None
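# Usage sketch (illustrative, names assumed): the factory inspects the GET
# params or the POSTed XML body, then returns a proxy specialized for the
# operation, whose process() yields the HttpResponse:
#
#   objProxy = WFSProxyFactory().getWFSProxy(objService, request)
#   return objProxy.process()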
class WFSProxy(proxy.Proxy):
"""
Class WFSProxy qui herite de la class proxy de base
"""
REQUEST_TYPE_POSTXML = 1
REQUEST_TYPE_POST = 2
REQUEST_TYPE_GET = 3
def __init__(self, pobjService, prequest, pstrContentType, piRequestType, pstrPostRequest):
"""
Constructeur
Args:
pobjService: Un object service
prequest: la requete
pstrContentType: le type de contenu
piRequestType: le type de requete
pstrPostRequest: String contenant du XML recu en POST
"""
super(WFSProxy, self).__init__(pobjService, prequest)
self.m_strContentType = pstrContentType
self.m_iRequestType = piRequestType
self.m_strPostRequest = pstrPostRequest
def getLayers(self):
"""
Recupere les couches selon le type de requete
Returns:
Tableau de couches
"""
objArrayLayers = []
namespaces = {'wfs': 'http://www.opengis.net/wfs',
'__empty_ns': ''}
if self.m_iRequestType == self.REQUEST_TYPE_POSTXML:
objDomDoc = ET.fromstring(self.m_strPostRequest)
objArrayXPathResult = objDomDoc.findall('./wfs:Query/', namespaces)
if objArrayXPathResult:
strTypeNames = str(objArrayXPathResult[0].get('typeName'))
listTok = strTypeNames.split(',')
for strTok in listTok:
objMatches = re.search('^(?:\w+:)?(\w+)(?:=\w+)?$', strTok)
if objMatches:
strFTName = objMatches.group(1)
objArrayLayers.append(strFTName)
elif self.m_iRequestType == self.REQUEST_TYPE_GET:
for (strKey, strValue) in self.m_objRequest.GET.iteritems():
if strKey.upper() == "LAYERS":
objArrayLayers = self.m_objRequest.GET.get(strKey).split(',')
break
return objArrayLayers
def getID(self):
strPathInfo = self.getPathInfo()
class WFSReadProxy(WFSProxy):
"""
Class WFSReadProxy qui herite de WFSProxy.
Elle recupere les informations selon l'operation.
"""
def getAction(self):
return self.CRUD_READ
def process(self):
"""
Traite l'information a retourner
Returns:
HttpResponse
"""
excluded_headers = ('connection',
'keep-alive',
'proxy-authenticate',
'proxy-authorization',
'te',
'trailers',
'transfer-encoding',
'upgrade',
'content-encoding',
'content-length')
if self.m_iRequestType == self.REQUEST_TYPE_POSTXML:
strPostRequest = self.m_strPostRequest
strContentType = self.m_strContentType
strPathInfo = self.getPathInfo()
url = self.addParam(self.m_objService.source)
headers = {}
if strContentType == "text/xml" or strContentType == "application/xml":
headers = {'Content-Type': strContentType+";charset=UTF-8"}
if isinstance(strPostRequest, unicode):
strPostRequest = strPostRequest.encode("utf-8")
requestUrl = requests.post(url, data=strPostRequest, headers=headers)
            response = HttpResponse(requestUrl.content)
response_content = response.content
for header in requestUrl.headers:
if header not in excluded_headers:
response[header] = requestUrl.headers.get(header)
            if requestUrl.headers.get('Content-Type') == 'text/csv':
strNewlineReplaceChar = None
objCsvSeperatorChar = None
utf8DecodeOption = None
strFileName = "record.csv"
                response['Content-Disposition'] = 'attachment; filename="'+strFileName+'"'
try:
strNewlineReplaceCharOption = self.m_objService.serviceoption_set.filter(name='csvNewlineReplaceChar')
if len(strNewlineReplaceCharOption) > 0:
strNewlineReplaceChar = strNewlineReplaceCharOption[0].value
objCsvSeperatorCharOption = self.m_objService.serviceoption_set.filter(name='csvSeperatorChar')
if len(objCsvSeperatorCharOption) > 0:
objCsvSeperatorChar = objCsvSeperatorCharOption[0].value
utf8DecodeOption = self.m_objService.serviceoption_set.filter(name='utf8Decode')
if len(utf8DecodeOption) > 0:
utf8DecodeOption = utf8DecodeOption[0].value
if objCsvSeperatorChar:
strCsvSeperatorChar = objCsvSeperatorChar
else:
strCsvSeperatorChar = ','
if strNewlineReplaceChar:
objArrayPatterns = re.findall('(\"[^\"]*\"'+strCsvSeperatorChar+'?|[^'+strCsvSeperatorChar+']*'+strCsvSeperatorChar+'?)', response.content)
objArrayPatterns = [pattern.decode('utf-8') for pattern in objArrayPatterns]
if objArrayPatterns:
                            for index in range(len(objArrayPatterns)):
strPattern = objArrayPatterns[index]
iQuotedString = re.search('(\"[^\"]*\"'+strCsvSeperatorChar+'?)', strPattern)
if iQuotedString:
objArrayPatterns[index] = strPattern.replace("\n", strNewlineReplaceChar)
objArrayPatterns = [pattern.encode('utf-8') for pattern in objArrayPatterns]
response.content = ''.join(objArrayPatterns)
except Exception:
raise
try:
if utf8DecodeOption == "true":
response_content = response_content.decode('utf-8').encode('iso-8859-1')
except Exception:
raise
response.content = ""
# Write UTF-8 BOM
response.write(u'\ufeff'.encode('utf-8'))
response.write(response_content)
return response
elif self.m_iRequestType == self.REQUEST_TYPE_GET:
strPathInfo = self.getPathInfo()
requestUrl = requests.get(self.addParam(self.m_objService.source))
            response = HttpResponse(requestUrl.content)
for header in requestUrl.headers:
if header not in excluded_headers:
response[header] = requestUrl.headers.get(header)
return response
class WFSGetCapabilityProxy(WFSProxy):
"""
Class WFSGetCapabilityProxy qui traite seulement le getCapabilities
"""
def getAction(self):
return self.CRUD_READ
def process(self):
"""
Recupere le XML retourne par mapserver et le decoupe selon les droits de l'utilisateur.
Le decoupage est different pour chaque version WFS.
Returns:
HttpResponce
"""
excluded_headers = ('connection',
'keep-alive',
'proxy-authenticate',
'proxy-authorization',
'te',
'trailers',
'transfer-encoding',
'upgrade',
'content-encoding',
'content-length')
url = self.addParam(self.m_objService.source)
requestUrl = requests.get(url)
objXml = ET.fromstring(requestUrl.text.encode("utf-8"))
docinfo = objXml.getroottree().docinfo
wfsversion = objXml.get("version")
user = User.objects.get(email=self.m_objRequest.user)
        # Sandbox handling
baseUrl = ""
if hasattr(settings, 'DEBUG_APP_URL') and settings.DEBUG_APP_URL:
baseUrl = settings.DEBUG_APP_URL
onlineResourceUrl = "http://"+self.m_objRequest.get_host()+baseUrl+"/gp/proxy/"+self.m_objService.slug+""
removeList = list()
        # WFS VERSION 1.0.0
if wfsversion == "1.0.0":
for elem in objXml:
if elem.tag == "{http://www.opengis.net/wfs}Service":
onlineResource = elem.find("{http://www.opengis.net/wfs}OnlineResource")
onlineResource.text = onlineResourceUrl
if elem.tag == "{http://www.opengis.net/wfs}Capability":
requestList = elem.find("{http://www.opengis.net/wfs}Request")
for request in requestList:
dcptypelist = request.findall("{http://www.opengis.net/wfs}DCPType")
for dcptype in dcptypelist:
http = dcptype.find("{http://www.opengis.net/wfs}HTTP")
for method in http:
method.set("onlineResource", onlineResourceUrl)
if elem.tag == "{http://www.opengis.net/wfs}FeatureTypeList":
for featureType in elem:
if featureType.tag == "{http://www.opengis.net/wfs}FeatureType":
featureTypeName = featureType.find("{http://www.opengis.net/wfs}Name")
try:
datastore = Datastore.objects.get(service=self.m_objService,
layers=featureTypeName.text)
dataResourceList = datastore.resource_set.all()
for resource in dataResourceList:
if isAuthorized(user, resource.name, "read"):
break
else:
removeList.append(featureType)
except Datastore.DoesNotExist:
removeList.append(featureType)
for featureType in removeList:
featureTypeList = objXml.find("{http://www.opengis.net/wfs}FeatureTypeList")
featureTypeList.remove(featureType)
        # WFS VERSION 1.1.0
elif wfsversion == "1.1.0":
for elem in objXml:
if elem.tag == "{http://www.opengis.net/ows}OperationsMetadata":
for operation in elem:
httptag = operation.find("{http://www.opengis.net/ows}DCP").find("{http://www.opengis.net/ows}HTTP")
for method in httptag:
method.set("{http://www.w3.org/1999/xlink}href", onlineResourceUrl)
if elem.tag == "{http://www.opengis.net/wfs}FeatureTypeList":
for featureType in elem:
if featureType.tag == "{http://www.opengis.net/wfs}FeatureType":
featureTypeName = featureType.find("{http://www.opengis.net/wfs}Name")
featureTypeNameText = featureTypeName.text.split(":")[0]
try:
datastore = Datastore.objects.get(service=self.m_objService, layers=featureTypeNameText)
dataResourceList = datastore.resource_set.all()
for resource in dataResourceList:
if isAuthorized(user, resource.name, "read"):
break
else:
removeList.append(featureType)
except Datastore.DoesNotExist:
removeList.append(featureType)
for featureType in removeList:
featureTypeList = objXml.find("{http://www.opengis.net/wfs}FeatureTypeList")
featureTypeList.remove(featureType)
        response = HttpResponse(ET.tostring(objXml, xml_declaration=True, encoding=docinfo.encoding))
        for header in requestUrl.headers:
            if header not in excluded_headers:
                response[header] = requestUrl.headers.get(header)
        return response
    def getAction(self):
        return self.CRUD_READ
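# Hedged sketch of the CSV post-processing technique in WFSReadProxy.process():
# tokenize with a separator-aware pattern, then replace newlines only inside
# quoted fields so that row delimiters survive.
def _example_replace_newlines_in_quoted_fields(strCsv, strSep=',', strRepl=' '):
    strEscSep = re.escape(strSep)
    strPattern = r'("[^"]*"%s?|[^%s]*%s?)' % (strEscSep, strEscSep, strEscSep)
    listTokens = re.findall(strPattern, strCsv)
    return ''.join(strTok.replace('\n', strRepl) if strTok.startswith('"')
                   else strTok for strTok in listTokens)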
|
|
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from unittest import mock
from . import model, parts, signing, test_config
def _get_identity_hash(i):
if i == '[IDENTITY]':
return 'identity'
    raise ValueError('unexpected identity {!r}'.format(i))
class TestGetParts(unittest.TestCase):
def test_get_parts_no_base(self):
config = test_config.TestConfig()
all_parts = parts.get_parts(config)
self.assertEqual('test.signing.bundle_id', all_parts['app'].identifier)
self.assertEqual('test.signing.bundle_id.framework',
all_parts['framework'].identifier)
self.assertEqual(
'test.signing.bundle_id.framework.AlertNotificationService',
all_parts['helper-alerts'].identifier)
self.assertEqual('test.signing.bundle_id.helper',
all_parts['helper-app'].identifier)
def test_get_parts_no_customize(self):
config = model.Distribution(channel='dev').to_config(
test_config.TestConfig())
all_parts = parts.get_parts(config)
self.assertEqual('test.signing.bundle_id', all_parts['app'].identifier)
self.assertEqual('test.signing.bundle_id.framework',
all_parts['framework'].identifier)
self.assertEqual(
'test.signing.bundle_id.framework.AlertNotificationService',
all_parts['helper-alerts'].identifier)
self.assertEqual('test.signing.bundle_id.helper',
all_parts['helper-app'].identifier)
def test_get_parts_customize(self):
config = model.Distribution(
channel='canary',
app_name_fragment='Canary',
product_dirname='canary',
creator_code='cana',
channel_customize=True).to_config(test_config.TestConfig())
all_parts = parts.get_parts(config)
self.assertEqual('test.signing.bundle_id.canary',
all_parts['app'].identifier)
self.assertEqual('test.signing.bundle_id.framework',
all_parts['framework'].identifier)
self.assertEqual(
'test.signing.bundle_id.canary.framework.AlertNotificationService',
all_parts['helper-alerts'].identifier)
self.assertEqual('test.signing.bundle_id.helper',
all_parts['helper-app'].identifier)
def test_part_options(self):
all_parts = parts.get_parts(test_config.TestConfig())
self.assertEqual(
model.CodeSignOptions.RESTRICT
| model.CodeSignOptions.LIBRARY_VALIDATION
| model.CodeSignOptions.KILL
| model.CodeSignOptions.HARDENED_RUNTIME, all_parts['app'].options)
self.assertEqual(
model.CodeSignOptions.RESTRICT
| model.CodeSignOptions.LIBRARY_VALIDATION
| model.CodeSignOptions.KILL
| model.CodeSignOptions.HARDENED_RUNTIME,
all_parts['helper-app'].options)
self.assertEqual(
model.CodeSignOptions.RESTRICT | model.CodeSignOptions.KILL
| model.CodeSignOptions.HARDENED_RUNTIME,
all_parts['helper-renderer-app'].options)
self.assertEqual(
model.CodeSignOptions.RESTRICT | model.CodeSignOptions.KILL
| model.CodeSignOptions.HARDENED_RUNTIME,
all_parts['helper-gpu-app'].options)
self.assertEqual(
model.CodeSignOptions.RESTRICT | model.CodeSignOptions.KILL
| model.CodeSignOptions.HARDENED_RUNTIME,
all_parts['helper-plugin-app'].options)
self.assertEqual(
model.CodeSignOptions.RESTRICT
| model.CodeSignOptions.LIBRARY_VALIDATION
| model.CodeSignOptions.KILL
| model.CodeSignOptions.HARDENED_RUNTIME,
all_parts['crashpad'].options)
self.assertEqual(
model.CodeSignOptions.RESTRICT
| model.CodeSignOptions.LIBRARY_VALIDATION
| model.CodeSignOptions.KILL
| model.CodeSignOptions.HARDENED_RUNTIME,
all_parts['helper-alerts'].options)
self.assertEqual(
model.CodeSignOptions.RESTRICT
| model.CodeSignOptions.LIBRARY_VALIDATION
| model.CodeSignOptions.KILL
| model.CodeSignOptions.HARDENED_RUNTIME,
all_parts['app-mode-app'].options)
def _get_plist_read(other_version):
def _plist_read(*args):
path = args[0]
first_slash = path.find('/')
path = path[first_slash + 1:]
plists = {
'$W/App Product.app/Contents/Info.plist': {
'KSVersion': '99.0.9999.99'
},
'$W/App Product.app/Contents/Frameworks/Product Framework.framework/Resources/Info.plist':
{
'CFBundleShortVersionString': other_version
}
}
return plists[path]
return _plist_read
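def _example_plist_read():
    # Hedged sketch (not part of the original suite): _get_plist_read builds a
    # read_plist stub whose framework version varies per test; the version
    # sanity check passes only when it matches the app's KSVersion.
    read_plist = _get_plist_read('99.0.9999.99')
    return read_plist('/$W/App Product.app/Contents/Info.plist')['KSVersion']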
@mock.patch.multiple('signing.signing',
**{m: mock.DEFAULT for m in ('sign_part', 'verify_part')})
@mock.patch.multiple('signing.commands', **{
m: mock.DEFAULT
for m in ('copy_files', 'move_file', 'make_dir', 'run_command')
})
@mock.patch('signing.model._get_identity_hash', _get_identity_hash)
class TestSignChrome(unittest.TestCase):
def setUp(self):
self.paths = model.Paths('/$I', '/$O', '/$W')
@mock.patch('signing.parts._sanity_check_version_keys')
def test_sign_chrome(self, *args, **kwargs):
manager = mock.Mock()
for kwarg in kwargs:
manager.attach_mock(kwargs[kwarg], kwarg)
dist = model.Distribution()
config = dist.to_config(test_config.TestConfig())
parts.sign_chrome(self.paths, config, sign_framework=True)
# No files should be moved.
self.assertEqual(0, kwargs['move_file'].call_count)
# Test that the provisioning profile is copied.
self.assertEqual(kwargs['copy_files'].mock_calls, [
mock.call.copy_files(
'/$I/Product Packaging/provisiontest.identity.provisionprofile',
'/$W/App Product.app/Contents/embedded.provisionprofile')
])
# Ensure that all the parts are signed.
signed_paths = [
call[1][2].path for call in kwargs['sign_part'].mock_calls
]
self.assertEqual(
set([p.path for p in parts.get_parts(config).values()]),
set(signed_paths))
# Make sure that the framework and the app are the last two parts that
# are signed.
self.assertEqual(signed_paths[-2:], [
'App Product.app/Contents/Frameworks/Product Framework.framework',
'App Product.app'
])
self.assertEqual(kwargs['run_command'].mock_calls, [
mock.call.run_command([
'codesign', '--display', '--requirements', '-', '--verbose=5',
'/$W/App Product.app'
]),
mock.call.run_command(
['spctl', '--assess', '-vv', '/$W/App Product.app']),
])
@mock.patch('signing.parts._sanity_check_version_keys')
def test_sign_chrome_no_assess(self, *args, **kwargs):
dist = model.Distribution()
class Config(test_config.TestConfig):
@property
def run_spctl_assess(self):
return False
config = dist.to_config(Config())
parts.sign_chrome(self.paths, config, sign_framework=True)
self.assertEqual(kwargs['run_command'].mock_calls, [
mock.call.run_command([
'codesign', '--display', '--requirements', '-', '--verbose=5',
'/$W/App Product.app'
]),
])
@mock.patch('signing.parts._sanity_check_version_keys')
def test_sign_chrome_no_provisioning(self, *args, **kwargs):
dist = model.Distribution()
class Config(test_config.TestConfig):
@property
def provisioning_profile_basename(self):
return None
config = dist.to_config(Config())
parts.sign_chrome(self.paths, config, sign_framework=True)
self.assertEqual(0, kwargs['copy_files'].call_count)
@mock.patch('signing.parts._sanity_check_version_keys')
def test_sign_chrome_no_framework(self, *args, **kwargs):
manager = mock.Mock()
for kwarg in kwargs:
manager.attach_mock(kwargs[kwarg], kwarg)
dist = model.Distribution()
config = dist.to_config(test_config.TestConfig())
parts.sign_chrome(self.paths, config, sign_framework=False)
# No files should be moved.
self.assertEqual(0, kwargs['move_file'].call_count)
# Test that the provisioning profile is copied.
self.assertEqual(kwargs['copy_files'].mock_calls, [
mock.call.copy_files(
'/$I/Product Packaging/provisiontest.identity.provisionprofile',
'/$W/App Product.app/Contents/embedded.provisionprofile')
])
# Ensure that only the app is signed.
signed_paths = [
call[1][2].path for call in kwargs['sign_part'].mock_calls
]
self.assertEqual(signed_paths, ['App Product.app'])
self.assertEqual(kwargs['run_command'].mock_calls, [
mock.call.run_command([
'codesign', '--display', '--requirements', '-', '--verbose=5',
'/$W/App Product.app'
]),
mock.call.run_command(
['spctl', '--assess', '-vv', '/$W/App Product.app']),
])
@mock.patch(
'signing.commands.read_plist',
side_effect=_get_plist_read('99.0.9999.99'))
def test_sanity_check_ok(self, read_plist, **kwargs):
config = model.Distribution().to_config(test_config.TestConfig())
parts.sign_chrome(self.paths, config, sign_framework=True)
@mock.patch(
'signing.commands.read_plist',
side_effect=_get_plist_read('55.0.5555.55'))
def test_sanity_check_bad(self, read_plist, **kwargs):
config = model.Distribution().to_config(test_config.TestConfig())
self.assertRaises(
ValueError, lambda: parts.sign_chrome(
self.paths, config, sign_framework=True))
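def _example_patch_multiple(module_path, names):
    """Hedged sketch of the decorator pattern used throughout this suite:
    passing mock.DEFAULT for each name makes mock.patch.multiple hand every
    replacement MagicMock to the test method as a keyword argument."""
    return mock.patch.multiple(module_path,
                               **{m: mock.DEFAULT for m in names})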
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class GuestAgentsOperations:
"""GuestAgentsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.connectedvmware.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _create_initial(
self,
resource_group_name: str,
virtual_machine_name: str,
name: str,
body: Optional["_models.GuestAgent"] = None,
**kwargs: Any
) -> "_models.GuestAgent":
cls = kwargs.pop('cls', None) # type: ClsType["_models.GuestAgent"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-10-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualMachineName': self._serialize.url("virtual_machine_name", virtual_machine_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
if body is not None:
body_content = self._serialize.body(body, 'GuestAgent')
else:
body_content = None
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('GuestAgent', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('GuestAgent', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ConnectedVMwarevSphere/virtualMachines/{virtualMachineName}/guestAgents/{name}'} # type: ignore
async def begin_create(
self,
resource_group_name: str,
virtual_machine_name: str,
name: str,
body: Optional["_models.GuestAgent"] = None,
**kwargs: Any
) -> AsyncLROPoller["_models.GuestAgent"]:
"""Implements GuestAgent PUT method.
Create Or Update GuestAgent.
:param resource_group_name: The Resource Group Name.
:type resource_group_name: str
:param virtual_machine_name: Name of the vm.
:type virtual_machine_name: str
        :param name: Name of the GuestAgent.
:type name: str
:param body: Request payload.
:type body: ~azure.mgmt.connectedvmware.models.GuestAgent
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either GuestAgent or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.connectedvmware.models.GuestAgent]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.GuestAgent"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_initial(
resource_group_name=resource_group_name,
virtual_machine_name=virtual_machine_name,
name=name,
body=body,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('GuestAgent', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualMachineName': self._serialize.url("virtual_machine_name", virtual_machine_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ConnectedVMwarevSphere/virtualMachines/{virtualMachineName}/guestAgents/{name}'} # type: ignore
async def get(
self,
resource_group_name: str,
virtual_machine_name: str,
name: str,
**kwargs: Any
) -> "_models.GuestAgent":
"""Gets GuestAgent.
Implements GuestAgent GET method.
:param resource_group_name: The Resource Group Name.
:type resource_group_name: str
:param virtual_machine_name: Name of the vm.
:type virtual_machine_name: str
:param name: Name of the GuestAgent.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: GuestAgent, or the result of cls(response)
:rtype: ~azure.mgmt.connectedvmware.models.GuestAgent
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.GuestAgent"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-10-01-preview"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualMachineName': self._serialize.url("virtual_machine_name", virtual_machine_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('GuestAgent', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ConnectedVMwarevSphere/virtualMachines/{virtualMachineName}/guestAgents/{name}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
virtual_machine_name: str,
name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-10-01-preview"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualMachineName': self._serialize.url("virtual_machine_name", virtual_machine_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ConnectedVMwarevSphere/virtualMachines/{virtualMachineName}/guestAgents/{name}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
virtual_machine_name: str,
name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes an GuestAgent.
Implements GuestAgent DELETE method.
:param resource_group_name: The Resource Group Name.
:type resource_group_name: str
:param virtual_machine_name: Name of the vm.
:type virtual_machine_name: str
:param name: Name of the GuestAgent.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
virtual_machine_name=virtual_machine_name,
name=name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualMachineName': self._serialize.url("virtual_machine_name", virtual_machine_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ConnectedVMwarevSphere/virtualMachines/{virtualMachineName}/guestAgents/{name}'} # type: ignore
def list_by_vm(
self,
resource_group_name: str,
virtual_machine_name: str,
**kwargs: Any
) -> AsyncIterable["_models.GuestAgentList"]:
"""Implements GET GuestAgent in a vm.
Returns the list of GuestAgent of the given vm.
:param resource_group_name: The Resource Group Name.
:type resource_group_name: str
:param virtual_machine_name: Name of the vm.
:type virtual_machine_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either GuestAgentList or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.connectedvmware.models.GuestAgentList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.GuestAgentList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-10-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_vm.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualMachineName': self._serialize.url("virtual_machine_name", virtual_machine_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('GuestAgentList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_vm.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ConnectedVMwarevSphere/virtualMachines/{virtualMachineName}/guestAgents'} # type: ignore
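# Hedged usage sketch (not generated code). Assumes an authenticated client
# object that exposes this operations group as ``client.guest_agents``, as
# the class docstring describes.
async def _example_guest_agent_lifecycle(client):
    poller = await client.guest_agents.begin_create(
        'my-rg', 'my-vm', 'default', body=None)
    agent = await poller.result()  # waits for the long-running PUT to finish
    async for item in client.guest_agents.list_by_vm('my-rg', 'my-vm'):
        pass  # each item is a deserialized GuestAgent model
    delete_poller = await client.guest_agents.begin_delete(
        'my-rg', 'my-vm', agent.name)
    await delete_poller.result()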
|
|
# -*- coding: utf-8 -*-
import base64
import pickle
import random
from collections import deque
from copy import copy
try:
from itertools import combinations
except ImportError:
def combinations(items, n):
if n == 0:
yield []
else:
for i in xrange(len(items)):
for cc in combinations(items[i + 1:], n - 1):
yield [items[i]] + cc
from django.db.models import get_models
from django.db.models.fields.related import (ForeignKey, OneToOneField,
ManyToManyField)
from django.core.exceptions import SuspiciousOperation
from django.conf import settings
from django.utils.hashcompat import md5_constructor
from django.utils.importlib import import_module
from django.utils.simplejson import dumps
try:
from django.db.models.fields.generic import GenericRelation
except ImportError:
from django.contrib.contenttypes.generic import GenericRelation
try:
qbe_formats = getattr(settings, "QBE_FORMATS_EXPORT", "qbe_formats")
formats = import_module(qbe_formats).formats
except ImportError:
from django_qbe.exports import formats
formats # Makes pyflakes happy
def qbe_models(admin_site=None, only_admin_models=False, json=False):
app_models = get_models(include_auto_created=True, include_deferred=True)
app_models_with_no_includes = get_models(include_auto_created=False,
include_deferred=False)
if admin_site:
admin_models = [m for m, a in admin_site._registry.items()]
else:
admin_models = []
if only_admin_models:
app_models = admin_models
graphs = {}
def get_field_attributes(field):
return {field.name: {
'name': field.name,
'type': type(field).__name__,
'blank': field.blank,
'label': u"%s" % field.verbose_name.lower().capitalize(),
'primary': field.primary_key,
}}
def get_through_fields(field):
# Deprecated
through_fields = []
for through_field in field.rel.through._meta.fields:
label = through_field.verbose_name.lower().capitalize()
through_fields_dic = {
'name': through_field.name,
'type': type(through_field).__name__,
'blank': through_field.blank,
'label': u"%s" % label,
}
if hasattr(through_field.rel, "to"):
through_rel = through_field.rel
through_mod = through_rel.to.__module__.split(".")[-2]
through_name = through_mod.lower().capitalize()
through_target = {
'name': through_name,
'model': through_rel.to.__name__,
'field': through_rel.get_related_field().name,
}
through_fields_dic.update({
"target": through_target,
})
through_fields.append(through_fields_dic)
return through_fields
def get_target(field):
name = field.rel.to.__module__.split(".")[-2].lower().capitalize()
target = {
'name': name,
'model': field.rel.to.__name__,
'field': field.rel.to._meta.pk.name,
}
if hasattr(field.rel, 'through'):
name = field.rel.through.__module__.split(".")[-2]
target.update({
'through': {
'name': name.lower().capitalize(),
'model': field.rel.through.__name__,
'field': field.rel.through._meta.pk.name,
}
})
return target
def get_target_relation(field, extras=""):
target = get_target(field)
relation = {
'target': target,
'type': type(field).__name__,
'source': field.name,
'arrows': extras,
}
return target, relation
def add_relation(model, field, extras=""):
target, relation = get_target_relation(field, extras=extras)
if relation not in model['relations']:
model['relations'].append(relation)
model['fields'][field.name].update({'target': target})
return model
for app_model in app_models:
model = {
'name': app_model.__name__,
'fields': {},
'relations': [],
'primary': app_model._meta.pk.name,
'collapse': ((app_model not in admin_models) or
(app_model not in app_models_with_no_includes)),
'is_auto': app_model not in app_models_with_no_includes,
}
for field in app_model._meta.fields:
field_attributes = get_field_attributes(field)
model['fields'].update(field_attributes)
if isinstance(field, ForeignKey):
model = add_relation(model, field)
elif isinstance(field, OneToOneField):
extras = "" # "[arrowhead=none arrowtail=none]"
model = add_relation(model, field, extras=extras)
if app_model._meta.many_to_many:
for field in app_model._meta.many_to_many:
field_attributes = get_field_attributes(field)
model['fields'].update(field_attributes)
if isinstance(field, ManyToManyField):
extras = "" # "[arrowhead=normal arrowtail=normal]"
model = add_relation(model, field, extras=extras)
elif isinstance(field, GenericRelation):
extras = "" # '[style="dotted"]
# [arrowhead=normal arrowtail=normal]'
model = add_relation(model, field, extras=extras)
app_title = app_model._meta.app_label.title().lower().capitalize()
if app_title not in graphs:
graphs[app_title] = {}
graphs[app_title].update({app_model.__name__: model})
if json:
return dumps(graphs)
else:
return graphs
def qbe_graph(admin_site=None, directed=False):
models = qbe_models(admin_site)
graph = {}
for k, v in models.items():
for l, w in v.items():
key = "%s.%s" % (k, l)
if key not in graph:
graph[key] = []
for relation in w['relations']:
source = relation['source']
target = relation['target']
if "through" in target:
through = target["through"]
model = "%s.%s" % (through['name'], through['model'])
value = (source, model, through['field'])
else:
model = "%s.%s" % (target['name'], target['model'])
value = (source, model, target['field'])
if value not in graph[key]:
graph[key].append(value)
if not directed:
if model not in graph:
graph[model] = []
target_field = target['field']
target_value = (target_field, key, source)
if target_value not in graph[model]:
graph[model].append(target_value)
if not graph[key]:
del graph[key]
return graph
def qbe_tree(graph, nodes, root=None):
"""
Given a graph, nodes to explore and an optinal root, do a breadth-first
search in order to return the tree.
"""
if root:
start = root
else:
index = random.randint(0, len(nodes) - 1)
start = nodes[index]
    # A queue to do BFS instead of DFS
to_visit = deque()
cnodes = copy(nodes)
visited = set()
# Format is (parent, parent_edge, neighbor, neighbor_field)
to_visit.append((None, None, start, None))
tree = {}
while len(to_visit) != 0 and nodes:
        parent, parent_edge, v, v_edge = to_visit.popleft()
# Prune
if v in nodes:
nodes.remove(v)
node = graph[v]
if v not in visited and len(node) > 1:
visited.add(v)
# Preorder process
if all((parent, parent_edge, v, v_edge)):
if parent not in tree:
tree[parent] = []
if (parent_edge, v, v_edge) not in tree[parent]:
tree[parent].append((parent_edge, v, v_edge))
if v not in tree:
tree[v] = []
if (v_edge, parent, parent_edge) not in tree[v]:
tree[v].append((v_edge, parent, parent_edge))
# Iteration
for node_edge, neighbor, neighbor_edge in node:
value = (v, node_edge, neighbor, neighbor_edge)
to_visit.append(value)
remove_leafs(tree, cnodes)
return tree, (len(nodes) == 0)
def remove_leafs(tree, nodes):
def get_leafs(tree, nodes):
return [node for node, edges in tree.items()
if len(edges) < 2 and node not in nodes]
def delete_edge_leafs(tree, leaf):
for node, edges in tree.items():
for node_edge, neighbor, neighbor_edge in edges:
if leaf == neighbor:
edge = (node_edge, neighbor, neighbor_edge)
tree[node].remove(edge)
del tree[leaf]
leafs = get_leafs(tree, nodes)
iterations = 0
    # Safety guard: stop pruning after at most len(tree) ** 2 passes.
    while leafs and iterations < len(tree) ** 2:
        for node in leafs:
            if node in tree:
                delete_edge_leafs(tree, node)
        leafs = get_leafs(tree, nodes)
        iterations += 1
return tree
def qbe_forest(graph, nodes):
forest = []
for node, edges in graph.items():
tree, are_all = qbe_tree(graph, copy(nodes), root=node)
if are_all and tree not in forest:
forest.append(tree)
return sorted(forest, cmp=lambda x, y: cmp(len(x), len(y)))
def find_all_paths(graph, start_node, end_node, path=None):
if not path:
path = []
path = path + [start_node]
if start_node == end_node:
return [path]
if start_node not in graph:
return []
paths = []
for source_edge, target_node, target_edge in graph[start_node]:
if target_node not in path:
newpaths = find_all_paths(graph, target_node, end_node, path)
for newpath in newpaths:
paths.append(newpath)
return paths
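def _example_find_all_paths():
    # Hedged sketch: a toy adjacency map in the same
    # (source_edge, target_node, target_edge) format produced by qbe_graph().
    toy = {
        'app.A': [('b_id', 'app.B', 'id')],
        'app.B': [('c_id', 'app.C', 'id')],
        'app.C': [],
    }
    return find_all_paths(toy, 'app.A', 'app.C')  # [['app.A', 'app.B', 'app.C']]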
def find_minimal_paths(graph, start_node, end_node):
def find_all_paths(graph, start_node, end_node, start_edge, end_edge,
path=None, minimun=float("Inf")):
if not path:
path = []
path = path + [start_node]
if start_node == end_node:
return [path], minimun
if start_node not in graph:
return [], minimun
paths = []
if len(path) < minimun:
for source_edge, target_node, target_edge in graph[start_node]:
if target_node not in path:
newpaths, minimun = find_all_paths(graph, target_node,
end_node,
target_edge,
source_edge,
path, minimun)
for newpath in newpaths:
newpath_length = len(newpath)
if minimun > newpath_length:
minimun = newpath_length
if newpath not in paths:
paths.append(newpath)
return paths, minimun
paths, minimun = find_all_paths(graph, start_node, end_node,
start_edge=None, end_edge=None,
path=None, minimun=float("Inf"))
return paths
def _combine(items, val=None, paths=None, length=None):
if not paths:
paths = []
if not length:
length = len(items)
if not val:
val = []
if len(val) == length - 1 and len(items) == 1:
return [(val + [i]) for i in items[0]]
for i, item in enumerate(items[:-1]):
for value in item:
val.append(value)
path = _combine(items[i + 1:], val, paths, length)
val.pop()
def visited_path(x):
return x not in paths
path = filter(visited_path, path)
paths.extend(path)
return paths
def combine(items, k=None):
"""
Create a matrix in wich each row is a tuple containing one of solutions or
solution k-esima.
"""
length_items = len(items)
lengths = [len(i) for i in items]
length = reduce(lambda x, y: x * y, lengths)
repeats = [reduce(lambda x, y: x * y, lengths[i:])
for i in range(1, length_items)] + [1]
if k is not None:
k = k % length
# Python division by default is integer division (~ floor(a/b))
indices = [(k % (lengths[i] * repeats[i])) / repeats[i]
for i in range(length_items)]
return [items[i][indices[i]] for i in range(length_items)]
else:
matrix = []
for i, item in enumerate(items):
row = []
for subset in item:
row.extend([subset] * repeats[i])
times = length / len(row)
matrix.append(row * times)
# Transpose the matrix or return the columns instead rows
return zip(*matrix)
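def _example_combine():
    # Hedged sketch: without k, combine() yields the full cartesian product;
    # with k, only the k-th row is materialized.
    assert combine([[1, 2], [3, 4]]) == [(1, 3), (1, 4), (2, 3), (2, 4)]
    assert combine([[1, 2], [3, 4]], k=2) == [2, 3]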
def graphs_join(graphs):
    # Joining the combined paths is not implemented yet.
    print "Combining %s elements" % len(graphs)
    return []
def autocomplete_graph(admin_site, current_models, directed=False):
graph = qbe_graph(admin_site, directed=directed)
valid_paths = []
for c, d in combinations(current_models, 2):
paths = find_minimal_paths(graph, c, d)
combined_sets = combine(paths)
for combined_set in combined_sets:
path = graphs_join(combined_set)
valid_paths.append(path)
# for path in paths:
# if all(map(lambda x: x in path, current_models)):
# if path not in valid_paths:
# valid_paths.append(path)
return sorted(valid_paths, cmp=lambda x, y: cmp(len(x), len(y)))
# Taken from django.contrib.sessions.backends.base
def pickle_encode(session_dict):
"Returns the given session dictionary pickled and encoded as a string."
pickled = pickle.dumps(session_dict, pickle.HIGHEST_PROTOCOL)
pickled_md5 = md5_constructor(pickled + settings.SECRET_KEY).hexdigest()
return base64.encodestring(pickled + pickled_md5)
# Adapted from django.contrib.sessions.backends.base
def pickle_decode(session_data):
# The '+' character is translated to ' ' in request
session_data = session_data.replace(" ", "+")
# The length of the encoded string should be a multiple of 4
    while len(session_data) % 4 != 0:
session_data += u"="
encoded_data = base64.decodestring(session_data)
pickled, tamper_check = encoded_data[:-32], encoded_data[-32:]
pickled_md5 = md5_constructor(pickled + settings.SECRET_KEY).hexdigest()
if pickled_md5 != tamper_check:
raise SuspiciousOperation("User tampered with session cookie.")
try:
return pickle.loads(pickled)
# Unpickling can cause a variety of exceptions. If something happens,
# just return an empty dictionary (an empty session).
except:
return {}
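def _example_session_roundtrip():
    # Hedged sketch: the two helpers above are symmetric, so decoding what was
    # just encoded restores the original dict (requires settings.SECRET_KEY).
    data = {'user_id': 1}
    return pickle_decode(pickle_encode(data)) == data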
|
|
"""
This module implements a transaction manager that can be used to define
transaction handling in a request or view function. It is used by transaction
control middleware and decorators.
The transaction manager can be in managed or in auto state. Auto state means the
system is using a commit-on-save strategy (actually it's more like
commit-on-change). As soon as the .save() or .delete() (or related) methods are
called, a commit is made.
Managed transactions don't do those commits, but will need some kind of manual
or implicit commits or rollbacks.
"""
import warnings
from functools import wraps
from django.db import connections, DatabaseError, DEFAULT_DB_ALIAS
from django.utils.decorators import available_attrs
class TransactionManagementError(Exception):
"""
This exception is thrown when something bad happens with transaction
management.
"""
pass
################
# Private APIs #
################
def get_connection(using=None):
"""
Get a database connection by name, or the default database connection
if no name is provided.
"""
if using is None:
using = DEFAULT_DB_ALIAS
return connections[using]
###########################
# Deprecated private APIs #
###########################
def abort(using=None):
"""
Roll back any ongoing transactions and clean the transaction management
state of the connection.
This method is to be used only in cases where using balanced
leave_transaction_management() calls isn't possible. For example after a
request has finished, the transaction state isn't known, yet the connection
must be cleaned up for the next request.
"""
get_connection(using).abort()
def enter_transaction_management(managed=True, using=None, forced=False):
"""
Enters transaction management for a running thread. It must be balanced with
the appropriate leave_transaction_management call, since the actual state is
managed as a stack.
The state and dirty flag are carried over from the surrounding block or
from the settings, if there is no surrounding block (dirty is always false
when no current block is running).
"""
get_connection(using).enter_transaction_management(managed, forced)
def leave_transaction_management(using=None):
"""
Leaves transaction management for a running thread. A dirty flag is carried
over to the surrounding block, as a commit will commit all changes, even
those from outside. (Commits are on connection level.)
"""
get_connection(using).leave_transaction_management()
def is_dirty(using=None):
"""
Returns True if the current transaction requires a commit for changes to
happen.
"""
return get_connection(using).is_dirty()
def set_dirty(using=None):
"""
    Sets a dirty flag for the current thread and code streak. This can be used
    inside a managed block of code to decide whether there are open
    changes waiting for commit.
"""
get_connection(using).set_dirty()
def set_clean(using=None):
"""
    Resets a dirty flag for the current thread and code streak. This can be
    used inside a managed block of code to decide whether a commit or rollback
    should happen.
"""
get_connection(using).set_clean()
def is_managed(using=None):
warnings.warn("'is_managed' is deprecated.",
DeprecationWarning, stacklevel=2)
def managed(flag=True, using=None):
warnings.warn("'managed' no longer serves a purpose.",
DeprecationWarning, stacklevel=2)
def commit_unless_managed(using=None):
warnings.warn("'commit_unless_managed' is now a no-op.",
DeprecationWarning, stacklevel=2)
def rollback_unless_managed(using=None):
warnings.warn("'rollback_unless_managed' is now a no-op.",
DeprecationWarning, stacklevel=2)
###############
# Public APIs #
###############
def get_autocommit(using=None):
"""
Get the autocommit status of the connection.
"""
return get_connection(using).get_autocommit()
def set_autocommit(autocommit, using=None):
"""
Set the autocommit status of the connection.
"""
return get_connection(using).set_autocommit(autocommit)
def commit(using=None):
"""
Commits a transaction and resets the dirty flag.
"""
get_connection(using).commit()
def rollback(using=None):
"""
Rolls back a transaction and resets the dirty flag.
"""
get_connection(using).rollback()
def savepoint(using=None):
"""
Creates a savepoint (if supported and required by the backend) inside the
current transaction. Returns an identifier for the savepoint that will be
used for the subsequent rollback or commit.
"""
return get_connection(using).savepoint()
def savepoint_rollback(sid, using=None):
"""
Rolls back the most recent savepoint (if one exists). Does nothing if
savepoints are not supported.
"""
get_connection(using).savepoint_rollback(sid)
def savepoint_commit(sid, using=None):
"""
Commits the most recent savepoint (if one exists). Does nothing if
savepoints are not supported.
"""
get_connection(using).savepoint_commit(sid)
def clean_savepoints(using=None):
"""
Resets the counter used to generate unique savepoint ids in this thread.
"""
get_connection(using).clean_savepoints()
def get_rollback(using=None):
"""
Gets the "needs rollback" flag -- for *advanced use* only.
"""
return get_connection(using).get_rollback()
def set_rollback(rollback, using=None):
"""
Sets or unsets the "needs rollback" flag -- for *advanced use* only.
When `rollback` is `True`, it triggers a rollback when exiting the
innermost enclosing atomic block that has `savepoint=True` (that's the
default). Use this to force a rollback without raising an exception.
When `rollback` is `False`, it prevents such a rollback. Use this only
after rolling back to a known-good state! Otherwise, you break the atomic
block and data corruption may occur.
"""
return get_connection(using).set_rollback(rollback)
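def _example_force_rollback(condition, using=None):
    """Hedged sketch: roll back an atomic block without raising, using the
    public set_rollback API documented above. ``condition`` is hypothetical."""
    with atomic(using=using):
        # ... perform writes here ...
        if condition:
            set_rollback(True, using=using)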
#################################
# Decorators / context managers #
#################################
class Atomic(object):
"""
This class guarantees the atomic execution of a given block.
An instance can be used either as a decorator or as a context manager.
When it's used as a decorator, __call__ wraps the execution of the
decorated function in the instance itself, used as a context manager.
When it's used as a context manager, __enter__ creates a transaction or a
savepoint, depending on whether a transaction is already in progress, and
__exit__ commits the transaction or releases the savepoint on normal exit,
and rolls back the transaction or to the savepoint on exceptions.
It's possible to disable the creation of savepoints if the goal is to
ensure that some code runs within a transaction without creating overhead.
A stack of savepoints identifiers is maintained as an attribute of the
connection. None denotes the absence of a savepoint.
This allows reentrancy even if the same AtomicWrapper is reused. For
    example, it's possible to define `oa = atomic('other')` and use `@oa` or
`with oa:` multiple times.
Since database connections are thread-local, this is thread-safe.
"""
def __init__(self, using, savepoint):
self.using = using
self.savepoint = savepoint
def __enter__(self):
connection = get_connection(self.using)
if not connection.in_atomic_block:
# Reset state when entering an outermost atomic block.
connection.commit_on_exit = True
connection.needs_rollback = False
if not connection.get_autocommit():
# Some database adapters (namely sqlite3) don't handle
# transactions and savepoints properly when autocommit is off.
# Turning autocommit back on isn't an option; it would trigger
# a premature commit. Give up if that happens.
if connection.features.autocommits_when_autocommit_is_off:
raise TransactionManagementError(
"Your database backend doesn't behave properly when "
"autocommit is off. Turn it on before using 'atomic'.")
# When entering an atomic block with autocommit turned off,
# Django should only use savepoints and shouldn't commit.
# This requires at least a savepoint for the outermost block.
if not self.savepoint:
raise TransactionManagementError(
"The outermost 'atomic' block cannot use "
"savepoint = False when autocommit is off.")
# Pretend we're already in an atomic block to bypass the code
# that disables autocommit to enter a transaction, and make a
# note to deal with this case in __exit__.
connection.in_atomic_block = True
connection.commit_on_exit = False
if connection.in_atomic_block:
# We're already in a transaction; create a savepoint, unless we
# were told not to or we're already waiting for a rollback. The
# second condition avoids creating useless savepoints and prevents
# overwriting needs_rollback until the rollback is performed.
if self.savepoint and not connection.needs_rollback:
sid = connection.savepoint()
connection.savepoint_ids.append(sid)
else:
connection.savepoint_ids.append(None)
else:
# We aren't in a transaction yet; create one.
# The usual way to start a transaction is to turn autocommit off.
# However, some database adapters (namely sqlite3) don't handle
# transactions and savepoints properly when autocommit is off.
# In such cases, start an explicit transaction instead, which has
# the side-effect of disabling autocommit.
if connection.features.autocommits_when_autocommit_is_off:
connection._start_transaction_under_autocommit()
connection.autocommit = False
else:
connection.set_autocommit(False)
connection.in_atomic_block = True
def __exit__(self, exc_type, exc_value, traceback):
connection = get_connection(self.using)
if connection.savepoint_ids:
sid = connection.savepoint_ids.pop()
else:
# Prematurely unset this flag to allow using commit or rollback.
connection.in_atomic_block = False
try:
if exc_value is None and not connection.needs_rollback:
if connection.in_atomic_block:
# Release savepoint if there is one
if sid is not None:
try:
connection.savepoint_commit(sid)
except DatabaseError:
connection.savepoint_rollback(sid)
raise
else:
# Commit transaction
try:
connection.commit()
except DatabaseError:
connection.rollback()
raise
else:
# This flag will be set to True again if there isn't a savepoint
# allowing to perform the rollback at this level.
connection.needs_rollback = False
if connection.in_atomic_block:
# Roll back to savepoint if there is one, mark for rollback
# otherwise.
if sid is None:
connection.needs_rollback = True
else:
connection.savepoint_rollback(sid)
else:
# Roll back transaction
connection.rollback()
finally:
# Outermost block exit when autocommit was enabled.
if not connection.in_atomic_block:
if connection.features.autocommits_when_autocommit_is_off:
connection.autocommit = True
else:
connection.set_autocommit(True)
# Outermost block exit when autocommit was disabled.
elif not connection.savepoint_ids and not connection.commit_on_exit:
connection.in_atomic_block = False
def __call__(self, func):
@wraps(func, assigned=available_attrs(func))
def inner(*args, **kwargs):
with self:
return func(*args, **kwargs)
return inner
def atomic(using=None, savepoint=True):
# Bare decorator: @atomic -- although the first argument is called
# `using`, it's actually the function being decorated.
if callable(using):
return Atomic(DEFAULT_DB_ALIAS, savepoint)(using)
# Decorator: @atomic(...) or context manager: with atomic(...): ...
else:
return Atomic(using, savepoint)
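def _example_atomic_forms():
    # Hedged sketch of the call forms atomic() supports. Defining these is
    # side-effect free; actually calling them requires a configured database.
    @atomic
    def bare_decorator():
        pass
    @atomic(using=DEFAULT_DB_ALIAS, savepoint=False)
    def parameterized_decorator():
        pass
    def context_manager():
        with atomic():
            pass
    return bare_decorator, parameterized_decorator, context_manager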
def _non_atomic_requests(view, using):
try:
view._non_atomic_requests.add(using)
except AttributeError:
view._non_atomic_requests = set([using])
return view
def non_atomic_requests(using=None):
if callable(using):
return _non_atomic_requests(using, DEFAULT_DB_ALIAS)
else:
if using is None:
using = DEFAULT_DB_ALIAS
return lambda view: _non_atomic_requests(view, using)
############################################
# Deprecated decorators / context managers #
############################################
class Transaction(object):
"""
Acts as either a decorator or a context manager. As a decorator it
takes a function and returns a wrapped function. As a context manager
it's used with the ``with`` statement. In either case, entering and
exiting are called before and after, respectively, the function/block
is executed.
autocommit, commit_on_success, and commit_manually contain the
implementations of entering and exiting.
"""
def __init__(self, entering, exiting, using):
self.entering = entering
self.exiting = exiting
self.using = using
def __enter__(self):
self.entering(self.using)
def __exit__(self, exc_type, exc_value, traceback):
self.exiting(exc_value, self.using)
def __call__(self, func):
@wraps(func)
def inner(*args, **kwargs):
with self:
return func(*args, **kwargs)
return inner
def _transaction_func(entering, exiting, using):
"""
Takes 3 things: an entering function (what to do to start this block of
transaction management), an exiting function (what to do to end it, on
both success and failure), and ``using``, which can be None (indicating
that using is DEFAULT_DB_ALIAS), a database alias, or a callable
(indicating that using is DEFAULT_DB_ALIAS and that the already-wrapped
function should be returned).
Returns either a Transaction object, which is both a decorator and a
context manager, or a wrapped function, if using is a callable.
"""
# Note that although the first argument is *called* `using`, it
# may actually be a function; @autocommit and @autocommit('foo')
# are both allowed forms.
if using is None:
using = DEFAULT_DB_ALIAS
if callable(using):
return Transaction(entering, exiting, DEFAULT_DB_ALIAS)(using)
return Transaction(entering, exiting, using)
def autocommit(using=None):
"""
Decorator that activates commit on save. This is Django's default behavior;
this decorator is useful if you globally activated transaction management in
your settings file and want the default behavior in some view functions.
"""
warnings.warn("autocommit is deprecated in favor of set_autocommit.",
DeprecationWarning, stacklevel=2)
def entering(using):
enter_transaction_management(managed=False, using=using)
def exiting(exc_value, using):
leave_transaction_management(using=using)
return _transaction_func(entering, exiting, using)
def commit_on_success(using=None):
"""
This decorator activates commit on response. This way, if the view function
runs successfully, a commit is made; if the view function produces an
exception, a rollback is made. This is one of the most common ways to do
transaction control in Web apps.
"""
warnings.warn("commit_on_success is deprecated in favor of atomic.",
DeprecationWarning, stacklevel=2)
def entering(using):
enter_transaction_management(using=using)
def exiting(exc_value, using):
try:
if exc_value is not None:
if is_dirty(using=using):
rollback(using=using)
else:
if is_dirty(using=using):
try:
commit(using=using)
except:
rollback(using=using)
raise
finally:
leave_transaction_management(using=using)
return _transaction_func(entering, exiting, using)
def commit_manually(using=None):
"""
Decorator that activates manual transaction control. It just disables
automatic transaction control and doesn't do any commit/rollback of its
own -- it's up to the user to call the commit and rollback functions
themselves.
"""
warnings.warn("commit_manually is deprecated in favor of set_autocommit.",
DeprecationWarning, stacklevel=2)
def entering(using):
enter_transaction_management(using=using)
def exiting(exc_value, using):
leave_transaction_management(using=using)
return _transaction_func(entering, exiting, using)
def commit_on_success_unless_managed(using=None, savepoint=False):
"""
Transitory API to preserve backwards-compatibility while refactoring.
Once the legacy transaction management is fully deprecated, this should
simply be replaced by atomic. Until then, it's necessary to guarantee that
a commit occurs on exit, which atomic doesn't do when it's nested.
Unlike atomic, savepoint defaults to False because that's closer to the
legacy behavior.
"""
connection = get_connection(using)
if connection.get_autocommit() or connection.in_atomic_block:
return atomic(using, savepoint)
else:
def entering(using):
pass
def exiting(exc_value, using):
set_dirty(using=using)
return _transaction_func(entering, exiting, using)
|
|
# -*- coding: utf-8 -*-
"""
GOALS:
1) vsmany
* works reasonably for very few and very many
* starts with small k and then k becomes a percent or log percent
* distinctiveness from different locations
2) 1-vs-1
* uses distinctiveness and foreground when available
* start with ratio test and ransac
3) First N decisions are interactive until we learn a good threshold
4) Always show numbers between 0 and 1; spatial verification is based on
the single best exemplar
x - build encoder
x - test encoder
x - monotonicity (both nondecreasing and strictly increasing)
x - cache encoder
x - cache maintenance (deleters and listers)
o - Incremental learning
o - Species sensitivity
* Add ability for user to relearn encoder from labeled database.
TODO:
* One class SVM http://scikit-learn.org/stable/auto_examples/svm/plot_oneclass.html
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import re
import dtool_ibeis
import numpy as np
import utool as ut
import vtool_ibeis as vt
import six # NOQA
from functools import partial
from os.path import join
from ibeis import constants as const
from ibeis.init import sysres
print, rrr, profile = ut.inject2(__name__)
def compare_score_pdfs(testres):
"""
CommandLine:
python -m ibeis.expt.test_result --exec-compare_score_pdfs --show --present
python -m ibeis.expt.test_result --exec-compare_score_pdfs --show --present --nocache
python -m ibeis.expt.test_result --exec-compare_score_pdfs --show --present -a timectrl:qindex=0:50
Example:
>>> # DISABLE_DOCTEST
>>> from ibeis.expt.test_result import * # NOQA
>>> import ibeis
>>> defaultdb = 'PZ_MTEST'
>>> defaultdb = 'PZ_Master1'
>>> ibs, testres = ibeis.testdata_expts(
>>> defaultdb=defaultdb, a=['timectrl'], t=['best'])
>>> testres.compare_score_pdfs()
>>> ut.quit_if_noshow()
>>> import plottool_ibeis as pt
>>> ut.show_if_requested()
"""
#from ibeis.init import main_helpers
import utool as ut
#import plottool_ibeis as pt
ut.ensureqt()
testres.draw_annot_scoresep(f='fail=False')
#pt.adjust_subplots(bottom=.25, top=.8)
encoder = testres.draw_feat_scoresep(f='fail=False', disttype=None)
#pt.adjust_subplots(bottom=.25, top=.8)
#encoder = testres.draw_feat_scoresep(f='fail=False', disttype=['lnbnn'])
#encoder = testres.draw_feat_scoresep(f='fail=False', disttype=['ratio'])
#encoder = testres.draw_feat_scoresep(f='fail=False', disttype=['L2_sift'])
encoder = testres.draw_feat_scoresep(f='fail=False', disttype=['lnbnn', 'fg'])
#pt.adjust_subplots(bottom=.25, top=.8)
#ibs, testres = main_helpers.testdata_expts(
# defaultdb=defaultdb, a=['timectrl'], t=['best:lnbnn_on=False,ratio_thresh=1.0'])
#encoder = testres.draw_feat_scoresep(f='fail=False', disttype=['ratio'])
#encoder = testres.draw_feat_scoresep(f='fail=False', disttype=['lnbnn'])
#encoder = testres.draw_feat_scoresep(f='fail=False', disttype=['L2_sift'])
# TODO:
return encoder
def draw_annot_scoresep(testres, f=None):
from ibeis.expt import experiment_drawing
experiment_drawing.draw_annot_scoresep(testres.ibs, testres, f=f)
def draw_feat_scoresep(testres, f=None, disttype=None):
r"""
SeeAlso:
ibeis.algo.hots.scorenorm.train_featscore_normalizer
CommandLine:
python -m ibeis --tf TestResult.draw_feat_scoresep --show
python -m ibeis --tf TestResult.draw_feat_scoresep --show -t default:sv_on=[True,False]
python -m ibeis --tf TestResult.draw_feat_scoresep --show --db PZ_Master1
python -m ibeis --tf TestResult.draw_feat_scoresep --show --db PZ_Master1 --disttype=L2_sift,fg
python -m ibeis --tf TestResult.draw_feat_scoresep --show --db PZ_Master1 --disttype=L2_sift
python -m ibeis --tf TestResult.draw_feat_scoresep --show --db PZ_MTEST -t best:lnbnn_on=True --namemode=True
python -m ibeis --tf TestResult.draw_feat_scoresep --show --db PZ_MTEST -t best:lnbnn_on=True --namemode=False
python -m ibeis --tf TestResult.draw_feat_scoresep --show --db PZ_MTEST --disttype=L2_sift
python -m ibeis --tf TestResult.draw_feat_scoresep --show --db PZ_MTEST --disttype=L2_sift -t best:SV=False
utprof.py -m ibeis --tf TestResult.draw_feat_scoresep --show --db PZ_Master1
utprof.py -m ibeis --tf TestResult.draw_feat_scoresep --show --db PZ_Master1 --fsvx=1:2
utprof.py -m ibeis --tf TestResult.draw_feat_scoresep --show --db PZ_Master1 --fsvx=0:1
utprof.py -m ibeis --tf TestResult.draw_feat_scoresep --show --db PZ_Master1 -t best:lnbnn_on=False,bar_l2_on=True --fsvx=0:1
# We want to query the oxford annots tagged as query
# and we want the database to contain
# K correct images per query, as well as the distractors
python -m ibeis --tf TestResult.draw_feat_scoresep --show --db Oxford -a default:qhas_any=\(query,\),dpername=1,exclude_reference=True,minqual=ok
python -m ibeis --tf TestResult.draw_feat_scoresep --show --db Oxford -a default:qhas_any=\(query,\),dpername=1,exclude_reference=True,minqual=good
python -m ibeis --tf get_annotcfg_list --db PZ_Master1 -a timectrl --acfginfo --verbtd --veryverbtd --nocache-aid
python -m ibeis --tf TestResult.draw_feat_scoresep --show --db PZ_MTEST --disttype=ratio
Example:
>>> # SCRIPT
>>> from ibeis.expt.test_result import * # NOQA
>>> from ibeis.init import main_helpers
>>> disttype = ut.get_argval('--disttype', type_=list, default=None)
>>> ibs, testres = main_helpers.testdata_expts(
>>> defaultdb='PZ_MTEST', a=['timectrl'], t=['best'])
>>> f = ut.get_argval(('--filt', '-f'), type_=list, default=[''])
>>> testres.draw_feat_scoresep(f=f)
>>> ut.show_if_requested()
"""
print('[testres] draw_feat_scoresep')
import plottool_ibeis as pt
def load_feat_scores(qreq_, qaids):
import ibeis # NOQA
from os.path import dirname, join # NOQA
# HACKY CACHE
cfgstr = qreq_.get_cfgstr(with_input=True)
cache_dir = join(dirname(dirname(ibeis.__file__)), 'TMP_FEATSCORE_CACHE')
namemode = ut.get_argval('--namemode', default=True)
fsvx = ut.get_argval('--fsvx', type_='fuzzy_subset',
default=slice(None, None, None))
threshx = ut.get_argval('--threshx', type_=int, default=None)
thresh = ut.get_argval('--thresh', type_=float, default=.9)
num = ut.get_argval('--num', type_=int, default=1)
cfg_components = [cfgstr, disttype, namemode, fsvx, threshx, thresh, f, num]
cache_cfgstr = ','.join(ut.lmap(six.text_type, cfg_components))
cache_hashid = ut.hashstr27(cache_cfgstr + '_v1')
cache_name = ('get_cfgx_feat_scores_' + cache_hashid)
@ut.cached_func(cache_name, cache_dir=cache_dir, key_argx=[],
use_cache=True)
def get_cfgx_feat_scores(qreq_, qaids):
from ibeis.algo.hots import scorenorm
cm_list = qreq_.execute(qaids)
# print('Done loading cached chipmatches')
tup = scorenorm.get_training_featscores(qreq_, cm_list, disttype,
namemode, fsvx, threshx,
thresh, num=num)
# print(ut.depth_profile(tup))
tp_scores, tn_scores, scorecfg = tup
return tp_scores, tn_scores, scorecfg
tp_scores, tn_scores, scorecfg = get_cfgx_feat_scores(qreq_, qaids)
return tp_scores, tn_scores, scorecfg
valid_case_pos = testres.case_sample2(filt_cfg=f, return_mask=False)
cfgx2_valid_qxs = ut.group_items(valid_case_pos.T[0], valid_case_pos.T[1])
test_qaids = testres.get_test_qaids()
cfgx2_valid_qaids = ut.map_dict_vals(ut.partial(ut.take, test_qaids), cfgx2_valid_qxs)
join_acfgs = True
# TODO: option to average over pipeline configurations
if join_acfgs:
groupxs = testres.get_cfgx_groupxs()
else:
groupxs = list(zip(range(len(testres.cfgx2_qreq_))))
grouped_qreqs = ut.apply_grouping(testres.cfgx2_qreq_, groupxs)
grouped_scores = []
for cfgxs, qreq_group in zip(groupxs, grouped_qreqs):
# testres.print_pcfg_info()
score_group = []
for cfgx, qreq_ in zip(cfgxs, qreq_group):
print('Loading cached chipmatches')
qaids = cfgx2_valid_qaids[cfgx]
tp_scores, tn_scores, scorecfg = load_feat_scores(qreq_, qaids)
score_group.append((tp_scores, tn_scores, scorecfg))
grouped_scores.append(score_group)
cfgx2_shortlbl = testres.get_short_cfglbls(join_acfgs=join_acfgs)
for score_group, lbl in zip(grouped_scores, cfgx2_shortlbl):
tp_scores = np.hstack(ut.take_column(score_group, 0))
tn_scores = np.hstack(ut.take_column(score_group, 1))
scorecfg = '+++'.join(ut.unique(ut.take_column(score_group, 2)))
# TODO: learn this score normalizer as a model
# encoder = vt.ScoreNormalizer(adjust=4, monotonize=False)
encoder = vt.ScoreNormalizer(adjust=2, monotonize=True)
encoder.fit_partitioned(tp_scores, tn_scores, verbose=False)
figtitle = 'Feature Scores: %s, %s' % (scorecfg, lbl)
fnum = None
vizkw = {}
sephack = ut.get_argflag('--sephack')
if not sephack:
vizkw['target_tpr'] = .95
vizkw['score_range'] = (0, 1.0)
encoder.visualize(
figtitle=figtitle, fnum=fnum,
with_scores=False,
#with_prebayes=True,
with_prebayes=False,
with_roc=True,
with_postbayes=False,
#with_postbayes=True,
**vizkw
)
icon = testres.ibs.get_database_icon()
if icon is not None:
pt.overlay_icon(icon, coords=(1, 0), bbox_alignment=(1, 0))
if ut.get_argflag('--contextadjust'):
pt.adjust_subplots(left=.1, bottom=.25, wspace=.2, hspace=.2)
pt.adjust_subplots(use_argv=True)
return encoder
def get_global_species_scorenorm_cachedir(ibs, species_text, ensure=True):
"""
Args:
species_text (str):
ensure (bool):
Returns:
str: species_cachedir
CommandLine:
python -m ibeis.control.IBEISControl --test-get_global_species_scorenorm_cachedir
Example:
>>> # ENABLE_DOCTEST
>>> from ibeis.control.IBEISControl import * # NOQA
>>> import ibeis # NOQA
>>> ibs = ibeis.opendb('testdb1')
>>> species_text = ibeis.const.TEST_SPECIES.ZEB_GREVY
>>> ensure = True
>>> species_cachedir = ibs.get_global_species_scorenorm_cachedir(species_text, ensure)
>>> resourcedir = ibs.get_ibeis_resource_dir()
>>> result = ut.relpath_unix(species_cachedir, resourcedir)
>>> print(result)
scorenorm/zebra_grevys
"""
scorenorm_cachedir = join(ibs.get_ibeis_resource_dir(),
const.PATH_NAMES.scorenormdir)
species_cachedir = join(scorenorm_cachedir, species_text)
if ensure:
ut.ensurepath(scorenorm_cachedir)
ut.ensuredir(species_cachedir)
return species_cachedir
def get_local_species_scorenorm_cachedir(ibs, species_text, ensure=True):
"""
"""
scorenorm_cachedir = join(ibs.get_cachedir(),
const.PATH_NAMES.scorenormdir)
species_cachedir = join(scorenorm_cachedir, species_text)
if ensure:
ut.ensuredir(scorenorm_cachedir)
ut.ensuredir(species_cachedir)
return species_cachedir
def get_global_distinctiveness_modeldir(ibs, ensure=True):
"""
Returns:
global_distinctdir (str): ibs internal directory
"""
global_distinctdir = sysres.get_global_distinctiveness_modeldir(ensure=ensure)
return global_distinctdir
def get_local_distinctiveness_modeldir(ibs):
"""
Returns:
distinctdir (str): ibs internal directory
"""
return ibs.distinctdir
class NormFeatScoreConfig(dtool_ibeis.Config):
_alias = 'nfscfg'
_param_info_list = [
ut.ParamInfo('disttype', None),
ut.ParamInfo('namemode', True),
ut.ParamInfo('fsvx', None, type_='fuzzy_subset', hideif=None),
ut.ParamInfo('threshx', None, hideif=None),
ut.ParamInfo('thresh', .9, hideif=lambda cfg: cfg['threshx'] is None),
ut.ParamInfo('num', 5),
# ut.ParamInfo('top_percent', None, hideif=None),
ut.ParamInfo('top_percent', .5, hideif=None),
]
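# Construction sketch (illustrative; exact dtool_ibeis semantics may vary):
#
#     datakw = NormFeatScoreConfig(disttype=['lnbnn', 'fg'], namemode=False)
#
# Each ParamInfo defines a key, a default, and optionally a ``hideif``
# rule that presumably drops the parameter from the generated cfgstr when
# it matches, keeping cache keys short. from_argv_dict() /
# from_argv_cfgs() build the same config from flags such as --nfscfg
# (see compare_featscores below).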
def compare_featscores():
"""
CommandLine:
ibeis --tf compare_featscores --db PZ_MTEST \
--nfscfg :disttype=[L2_sift,lnbnn],top_percent=[None,.5,.1] -a timectrl \
-p default:K=[1,2],normalizer_rule=name \
--save featscore{db}.png --figsize=13,20 --diskshow
ibeis --tf compare_featscores --db PZ_MTEST \
--nfscfg :disttype=[L2_sift,normdist,lnbnn],top_percent=[None,.5] -a timectrl \
-p default:K=[1],normalizer_rule=name,sv_on=[True,False] \
--save featscore{db}.png --figsize=13,10 --diskshow
ibeis --tf compare_featscores --nfscfg :disttype=[L2_sift,normdist,lnbnn] \
-a timectrl -p default:K=1,normalizer_rule=name --db PZ_Master1 \
--save featscore{db}.png --figsize=13,13 --diskshow
ibeis --tf compare_featscores --nfscfg :disttype=[L2_sift,normdist,lnbnn] \
-a timectrl -p default:K=1,normalizer_rule=name --db GZ_ALL \
--save featscore{db}.png --figsize=13,13 --diskshow
ibeis --tf compare_featscores --db GIRM_Master1 \
--nfscfg ':disttype=fg,L2_sift,normdist,lnbnn' \
-a timectrl -p default:K=1,normalizer_rule=name \
--save featscore{db}.png --figsize=13,13
ibeis --tf compare_featscores --nfscfg :disttype=[L2_sift,normdist,lnbnn] \
-a timectrl -p default:K=[1,2,3],normalizer_rule=name,sv_on=False \
--db PZ_Master1 --save featscore{db}.png \
--dpi=128 --figsize=15,20 --diskshow
ibeis --tf compare_featscores --show --nfscfg :disttype=[L2_sift,normdist] -a timectrl -p :K=1 --db PZ_MTEST
ibeis --tf compare_featscores --show --nfscfg :disttype=[L2_sift,normdist] -a timectrl -p :K=1 --db GZ_ALL
ibeis --tf compare_featscores --show --nfscfg :disttype=[L2_sift,normdist] -a timectrl -p :K=1 --db PZ_Master1
ibeis --tf compare_featscores --show --nfscfg :disttype=[L2_sift,normdist] -a timectrl -p :K=1 --db GIRM_Master1
ibeis --tf compare_featscores --db PZ_MTEST \
--nfscfg :disttype=[L2_sift,normdist,lnbnn],top_percent=[None,.5,.2] -a timectrl \
-p default:K=[1],normalizer_rule=name \
--save featscore{db}.png --figsize=13,20 --diskshow
Example:
>>> # DISABLE_DOCTEST
>>> from ibeis.algo.hots.scorenorm import * # NOQA
>>> result = compare_featscores()
>>> print(result)
>>> ut.quit_if_noshow()
>>> import plottool_ibeis as pt
>>> ut.show_if_requested()
"""
import plottool_ibeis as pt
import ibeis
nfs_cfg_list = NormFeatScoreConfig.from_argv_cfgs()
learnkw = {}
ibs, testres = ibeis.testdata_expts(
defaultdb='PZ_MTEST', a=['default'], p=['default:K=1'])
print('nfs_cfg_list = ' + ut.repr3(nfs_cfg_list))
encoder_list = []
lbl_list = []
varied_nfs_lbls = ut.get_varied_cfg_lbls(nfs_cfg_list)
varied_qreq_lbls = ut.get_varied_cfg_lbls(testres.cfgdict_list)
#varies_qreq_lbls
#func = ut.cached_func(cache_dir='.')(learn_featscore_normalizer)
for datakw, nlbl in zip(nfs_cfg_list, varied_nfs_lbls):
for qreq_, qlbl in zip(testres.cfgx2_qreq_, varied_qreq_lbls):
lbl = qlbl + ' ' + nlbl
cfgstr = '_'.join([datakw.get_cfgstr(), qreq_.get_full_cfgstr()])
try:
encoder = vt.ScoreNormalizer()
encoder.load(cfgstr=cfgstr)
except IOError:
print('datakw = %r' % (datakw,))
encoder = learn_featscore_normalizer(qreq_, datakw, learnkw)
encoder.save(cfgstr=cfgstr)
encoder_list.append(encoder)
lbl_list.append(lbl)
fnum = 1
# next_pnum = pt.make_pnum_nextgen(nRows=len(encoder_list), nCols=3)
next_pnum = pt.make_pnum_nextgen(nRows=len(encoder_list) + 1, nCols=3, start=3)
iconsize = 94
if len(encoder_list) > 3:
iconsize = 64
icon = qreq_.ibs.get_database_icon(max_dsize=(None, iconsize), aid=qreq_.qaids[0])
score_range = (0, .6)
for encoder, lbl in zip(encoder_list, lbl_list):
#encoder.visualize(figtitle=encoder.get_cfgstr(), with_prebayes=False, with_postbayes=False)
encoder._plot_score_support_hist(fnum, pnum=next_pnum(), titlesuf='\n' + lbl, score_range=score_range)
encoder._plot_prebayes(fnum, pnum=next_pnum())
encoder._plot_roc(fnum, pnum=next_pnum())
if icon is not None:
pt.overlay_icon(icon, coords=(1, 0), bbox_alignment=(1, 0))
nonvaried_lbl = ut.get_nonvaried_cfg_lbls(nfs_cfg_list)[0]
figtitle = str(qreq_) + '\n' + nonvaried_lbl
pt.set_figtitle(figtitle)
pt.adjust_subplots(hspace=.5, top=.92, bottom=.08, left=.1, right=.9)
pt.update_figsize()
pt.plt.tight_layout()
# pt.adjust_subplots(top=.95)
def learn_annotscore_normalizer(qreq_, learnkw={}):
"""
Takes the result of queries and trains a score encoder
Args:
qreq_ (ibeis.QueryRequest): query request object with hyper-parameters
Returns:
vtool_ibeis.ScoreNormalizer: encoder
CommandLine:
python -m ibeis --tf learn_annotscore_normalizer --show
Example:
>>> # ENABLE_DOCTEST
>>> from ibeis.algo.hots.scorenorm import * # NOQA
>>> import ibeis
>>> qreq_ = ibeis.testdata_qreq_(
>>> defaultdb='PZ_MTEST', a=['default'], p=['default'])
>>> encoder = learn_annotscore_normalizer(qreq_)
>>> ut.quit_if_noshow()
>>> encoder.visualize(figtitle=encoder.get_cfgstr())
>>> ut.show_if_requested()
"""
cm_list = qreq_.execute()
tup = get_training_annotscores(qreq_, cm_list)
tp_scores, tn_scores, good_tn_aidnid_pairs, good_tp_aidnid_pairs = tup
part_attrs = {
0: {'aid_pairs': good_tn_aidnid_pairs},
1: {'aid_pairs': good_tp_aidnid_pairs},
}
scores, labels, attrs = vt.flatten_scores(tp_scores, tn_scores,
part_attrs)
_learnkw = {'monotonize': True}
_learnkw.update(learnkw)
# timestamp = ut.get_timestamp()
encoder = vt.ScoreNormalizer(**_learnkw)
encoder.fit(scores, labels, attrs=attrs)
encoder.cfgstr = 'annotscore'
return encoder
def load_featscore_normalizer(normer_cfgstr):
r"""
Args:
normer_cfgstr (str): partial cfgstr used to fuzzy-load a saved encoder
CommandLine:
python -m ibeis.algo.hots.scorenorm --exec-load_featscore_normalizer --show
python -m ibeis.algo.hots.scorenorm --exec-load_featscore_normalizer --show --cfgstr=featscore
python -m ibeis.algo.hots.scorenorm --exec-load_featscore_normalizer --show --cfgstr=lovb
Example:
>>> # SCRIPT
>>> from ibeis.algo.hots.scorenorm import * # NOQA
>>> normer_cfgstr = ut.get_argval('--cfgstr', default='featscore')
>>> encoder = load_featscore_normalizer(normer_cfgstr)
>>> encoder.visualize(figtitle=encoder.get_cfgstr())
>>> ut.show_if_requested()
"""
encoder = vt.ScoreNormalizer()
# qreq_.lnbnn_normer.load(cfgstr=config2_.lnbnn_normer)
encoder.fuzzyload(partial_cfgstr=normer_cfgstr)
return encoder
def train_featscore_normalizer():
r"""
CommandLine:
python -m ibeis --tf train_featscore_normalizer --show
# Write Encoder
python -m ibeis --tf train_featscore_normalizer --db PZ_MTEST -t best -a default --fsvx=0 --threshx=1 --show
# Visualize encoder score adjustment
python -m ibeis --tf TestResult.draw_feat_scoresep --db PZ_MTEST -a timectrl -t best:lnbnn_normer=lnbnn_fg_featscore --show --nocache --nocache-hs
# Compare ranking with encoder vs without
python -m ibeis --tf draw_rank_cmc --db PZ_MTEST -a timectrl -t best:lnbnn_normer=[None,wulu] --show
python -m ibeis --tf draw_rank_cmc --db PZ_MTEST -a default -t best:lnbnn_normer=[None,wulu] --show
# Compare in ipynb
python -m ibeis --tf autogen_ipynb --ipynb --db PZ_MTEST -a default -t best:lnbnn_normer=[None,lnbnn_fg_0.9__featscore]
# Big Test
python -m ibeis --tf draw_rank_cmc --db PZ_Master1 -a timectrl -t best:lnbnn_normer=[None,lovb],lnbnn_norm_thresh=.5 --show
python -m ibeis --tf draw_rank_cmc --db PZ_Master1 -a timectrl -t best:lnbnn_normer=[None,jypz],lnbnn_norm_thresh=.1 --show
python -m ibeis --tf draw_rank_cmc --db PZ_Master1 -a timectrl -t best:lnbnn_normer=[None,jypz],lnbnn_norm_thresh=0 --show
# Big Train
python -m ibeis --tf learn_featscore_normalizer --db PZ_Master1 -a timectrl -t best:K=1 --fsvx=0 --threshx=1 --show
python -m ibeis --tf train_featscore_normalizer --db PZ_Master1 -a timectrl:has_none=photobomb -t best:K=1 --fsvx=0 --threshx=1 --show --ainfo
python -m ibeis --tf train_featscore_normalizer --db PZ_Master1 -a timectrl:has_none=photobomb -t best:K=1 --fsvx=0 --threshx=1 --show
python -m ibeis --tf train_featscore_normalizer --db PZ_Master1 -a timectrl:has_none=photobomb -t best:K=3 --fsvx=0 --threshx=1 --show
Example:
>>> # SCRIPT
>>> from ibeis.algo.hots.scorenorm import * # NOQA
>>> encoder = train_featscore_normalizer()
>>> encoder.visualize(figtitle=encoder.get_cfgstr())
>>> ut.show_if_requested()
"""
import ibeis
# TODO: training / loading / general external models
qreq_ = ibeis.testdata_qreq_(
defaultdb='PZ_MTEST', a=['default'], p=['default'])
datakw = NormFeatScoreConfig.from_argv_dict()
#datakw = dict(
# disttype=None,
# namemode=ut.get_argval('--namemode', default=True),
# fsvx=ut.get_argval('--fsvx', type_='fuzzy_subset',
# default=slice(None, None, None)),
# threshx=ut.get_argval('--threshx', type_=int, default=None),
# thresh=ut.get_argval('--thresh', type_=float, default=.9),
#)
encoder = learn_featscore_normalizer(qreq_, datakw=datakw)
encoder.save()
return encoder
def learn_featscore_normalizer(qreq_, datakw={}, learnkw={}):
r"""
Takes the result of queries and trains a score encoder
Args:
qreq_ (ibeis.QueryRequest): query request object with hyper-parameters
Returns:
vtool_ibeis.ScoreNormalizer: encoder
CommandLine:
python -m ibeis --tf learn_featscore_normalizer --show -t default:
python -m ibeis --tf learn_featscore_normalizer --show --fsvx=0 --threshx=1 --show
python -m ibeis --tf learn_featscore_normalizer --show -a default:size=40 -t default:fg_on=False,lnbnn_on=False,ratio_thresh=1.0,K=1,Knorm=6,sv_on=False,normalizer_rule=name --fsvx=0 --threshx=1 --show
python -m ibeis --tf learn_featscore_normalizer --show --disttype=ratio
python -m ibeis --tf learn_featscore_normalizer --show --disttype=lnbnn
python -m ibeis --tf learn_featscore_normalizer --show --disttype=L2_sift -t default:K=1
python -m ibeis --tf learn_featscore_normalizer --show --disttype=L2_sift -a timectrl -t default:K=1 --db PZ_Master1
python -m ibeis --tf learn_featscore_normalizer --show --disttype=ratio -a timectrl -t default:K=1 --db PZ_Master1
python -m ibeis --tf learn_featscore_normalizer --show --disttype=lnbnn -a timectrl -t default:K=1 --db PZ_Master1
# LOOK AT THIS
python -m ibeis --tf learn_featscore_normalizer --show --disttype=normdist -a timectrl -t default:K=1 --db PZ_Master1
#python -m ibeis --tf learn_featscore_normalizer --show --disttype=parzen -a timectrl -t default:K=1 --db PZ_Master1
#python -m ibeis --tf learn_featscore_normalizer --show --disttype=norm_parzen -a timectrl -t default:K=1 --db PZ_Master1
python -m ibeis --tf learn_featscore_normalizer --show --disttype=lnbnn --db PZ_Master1 -a timectrl -t best
Example:
>>> # ENABLE_DOCTEST
>>> from ibeis.algo.hots.scorenorm import * # NOQA
>>> import ibeis
>>> learnkw = {}
>>> datakw = NormFeatScoreConfig.from_argv_dict()
>>> qreq_ = ibeis.testdata_qreq_(
>>> defaultdb='PZ_MTEST', a=['default'], p=['default'])
>>> encoder = learn_featscore_normalizer(qreq_, datakw, learnkw)
>>> ut.quit_if_noshow()
>>> encoder.visualize(figtitle=encoder.get_cfgstr())
>>> ut.show_if_requested()
"""
cm_list = qreq_.execute()
print('learning scorenorm')
print('datakw = %s' % ut.repr3(datakw))
tp_scores, tn_scores, scorecfg = get_training_featscores(
qreq_, cm_list, **datakw)
_learnkw = dict(monotonize=True, adjust=2)
_learnkw.update(learnkw)
encoder = vt.ScoreNormalizer(**_learnkw)
encoder.fit_partitioned(tp_scores, tn_scores, verbose=False)
# ut.hashstr27(qreq_.get_cfgstr())
# Maintain regen command info: TODO: generalize and integrate
encoder._regen_info = {
'cmd': 'python -m ibeis --tf learn_featscore_normalizer',
'scorecfg': scorecfg,
'learnkw': learnkw,
'datakw': datakw,
'qaids': qreq_.qaids,
'daids': qreq_.daids,
'qreq_cfg': qreq_.get_full_cfgstr(),
'qreq_regen_info': getattr(qreq_, '_regen_info', {}),
}
# 'timestamp': ut.get_timestamp(),
scorecfg_safe = scorecfg
scorecfg_safe = re.sub('[' + re.escape('()= ') + ']', '', scorecfg_safe)
scorecfg_safe = re.sub('[' + re.escape('+*<>[]') + ']', '_', scorecfg_safe)
hashid = ut.hashstr27(ut.to_json(encoder._regen_info))
naidinfo = ('q%s_d%s' % (len(qreq_.qaids), len(qreq_.daids)))
cfgstr = 'featscore_{}_{}_{}_{}'.format(scorecfg_safe, qreq_.ibs.get_dbname(), naidinfo, hashid)
encoder.cfgstr = cfgstr
return encoder
def get_training_annotscores(qreq_, cm_list):
"""
Returns the annotation scores between each query and the correct groundtruth
annotations as well as the top scoring false annotations.
"""
good_tp_nscores = []
good_tn_nscores = []
good_tp_aidnid_pairs = []
good_tn_aidnid_pairs = []
ibs = qreq_.ibs
trainable = [ibs.get_annot_has_groundtruth(cm.qaid, daid_list=cm.daid_list)
for cm in cm_list]
cm_list_ = ut.compress(cm_list, trainable)
for cm in cm_list_:
qaid = cm.qaid
qnid = ibs.get_annot_name_rowids(cm.qaid)
nscoretup = cm.get_ranked_nids_and_aids()
(sorted_nids, sorted_nscores, sorted_aids, sorted_scores) = nscoretup
sorted_ndiff = -np.diff(sorted_nscores.tolist())
sorted_nids = np.array(sorted_nids)
is_positive = sorted_nids == qnid
is_negative = np.logical_and(~is_positive, sorted_nids > 0)
# Only take data from results with positive and negative examples
if not np.any(is_positive) or not np.any(is_negative):
continue
gt_rank = np.nonzero(is_positive)[0][0]
gf_rank = np.nonzero(is_negative)[0][0]
# Only take correct groundtruth scores
if gt_rank == 0 and len(sorted_nscores) > gf_rank:
if len(sorted_ndiff) > gf_rank:
good_tp_nscores.append(sorted_nscores[gt_rank])
good_tn_nscores.append(sorted_nscores[gf_rank])
good_tp_aidnid_pairs.append((qaid, sorted_nids[gt_rank]))
good_tn_aidnid_pairs.append((qaid, sorted_nids[gf_rank]))
tp_scores = np.array(good_tp_nscores)
tn_scores = np.array(good_tn_nscores)
return tp_scores, tn_scores, good_tn_aidnid_pairs, good_tp_aidnid_pairs
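# Worked example for the selection above (illustrative): with qnid=5 and
# sorted_nids=[5, 7, 9], is_positive=[T, F, F] gives gt_rank=0 and
# is_negative=[F, T, T] gives gf_rank=1, so the score pair
# (sorted_nscores[0], sorted_nscores[1]) becomes one (tp, tn) example.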
def get_training_featscores(qreq_, cm_list, disttype=None, namemode=True,
fsvx=slice(None, None, None), threshx=None,
thresh=.9, num=None, top_percent=None):
"""
Returns the flattened set of feature scores between each query and the
correct groundtruth annotations as well as the top scoring false
annotations.
Args:
qreq_ (ibeis.QueryRequest): query request object with hyper-parameters
cm_list (list):
disttype (None): (default = None)
namemode (bool): (default = True)
fsvx (slice): (default = slice(None, None, None))
threshx (None): (default = None)
thresh (float): only used if threshx is specified (default = 0.9)
num (int): forwarded to the training-index getters (default = None)
top_percent (float): keep only this top fraction of each match's
feature correspondences (default = None)
SeeAlso:
TestResult.draw_feat_scoresep
Returns:
tuple: (tp_scores, tn_scores, scorecfg)
CommandLine:
python -m ibeis.algo.hots.scorenorm --exec-get_training_featscores
Example:
>>> # ENABLE_DOCTEST
>>> from ibeis.algo.hots.scorenorm import * # NOQA
>>> import ibeis
>>> cm_list, qreq_ = ibeis.testdata_cmlist(defaultdb='PZ_MTEST', a=['default:qsize=10'])
>>> disttype = None
>>> namemode = True
>>> fsvx = None
>>> threshx = 1
>>> thresh = 0.5
>>> (tp_scores, tn_scores, scorecfg) = get_training_featscores(
>>> qreq_, cm_list, disttype, namemode, fsvx, threshx, thresh)
>>> result = scorecfg
>>> print(result)
(lnbnn*fg)[fg > 0.5]
"""
if fsvx is None:
fsvx = slice(None, None, None)
fsv_col_lbls = None
tp_fsvs_list = []
tn_fsvs_list = []
#cm_list = [ cm_list[key] for key in sorted(cm_list.keys()) ]
# Train on only positive examples
trainable = [
qreq_.ibs.get_annot_has_groundtruth(cm.qaid, daid_list=cm.daid_list) and
cm.get_top_nids()[0] == cm.qnid
for cm in cm_list
]
cm_list_ = ut.compress(cm_list, trainable)
print('training using %d chipmatches' % (len(cm_list_),))
if disttype is None:
# use the column labels from the first trainable chipmatch
fsv_col_lbls = cm_list_[0].fsv_col_lbls
train_getter = get_training_fsv
else:
fsv_col_lbls = ut.ensure_iterable(disttype)
# annots = {} # Hack for cached vector lookups
ibs = qreq_.ibs
data_annots = ut.KeyedDefaultDict(ibs.get_annot_lazy_dict, config2_=qreq_.data_config2_)
query_annots = ut.KeyedDefaultDict(ibs.get_annot_lazy_dict, config2_=qreq_.query_config2_)
train_getter = partial(get_training_desc_dist,
fsv_col_lbls=fsv_col_lbls, qreq_=qreq_,
data_annots=data_annots,
query_annots=query_annots)
for cm in ut.ProgIter(cm_list_, lbl='building train featscores',
adjust=True, freq=1):
try:
tp_fsv, tn_fsv = train_getter(
cm, namemode=namemode, top_percent=top_percent, num=num)
tp_fsvs_list.extend(tp_fsv)
tn_fsvs_list.extend(tn_fsv)
except UnbalancedExampleException:
continue
fsv_tp = np.vstack(tp_fsvs_list)
fsv_tn = np.vstack(tn_fsvs_list)
fsv_col_lbls_ = ut.take(fsv_col_lbls, fsvx)
fsv_tp_ = fsv_tp.T[fsvx].T
fsv_tn_ = fsv_tn.T[fsvx].T
if threshx is not None:
tp_scores = fsv_tp_[fsv_tp.T[threshx] > thresh].prod(axis=1)
tn_scores = fsv_tn_[fsv_tn.T[threshx] > thresh].prod(axis=1)
threshpart = ('[' + fsv_col_lbls[threshx] + ' > ' + str(thresh) + ']')
scorecfg = '(%s)%s' % ('*'.join(fsv_col_lbls_), threshpart)
else:
tp_scores = fsv_tp_.prod(axis=1)
tn_scores = fsv_tn_.prod(axis=1)
scorecfg = '*'.join(fsv_col_lbls_)
return tp_scores, tn_scores, scorecfg
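# Worked example (illustrative numbers): with fsv columns (lnbnn, fg) and
# fsv_tp = np.array([[.8, .95], [.4, .7]]), threshx=1 and thresh=.9 keep
# only the first row (fg > .9); its row product .8 * .95 = .76 becomes a
# single true-positive training score and scorecfg reads
# '(lnbnn*fg)[fg > 0.9]'.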
class UnbalancedExampleException(Exception):
pass
def get_topannot_training_idxs(cm, num=2):
""" top annots version
Args:
cm (ibeis.ChipMatch): object of feature correspondences and scores
num (int): number of top annots per TP/TN (default = 2)
CommandLine:
python -m ibeis.algo.hots.scorenorm --exec-get_topannot_training_idxs --show
Example:
>>> # ENABLE_DOCTEST
>>> from ibeis.algo.hots.scorenorm import * # NOQA
>>> import ibeis
>>> cm, qreq_ = ibeis.testdata_cm(defaultdb='PZ_MTEST')
>>> num = 2
>>> cm.score_annot_csum(qreq_)
>>> (tp_idxs, tn_idxs) = get_topannot_training_idxs(cm, num)
>>> result = ('(tp_idxs, tn_idxs) = %s' % (ut.repr2((tp_idxs, tn_idxs), nl=1),))
>>> print(result)
(tp_idxs, tn_idxs) = (
np.array([0, 1], dtype=np.int64),
np.array([3, 4], dtype=np.int64),
)
"""
if num is None:
num = 2
sortx = cm.argsort()
sorted_nids = cm.dnid_list.take(sortx, axis=0)
mask = sorted_nids == cm.qnid
tp_idxs_ = np.where(mask)[0]
if len(tp_idxs_) == 0:
#if ut.STRICT:
# raise Exception('tp_idxs_=0')
#else:
raise UnbalancedExampleException('tp_idxs_=0')
tn_idxs_ = np.where(~mask)[0]
if len(tn_idxs_) == 0:
#if ut.STRICT:
# raise Exception('tn_idxs_=0')
#else:
raise UnbalancedExampleException('tn_idxs_=0')
tp_idxs = tp_idxs_[0:num]
tn_idxs = tn_idxs_[0:num]
return tp_idxs, tn_idxs
def get_topname_training_idxs(cm, num=5):
"""
Gets the indexes of the annots in the top groundtrue name and the top
groundfalse names.
Args:
cm (ibeis.ChipMatch): object of feature correspondences and scores
num (int): number of false names (default = 5)
Returns:
tuple: (tp_idxs, tn_idxs)
cm.daid_list[tp_idxs] are all of the
annotations in the correct name.
cm.daid_list[tn_idxs] are all of the
annotations in the top `num` incorrect names.
CommandLine:
python -m ibeis --tf get_topname_training_idxs --show
Example:
>>> # ENABLE_DOCTEST
>>> from ibeis.algo.hots.scorenorm import * # NOQA
>>> import ibeis
>>> cm, qreq_ = ibeis.testdata_cm('PZ_MTEST', a='default:dindex=0:10,qindex=0:1', t='best')
>>> num = 1
>>> (tp_idxs, tn_idxs) = get_topname_training_idxs(cm, num)
>>> result = ('(tp_idxs, tn_idxs) = %s' % (ut.repr2((tp_idxs, tn_idxs), nl=1),))
>>> print(result)
(tp_idxs, tn_idxs) = (
np.array([0, 1, 2, 3], dtype=np.int64),
[4, 5, 6, 7],
)
"""
if num is None:
num = 5
sortx = cm.name_argsort()
sorted_nids = vt.take2(cm.unique_nids, sortx)
sorted_groupxs = ut.take(cm.name_groupxs, sortx)
# name ranks of the groundtrue name
tp_ranks = np.where(sorted_nids == cm.qnid)[0]
if len(tp_ranks) == 0:
#if ut.STRICT:
# raise Exception('tp_ranks=0')
#else:
raise UnbalancedExampleException('tp_ranks=0')
# name ranks of the top groundfalse names
tp_rank = tp_ranks[0]
tn_ranks = [rank for rank in range(num + 1)
if rank != tp_rank and rank < len(sorted_groupxs)]
if len(tn_ranks) == 0:
#if ut.STRICT:
# raise Exception('tn_ranks=0')
#else:
raise UnbalancedExampleException('tn_ranks=0')
# annot idxs of the examples
tp_idxs = sorted_groupxs[tp_rank]
tn_idxs = ut.flatten(ut.take(sorted_groupxs, tn_ranks))
return tp_idxs, tn_idxs
def get_training_fsv(cm, namemode=True, num=None, top_percent=None):
"""
CommandLine:
python -m ibeis.algo.hots.scorenorm --exec-get_training_fsv --show
Example:
>>> # ENABLE_DOCTEST
>>> from ibeis.algo.hots.scorenorm import * # NOQA
>>> import ibeis
>>> num = None
>>> cm, qreq_ = ibeis.testdata_cm('PZ_MTEST', a='default:dindex=0:10,qindex=0:1', t='best')
>>> (tp_fsv, tn_fsv) = get_training_fsv(cm, namemode=False)
>>> result = ('(tp_fsv, tn_fsv) = %s' % (ut.repr2((tp_fsv, tn_fsv), nl=1),))
>>> print(result)
"""
if namemode:
tp_idxs, tn_idxs = get_topname_training_idxs(cm, num=num)
else:
tp_idxs, tn_idxs = get_topannot_training_idxs(cm, num=num)
# Keep only the top scoring half of the feature matches
# top_percent = None
if top_percent is not None:
cm_orig = cm
#cm_orig.assert_self(qreq_)
tophalf_indices = [
ut.take_percentile(fs.argsort()[::-1], top_percent)
for fs in cm.get_fsv_prod_list()
]
cm = cm_orig.take_feature_matches(tophalf_indices, keepscores=True)
assert np.all(cm_orig.daid_list.take(tp_idxs) == cm.daid_list.take(tp_idxs))
assert np.all(cm_orig.daid_list.take(tn_idxs) == cm.daid_list.take(tn_idxs))
#cm.assert_self(qreq_)
tp_fsv = np.vstack(ut.take(cm.fsv_list, tp_idxs))
tn_fsv = np.vstack(ut.take(cm.fsv_list, tn_idxs))
return tp_fsv, tn_fsv
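# Sketch of the top_percent filter (assumes ut.take_percentile keeps the
# leading fraction of the given index order): for a match whose feature
# score products are [.9, .1, .5, .7], top_percent=.5 keeps the two
# highest-scoring correspondences (.9 and .7) before the fsv rows are
# stacked.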
@profile
def get_training_desc_dist(cm, qreq_, fsv_col_lbls=[], namemode=True,
top_percent=None, data_annots=None,
query_annots=None, num=None):
r"""
computes custom distances on prematched descriptors
SeeAlso:
python -m ibeis --tf learn_featscore_normalizer --show --disttype=ratio
python -m ibeis --tf learn_featscore_normalizer --show --disttype=normdist -a timectrl -t default:K=1 --db PZ_Master1 --save pzmaster_normdist.png
python -m ibeis --tf learn_featscore_normalizer --show --disttype=normdist -a timectrl -t default:K=1 --db PZ_MTEST --save pzmtest_normdist.png
python -m ibeis --tf learn_featscore_normalizer --show --disttype=normdist -a timectrl -t default:K=1 --db GZ_ALL
python -m ibeis --tf learn_featscore_normalizer --show --disttype=L2_sift -a timectrl -t default:K=1 --db PZ_MTEST
python -m ibeis --tf learn_featscore_normalizer --show --disttype=L2_sift -a timectrl -t default:K=1 --db PZ_Master1
python -m ibeis --tf compare_featscores --show --disttype=L2_sift,normdist -a timectrl -t default:K=1 --db GZ_ALL
CommandLine:
python -m ibeis.algo.hots.scorenorm --exec-get_training_desc_dist
python -m ibeis.algo.hots.scorenorm --exec-get_training_desc_dist:1
Example:
>>> # ENABLE_DOCTEST
>>> from ibeis.algo.hots.scorenorm import * # NOQA
>>> import ibeis
>>> cm, qreq_ = ibeis.testdata_cm(defaultdb='PZ_MTEST')
>>> fsv_col_lbls = ['ratio', 'lnbnn', 'L2_sift']
>>> namemode = False
>>> (tp_fsv, tn_fsv) = get_training_desc_dist(cm, qreq_, fsv_col_lbls,
>>> namemode=namemode)
>>> result = ut.repr2((tp_fsv.T, tn_fsv.T), nl=1)
>>> print(result)
Example1:
>>> # ENABLE_DOCTEST
>>> from ibeis.algo.hots.scorenorm import * # NOQA
>>> import ibeis
>>> cm, qreq_ = ibeis.testdata_cm(defaultdb='PZ_MTEST')
>>> fsv_col_lbls = cm.fsv_col_lbls
>>> num = None
>>> namemode = False
>>> top_percent = None
>>> data_annots = None
>>> (tp_fsv1, tn_fsv1) = get_training_fsv(cm, namemode=namemode,
>>> top_percent=top_percent)
>>> (tp_fsv, tn_fsv) = get_training_desc_dist(cm, qreq_, fsv_col_lbls,
>>> namemode=namemode,
>>> top_percent=top_percent)
>>> vt.asserteq(tp_fsv1, tp_fsv)
>>> vt.asserteq(tn_fsv1, tn_fsv)
"""
if namemode:
tp_idxs, tn_idxs = get_topname_training_idxs(cm, num=num)
else:
tp_idxs, tn_idxs = get_topannot_training_idxs(cm, num=num)
if top_percent is not None:
cm_orig = cm
cm_orig.assert_self(qreq_, verbose=False)
# Keep only the top scoring half of the feature matches
tophalf_indices = [
ut.take_percentile(fs.argsort()[::-1], top_percent)
for fs in cm.get_fsv_prod_list()
]
cm = cm_orig.take_feature_matches(tophalf_indices, keepscores=True)
assert np.all(cm_orig.daid_list.take(tp_idxs) == cm.daid_list.take(tp_idxs))
assert np.all(cm_orig.daid_list.take(tn_idxs) == cm.daid_list.take(tn_idxs))
cm.assert_self(qreq_, verbose=False)
ibs = qreq_.ibs
query_config2_ = qreq_.extern_query_config2
data_config2_ = qreq_.extern_data_config2
special_xs, dist_xs = vt.index_partition(fsv_col_lbls, ['fg', 'ratio', 'lnbnn', 'normdist'])
dist_lbls = ut.take(fsv_col_lbls, dist_xs)
special_lbls = ut.take(fsv_col_lbls, special_xs)
qaid = cm.qaid
# cm.assert_self(qreq_=qreq_)
fsv_list = []
for idxs in [tp_idxs, tn_idxs]:
daid_list = cm.daid_list.take(idxs)
# Matching indices in query / database images
qfxs_list = ut.take(cm.qfxs_list, idxs)
dfxs_list = ut.take(cm.dfxs_list, idxs)
need_norm = len(ut.setintersect_ordered(['ratio', 'lnbnn', 'normdist'], special_lbls)) > 0
#need_norm |= 'parzen' in special_lbls
#need_norm |= 'norm_parzen' in special_lbls
need_dists = len(dist_xs) > 0
if need_dists or need_norm:
qaid_list = [qaid] * len(qfxs_list)
qvecs_flat_m = np.vstack(ibs.get_annot_vecs_subset(qaid_list, qfxs_list, config2_=query_config2_))
dvecs_flat_m = np.vstack(ibs.get_annot_vecs_subset(daid_list, dfxs_list, config2_=data_config2_))
if need_norm:
assert any(x is not None for x in cm.filtnorm_aids), 'no normalizer known'
naids_list = ut.take(cm.naids_list, idxs)
nfxs_list = ut.take(cm.nfxs_list, idxs)
nvecs_flat = ibs.lookup_annot_vecs_subset(naids_list, nfxs_list, config2_=data_config2_,
annots=data_annots)
#import utool
#with utool.embed_on_exception_context:
#nvecs_flat_m = np.vstack(ut.compress(nvecs_flat, nvecs_flat))
_nvecs_flat_m = ut.compress(nvecs_flat, nvecs_flat)
nvecs_flat_m = vt.safe_vstack(_nvecs_flat_m, qvecs_flat_m.shape, qvecs_flat_m.dtype)
vdist = vt.L2_sift(qvecs_flat_m, dvecs_flat_m)
ndist = vt.L2_sift(qvecs_flat_m, nvecs_flat_m)
#assert np.all(vdist <= ndist)
#import utool
#utool.embed()
#vdist = vt.L2_sift_sqrd(qvecs_flat_m, dvecs_flat_m)
#ndist = vt.L2_sift_sqrd(qvecs_flat_m, nvecs_flat_m)
#vdist = vt.L2_root_sift(qvecs_flat_m, dvecs_flat_m)
#ndist = vt.L2_root_sift(qvecs_flat_m, nvecs_flat_m)
#x = cm.fsv_list[0][0:5].T[0]
#y = (ndist - vdist)[0:5]
if len(special_xs) > 0:
special_dist_list = []
# assert special_lbls[0] == 'fg'
if 'fg' in special_lbls:
# hack for fgweights (could get them directly from fsv)
qfgweights_flat_m = np.hstack(ibs.get_annot_fgweights_subset([qaid] * len(qfxs_list), qfxs_list, config2_=query_config2_))
dfgweights_flat_m = np.hstack(ibs.get_annot_fgweights_subset(daid_list, dfxs_list, config2_=data_config2_))
fgweights = np.sqrt(qfgweights_flat_m * dfgweights_flat_m)
special_dist_list.append(fgweights)
if 'ratio' in special_lbls:
# Integrating ratio test
ratio_dist = (vdist / ndist)
special_dist_list.append(ratio_dist)
if 'lnbnn' in special_lbls:
lnbnn_dist = ndist - vdist
special_dist_list.append(lnbnn_dist)
#if 'parzen' in special_lbls:
# parzen = vt.gauss_parzen_est(vdist, sigma=.38)
# special_dist_list.append(parzen)
#if 'norm_parzen' in special_lbls:
# parzen = vt.gauss_parzen_est(ndist, sigma=.38)
# special_dist_list.append(parzen)
if 'normdist' in special_lbls:
special_dist_list.append(ndist)
special_dists = np.vstack(special_dist_list).T
else:
special_dists = np.empty((0, 0))
if len(dist_xs) > 0:
# Get descriptors
# Compute descriptor distances
_dists = vt.compute_distances(qvecs_flat_m, dvecs_flat_m, dist_lbls)
dists = np.vstack(_dists.values()).T
else:
dists = np.empty((0, 0))
fsv = vt.rebuild_partition(special_dists.T, dists.T,
special_xs, dist_xs)
fsv = np.array(fsv).T
fsv_list.append(fsv)
tp_fsv, tn_fsv = fsv_list
return tp_fsv, tn_fsv
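# Worked example for the special distances above (illustrative): with a
# match distance vdist=.3 and a normalizer distance ndist=.5, the ratio
# feature is .3 / .5 = .6, the lnbnn feature is .5 - .3 = .2, and the
# normdist feature is ndist itself (.5).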
if __name__ == '__main__':
"""
CommandLine:
python -m ibeis.algo.hots.scorenorm
python -m ibeis.algo.hots.scorenorm --allexamples
python -m ibeis.algo.hots.scorenorm --allexamples --noface --nosrc
"""
import multiprocessing
multiprocessing.freeze_support() # for win32
import utool as ut # NOQA
ut.doctest_funcs()
|
|
import asyncio
import discord
import datetime
import random
from discord.ext import commands
from operator import itemgetter
from Cogs import Settings
from Cogs import DisplayName
from Cogs import Nullify
from Cogs import CheckRoles
# This is the xp module. It's likely to be a mess.
class Xp:
# Init with the bot reference, and a reference to the settings var
def __init__(self, bot, settings):
self.bot = bot
self.settings = settings
self.bot.loop.create_task(self.addXP())
def suppressed(self, guild, msg):
# Check if we're suppressing @here and @everyone mentions
if self.settings.getServerStat(guild, "SuppressMentions").lower() == "yes":
return Nullify.clean(msg)
else:
return msg
async def addXP(self):
await self.bot.wait_until_ready()
while not self.bot.is_closed():
await asyncio.sleep(600) # runs only every 10 minutes (600 seconds)
print("Adding XP: {}".format(datetime.datetime.now().time().isoformat()))
for server in self.bot.guilds:
# Iterate through the servers and add them
xpAmount = int(self.settings.getServerStat(server, "HourlyXP"))
xpAmount = float(xpAmount/6)
xpRAmount = int(self.settings.getServerStat(server, "HourlyXPReal"))
xpRAmount = float(xpRAmount/6)
onlyOnline = self.settings.getServerStat(server, "RequireOnline")
for user in server.members:
bumpXP = False
if onlyOnline.lower() == "no":
bumpXP = True
else:
if str(user.status).lower() == "online":
bumpXP = True
if bumpXP:
if xpAmount > 0:
# User is online add hourly xp reserve
xpLeftover = self.settings.getUserStat(user, server, "XPLeftover")
if xpLeftover is None:
xpLeftover = 0
else:
xpLeftover = float(xpLeftover)
gainedXp = xpLeftover+xpAmount
gainedXpInt = int(gainedXp) # Strips the decimal point off
xpLeftover = float(gainedXp-gainedXpInt) # Gets the < 1 value
self.settings.setUserStat(user, server, "XPLeftover", xpLeftover)
self.settings.incrementStat(user, server, "XPReserve", gainedXpInt)
if xpRAmount > 0:
# User is online add hourly xp
xpRLeftover = self.settings.getUserStat(user, server, "XPRealLeftover")
if xpRLeftover is None:
xpRLeftover = 0
else:
xpRLeftover = float(xpRLeftover)
gainedXpR = xpRLeftover+xpRAmount
gainedXpRInt = int(gainedXpR) # Strips the decimal point off
xpRLeftover = float(gainedXpR-gainedXpRInt) # Gets the < 1 value
self.settings.setUserStat(user, server, "XPRealLeftover", xpRLeftover)
self.settings.incrementStat(user, server, "XP", gainedXpRInt)
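# Worked example of the leftover carry above (illustrative): with
# HourlyXP=10, xpAmount = 10/6 ~ 1.667 per 10-minute tick. Tick 1:
# 0 + 1.667 -> grant 1, carry .667; tick 2: .667 + 1.667 = 2.333 ->
# grant 2, carry .333; tick 3: .333 + 1.667 = 2.0 -> grant 2, carry 0.
# Over six ticks the user receives exactly 10 xp.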
# Check our default channels
targetChan = server.default_channel
targetChanID = self.settings.getServerStat(server, "DefaultChannel")
if targetChanID:
# We *should* have a channel
tChan = self.bot.get_channel(int(targetChanID))
if tChan:
# We *do* have one
targetChan = tChan
# Check for promotion/demotion
try:
await CheckRoles.checkroles(user, targetChan, self.settings, self.bot)
except Exception:
continue
@commands.command(pass_context=True)
async def xp(self, ctx, *, member = None, xpAmount : int = None):
"""Gift xp to other members."""
author = ctx.message.author
server = ctx.message.guild
channel = ctx.message.channel
# Check if we're suppressing @here and @everyone mentions
if self.settings.getServerStat(server, "SuppressMentions").lower() == "yes":
suppress = True
else:
suppress = False
usage = 'Usage: `{}xp [role/member] [amount]`'.format(ctx.prefix)
isRole = False
if member is None:
await ctx.message.channel.send(usage)
return
# Check for formatting issues
if xpAmount is None:
# Either xp wasn't set - or it's the last section
if type(member) is str:
# It's a string - the hope continues
roleCheck = DisplayName.checkRoleForInt(member, server)
if not roleCheck:
# Returned nothing - means there isn't even an int
msg = 'I couldn\'t find *{}* on the server.'.format(member)
# Check for suppress
if suppress:
msg = Nullify.clean(msg)
await ctx.message.channel.send(msg)
return
if roleCheck["Role"]:
isRole = True
member = roleCheck["Role"]
xpAmount = roleCheck["Int"]
else:
# Role is invalid - check for member instead
nameCheck = DisplayName.checkNameForInt(member, server)
if not nameCheck:
await ctx.message.channel.send(usage)
return
if not nameCheck["Member"]:
msg = 'I couldn\'t find *{}* on the server.'.format(member)
# Check for suppress
if suppress:
msg = Nullify.clean(msg)
await ctx.message.channel.send(msg)
return
member = nameCheck["Member"]
xpAmount = nameCheck["Int"]
if xpAmount is None:
# Still no xp - let's run stats instead
if isRole:
await ctx.message.channel.send(usage)
else:
await ctx.invoke(self.stats, member=member)
return
if not isinstance(xpAmount, int):
await ctx.message.channel.send(usage)
return
# Get our user/server stats
isAdmin = author.permissions_in(channel).administrator
checkAdmin = self.settings.getServerStat(ctx.message.guild, "AdminArray")
# Check for bot admin
isBotAdmin = False
for role in ctx.message.author.roles:
for aRole in checkAdmin:
# Get the role that corresponds to the id
if str(aRole['ID']) == str(role.id):
isBotAdmin = True
break
botAdminAsAdmin = self.settings.getServerStat(server, "BotAdminAsAdmin")
adminUnlim = self.settings.getServerStat(server, "AdminUnlimited")
reserveXP = self.settings.getUserStat(author, server, "XPReserve")
requiredXP = self.settings.getServerStat(server, "RequiredXPRole")
approve = True
decrement = True
# RequiredXPRole
if requiredXP:
foundRole = False
for checkRole in author.roles:
if str(checkRole.id) == str(requiredXP):
foundRole = True
if not foundRole:
approve = False
msg = 'You don\'t have the permissions to give xp.'
if xpAmount > int(reserveXP):
approve = False
msg = 'You can\'t give *{:,} xp*, you only have *{:,}!*'.format(xpAmount, reserveXP)
if author == member:
approve = False
msg = 'You can\'t give yourself xp! *Nice try...*'
if xpAmount < 0:
msg = 'Only admins can take away xp!'
approve = False
# Avoid admins gaining xp
decrement = False
if xpAmount == 0:
msg = 'Wow, very generous of you...'
approve = False
# Check bot admin
if isBotAdmin and botAdminAsAdmin.lower() == "yes":
# Approve as admin
approve = True
if adminUnlim.lower() == "yes":
# No limit
decrement = False
else:
if xpAmount < 0:
# Don't decrement if negative
decrement = False
if xpAmount > int(reserveXP):
# Don't approve if we don't have enough
msg = 'You can\'t give *{:,} xp*, you only have *{:,}!*'.format(xpAmount, reserveXP)
approve = False
# Check admin last - so it overrides anything else
if isAdmin:
# No limit - approve
approve = True
if adminUnlim.lower() == "yes":
# No limit
decrement = False
else:
if xpAmount < 0:
# Don't decrement if negative
decrement = False
if xpAmount > int(reserveXP):
# Don't approve if we don't have enough
msg = 'You can\'t give *{:,} xp*, you only have *{:,}!*'.format(xpAmount, reserveXP)
approve = False
if approve:
self.bot.dispatch("xp", member, ctx.author, xpAmount)
if isRole:
# XP was approved - let's iterate through the users of that role,
# starting with the lowest xp
#
# Work through our members
memberList = []
sMemberList = self.settings.getServerStat(server, "Members")
for amem in server.members:
if amem == author:
continue
roles = amem.roles
if member in roles:
# This member has our role
# Add to our list
for smem in sMemberList:
# Find our server entry
if str(smem["ID"]) == str(amem.id):
# Add it.
memberList.append(smem)
memSorted = sorted(memberList, key=lambda x:int(x['XP']))
if len(memSorted):
# There actually ARE members in said role
totalXP = xpAmount
if xpAmount > len(memSorted):
# More xp than members
leftover = xpAmount % len(memSorted)
eachXP = (xpAmount-leftover)//len(memSorted)  # integer share per member
for i in range(0, len(memSorted)):
cMember = DisplayName.memberForID(memSorted[i]['ID'], server)
if leftover>0:
self.settings.incrementStat(cMember, server, "XP", eachXP+1)
leftover -= 1
else:
self.settings.incrementStat(cMember, server, "XP", eachXP)
await CheckRoles.checkroles(cMember, channel, self.settings, self.bot)
else:
for i in range(0, xpAmount):
cMember = DisplayName.memberForID(memSorted[i]['ID'], server)
self.settings.incrementStat(cMember, server, "XP", 1)
await CheckRoles.checkroles(cMember, channel, self.settings, self.bot)
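# Worked example of the split above (illustrative): xpAmount=10 across 3
# eligible members gives leftover = 10 % 3 = 1 and eachXP = 3; the
# lowest-xp member gets 4 and the other two get 3 each, totalling 10.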
# Decrement if needed
if decrement:
self.settings.incrementStat(author, server, "XPReserve", (-1*xpAmount))
msg = '*{:,} collective xp* was given to *{}!*'.format(totalXP, member.name)
# Check for suppress
if suppress:
msg = Nullify.clean(msg)
await channel.send(msg)
else:
msg = 'There are no eligible members in *{}!*'.format(member.name)
await channel.send(msg)
else:
# Decrement if needed
if decrement:
self.settings.incrementStat(author, server, "XPReserve", (-1*xpAmount))
# XP was approved! Let's say it - and check decrement from gifter's xp reserve
msg = '*{}* was given *{:,} xp!*'.format(DisplayName.name(member), xpAmount)
# Check for suppress
if suppress:
msg = Nullify.clean(msg)
await channel.send(msg)
self.settings.incrementStat(member, server, "XP", xpAmount)
# Now we check for promotions
await CheckRoles.checkroles(member, channel, self.settings, self.bot)
else:
await channel.send(msg)
@xp.error
async def xp_error(self, ctx, error):
msg = 'xp Error: {}'.format(error)
await ctx.channel.send(msg)
@commands.command(pass_context=True)
async def defaultrole(self, ctx):
"""Lists the default role that new users are assigned."""
# Check if we're suppressing @here and @everyone mentions
if self.settings.getServerStat(ctx.message.guild, "SuppressMentions").lower() == "yes":
suppress = True
else:
suppress = False
role = self.settings.getServerStat(ctx.message.guild, "DefaultRole")
if role is None or role == "":
msg = 'New users are not assigned a role on joining this server.'
await ctx.channel.send(msg)
else:
# Role is set - let's get its name
found = False
for arole in ctx.message.guild.roles:
if str(arole.id) == str(role):
found = True
msg = 'New users will be assigned to **{}**.'.format(arole.name)
# Check for suppress
if suppress:
msg = Nullify.clean(msg)
if not found:
msg = 'There is no role that matches id: `{}` - consider updating this setting.'.format(role)
await ctx.message.channel.send(msg)
@commands.command(pass_context=True)
async def gamble(self, ctx, bet : int = None):
"""Gamble your xp reserves for a chance at winning xp!"""
author = ctx.message.author
server = ctx.message.guild
channel = ctx.message.channel
# bet must be a multiple of 10, member must have enough xpreserve to bet
msg = 'Usage: `{}gamble [xp reserve bet] (must be multiple of 10)`'.format(ctx.prefix)
if not isinstance(bet, int):
# Covers both a missing bet (None) and a non-integer value
await channel.send(msg)
return
isAdmin = author.permissions_in(channel).administrator
checkAdmin = self.settings.getServerStat(ctx.message.guild, "AdminArray")
# Check for bot admin
isBotAdmin = False
for role in ctx.message.author.roles:
for aRole in checkAdmin:
# Get the role that corresponds to the id
if str(aRole['ID']) == str(role.id):
isBotAdmin = True
break
botAdminAsAdmin = self.settings.getServerStat(server, "BotAdminAsAdmin")
adminUnlim = self.settings.getServerStat(server, "AdminUnlimited")
reserveXP = self.settings.getUserStat(author, server, "XPReserve")
minRole = self.settings.getServerStat(server, "MinimumXPRole")
requiredXP = self.settings.getServerStat(server, "RequiredXPRole")
approve = True
decrement = True
# Check Bet
if bet % 10 != 0:
approve = False
msg = 'Bets must be in multiples of *10!*'
if bet > int(reserveXP):
approve = False
msg = 'You can\'t bet *{:,}*, you only have *{:,}* xp reserve!'.format(bet, reserveXP)
if bet < 0:
msg = 'You can\'t bet negative amounts!'
approve = False
if bet == 0:
msg = 'You can\'t bet *nothing!*'
approve = False
# RequiredXPRole
if requiredXP:
foundRole = False
for checkRole in author.roles:
if str(checkRole.id) == str(requiredXP):
foundRole = True
if not foundRole:
approve = False
msg = 'You don\'t have the permissions to gamble.'
# Check bot admin
if isBotAdmin and botAdminAsAdmin.lower() == "yes":
# Approve as admin
approve = True
if adminUnlim.lower() == "yes":
# No limit
decrement = False
else:
if bet < 0:
# Don't decrement if negative
decrement = False
if bet > int(reserveXP):
# Don't approve if we don't have enough
msg = 'You can\'t bet *{:,}*, you only have *{:,}* xp reserve!'.format(bet, reserveXP)
approve = False
# Check admin last - so it overrides anything else
if isAdmin:
# No limit - approve
approve = True
if adminUnlim.lower() == "yes":
# No limit
decrement = False
else:
if bet < 0:
# Don't decrement if negative
decrement = False
if bet > int(reserveXP):
# Don't approve if we don't have enough
msg = 'You can\'t bet *{:,}*, you only have *{:,}* xp reserve!'.format(bet, reserveXP)
approve = False
if approve:
# Bet was approved - let's take the XPReserve right away
if decrement:
takeReserve = -1*bet
self.settings.incrementStat(author, server, "XPReserve", takeReserve)
# Bet more, less chance of winning, but more winnings!
if bet < 100:
betChance = 5
payout = int(bet/10)
elif bet < 500:
betChance = 15
payout = int(bet/4)
else:
betChance = 25
payout = int(bet/2)
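# Worked example of the tiers above (illustrative): bet=90 -> 1-in-5
# chance to win 9 xp; bet=250 -> 1-in-15 chance to win 62 xp; bet=500 ->
# 1-in-25 chance to win 250 xp.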
# 1/betChance chance that the user will win - payout scales with the tier above
randnum = random.randint(1, betChance)
# print('{} : {}'.format(randnum, betChance))
if randnum == 1:
# YOU WON!!
self.settings.incrementStat(author, server, "XP", int(payout))
msg = '*{}* bet *{:,}* and ***WON*** *{:,} xp!*'.format(DisplayName.name(author), bet, int(payout))
# Now we check for promotions
await CheckRoles.checkroles(author, channel, self.settings, self.bot)
else:
msg = '*{}* bet *{:,}* and.... *didn\'t* win. Better luck next time!'.format(DisplayName.name(author), bet)
await ctx.message.channel.send(msg)
@commands.command(pass_context=True)
async def recheckroles(self, ctx):
"""Re-iterate through all members and assign the proper roles based on their xp (admin only)."""
author = ctx.message.author
server = ctx.message.guild
channel = ctx.message.channel
isAdmin = author.permissions_in(channel).administrator
# Only allow admins to change server stats
if not isAdmin:
await channel.send('You do not have sufficient privileges to access this command.')
return
message = await ctx.channel.send('Checking roles...')
changeCount = 0
for member in server.members:
# Now we check for promotions
if await CheckRoles.checkroles(member, channel, self.settings, self.bot, True):
changeCount += 1
if changeCount == 1:
await message.edit(content='Done checking roles.\n\n*1 user* updated.')
#await channel.send('Done checking roles.\n\n*1 user* updated.')
else:
await message.edit(content='Done checking roles.\n\n*{} users* updated.'.format(changeCount))
#await channel.send('Done checking roles.\n\n*{} users* updated.'.format(changeCount))
@commands.command(pass_context=True)
async def recheckrole(self, ctx, *, user : discord.Member = None):
"""Re-iterate through all members and assign the proper roles based on their xp (admin only)."""
author = ctx.message.author
server = ctx.message.guild
channel = ctx.message.channel
isAdmin = author.permissions_in(channel).administrator
# Only allow admins to change server stats
if not isAdmin:
await channel.send('You do not have sufficient privileges to access this command.')
return
if not user:
user = author
# Now we check for promotions
if await CheckRoles.checkroles(user, channel, self.settings, self.bot):
await channel.send('Done checking roles.\n\n*{}* was updated.'.format(DisplayName.name(user)))
else:
await channel.send('Done checking roles.\n\n*{}* was not updated.'.format(DisplayName.name(user)))
@commands.command(pass_context=True)
async def listxproles(self, ctx):
"""Lists all roles, id's, and xp requirements for the xp promotion/demotion system."""
server = ctx.message.guild
channel = ctx.message.channel
# Check if we're suppressing @here and @everyone mentions
if self.settings.getServerStat(server, "SuppressMentions").lower() == "yes":
suppress = True
else:
suppress = False
# Get the array
promoArray = self.settings.getServerStat(server, "PromotionArray")
# Sort by XP first, then by name
# promoSorted = sorted(promoArray, key=itemgetter('XP', 'Name'))
promoSorted = sorted(promoArray, key=lambda x:int(x['XP']))
if not len(promoSorted):
roleText = "There are no roles in the xp role list. You can add some with the `{}addxprole [role] [xpamount]` command!\n".format(ctx.prefix)
else:
roleText = "**__Current Roles:__**\n\n"
for arole in promoSorted:
# Get current role name based on id
foundRole = False
for role in server.roles:
if str(role.id) == str(arole['ID']):
# We found it
foundRole = True
roleText = '{}**{}** : *{} XP*\n'.format(roleText, role.name, arole['XP'])
if not foundRole:
roleText = '{}**{}** : *{} XP* (removed from server)\n'.format(roleText, arole['Name'], arole['XP'])
# Get the required role for using the xp system
role = self.settings.getServerStat(ctx.message.guild, "RequiredXPRole")
if role is None or role == "":
roleText = '{}\n**Everyone** can give xp, gamble, and feed the bot.'.format(roleText)
else:
# Role is set - let's get its name
found = False
for arole in ctx.message.guild.roles:
if str(arole.id) == str(role):
found = True
vowels = "aeiou"
if arole.name[:1].lower() in vowels:
roleText = '{}You need to be an **{}** to *give xp*, *gamble*, or *feed* the bot.'.format(roleText, arole.name)
else:
roleText = '{}You need to be a **{}** to *give xp*, *gamble*, or *feed* the bot.'.format(roleText, arole.name)
# roleText = '{}\nYou need to be a/an **{}** to give xp, gamble, or feed the bot.'.format(roleText, arole.name)
if not found:
roleText = '{}\nThere is no role that matches id: `{}` for using the xp system - consider updating that setting.'.format(roleText, role)
# Check for suppress
if suppress:
roleText = Nullify.clean(roleText)
await channel.send(roleText)
@commands.command(pass_context=True)
async def rank(self, ctx, *, member = None):
"""Say the highest rank of a listed member."""
# Check if we're suppressing @here and @everyone mentions
if self.settings.getServerStat(ctx.message.guild, "SuppressMentions").lower() == "yes":
suppress = True
else:
suppress = False
if member is None:
member = ctx.message.author
if type(member) is str:
memberName = member
member = DisplayName.memberForName(memberName, ctx.message.guild)
if not member:
msg = 'I couldn\'t find *{}*...'.format(memberName)
# Check for suppress
if suppress:
msg = Nullify.clean(msg)
await ctx.message.channel.send(msg)
return
# Create blank embed
stat_embed = discord.Embed(color=member.color)
promoArray = self.settings.getServerStat(ctx.message.guild, "PromotionArray")
# promoSorted = sorted(promoArray, key=itemgetter('XP', 'Name'))
promoSorted = sorted(promoArray, key=lambda x:int(x['XP']))
memName = member.name
# Get member's avatar url
avURL = member.avatar_url
if not len(avURL):
avURL = member.default_avatar_url
if member.nick:
# We have a nickname
# Add to embed
stat_embed.set_author(name='{}, who currently goes by {}'.format(member.name, member.nick), icon_url=avURL)
else:
# Add to embed
stat_embed.set_author(name='{}'.format(member.name), icon_url=avURL)
highestRole = ""
for role in promoSorted:
# We *can* have this role, let's see if we already do
currentRole = None
for aRole in member.roles:
# Get the role that corresponds to the id
if str(aRole.id) == str(role['ID']):
# We found it
highestRole = aRole.name
if highestRole == "":
msg = '*{}* has not acquired a rank yet.'.format(DisplayName.name(member))
# Add Rank
stat_embed.add_field(name="Current Rank", value='None acquired yet', inline=True)
else:
msg = '*{}* is a **{}**!'.format(DisplayName.name(member), highestRole)
# Add Rank
stat_embed.add_field(name="Current Rank", value=highestRole, inline=True)
# await ctx.message.channel.send(msg)
await ctx.message.channel.send(embed=stat_embed)
@rank.error
async def rank_error(self, ctx, error):
msg = 'rank Error: {}'.format(error)
await ctx.channel.send(msg)
# List the top 10 xp-holders
@commands.command(pass_context=True)
async def leaderboard(self, ctx, total : int = 10):
"""List the top xp-holders (max of 50)."""
promoArray = self.settings.getServerStat(ctx.message.guild, "Members")
promoSorted = sorted(promoArray, key=lambda x:int(x['XP']))
# promoSorted = sorted(promoArray, key=itemgetter('XP'))
startIndex = 0
if total > 50:
total = 50
if total < 1:
total = 1
msg = ""
if len(promoSorted) < total:
total = len(promoSorted)
if len(promoSorted):
# makes sure we have at least 1 user - shouldn't be necessary though
startIndex = len(promoSorted)-1
msg = "**Top** ***{}*** **XP-Holders in** ***{}***:\n".format(total, self.suppressed(ctx.guild, ctx.guild.name))
for i in range(0, total):
# Walk backward from startIndex (the highest xp) through the top entries
index = startIndex-i
# cMemName = "{}#{}".format(promoSorted[index]['Name'], promoSorted[index]['Discriminator'])
cMember = DisplayName.memberForID(promoSorted[index]['ID'], ctx.message.guild)
#if ctx.message.guild.get_member_named(cMemName):
# Member exists
#cMember = ctx.message.guild.get_member_named(cMemName)
#else:
#cMember = None
if cMember:
cMemberDisplay = DisplayName.name(cMember)
else:
cMemberDisplay = promoSorted[index]['Name']
msg = '{}\n{}. *{}* - *{:,} xp*'.format(msg, i+1, cMemberDisplay, promoSorted[index]['XP'])
await ctx.message.channel.send(msg)
# List the top 10 xp-holders
@commands.command(pass_context=True)
async def bottomxp(self, ctx, total : int = 10):
"""List the bottom xp-holders (max of 50)."""
promoArray = self.settings.getServerStat(ctx.message.guild, "Members")
# promoSorted = sorted(promoArray, key=itemgetter('XP'))
promoSorted = sorted(promoArray, key=lambda x:int(x['XP']))
startIndex = 0
if total > 50:
total = 50
if total < 1:
total = 1
msg = ""
if len(promoSorted) < total:
total = len(promoSorted)
if len(promoSorted):
# makes sure we have at least 1 user - shouldn't be necessary though
msg = "**Bottom** ***{}*** **XP-Holders in** ***{}***:\n".format(total, self.suppressed(ctx.guild, ctx.guild.name))
for i in range(0, total):
# Loop through from startIndex to startIndex+total-1
index = startIndex+i
# cMemName = "{}#{}".format(promoSorted[index]['Name'], promoSorted[index]['Discriminator'])
cMember = DisplayName.memberForID(promoSorted[index]['ID'], ctx.message.guild)
#if ctx.message.guild.get_member_named(cMemName):
# Member exists
#cMember = ctx.message.guild.get_member_named(cMemName)
#else:
#cMember = None
if cMember:
cMemberDisplay = DisplayName.name(cMember)
else:
cMemberDisplay = promoSorted[index]['Name']
msg = '{}\n{}. *{}* - *{:,} xp*'.format(msg, i+1, cMemberDisplay, promoSorted[index]['XP'])
await ctx.message.channel.send(msg)
# List the xp and xp reserve of a user
@commands.command(pass_context=True)
async def stats(self, ctx, *, member= None):
"""List the xp and xp reserve of a listed member."""
# Check if we're suppressing @here and @everyone mentions
if self.settings.getServerStat(ctx.message.guild, "SuppressMentions").lower() == "yes":
suppress = True
else:
suppress = False
if member is None:
member = ctx.message.author
if type(member) is str:
memberName = member
member = DisplayName.memberForName(memberName, ctx.message.guild)
if not member:
msg = 'I couldn\'t find *{}*...'.format(memberName)
# Check for suppress
if suppress:
msg = Nullify.clean(msg)
await ctx.message.channel.send(msg)
return
# Create blank embed
stat_embed = discord.Embed(color=member.color)
# Get user's xp
newStat = int(self.settings.getUserStat(member, ctx.message.guild, "XP"))
newState = int(self.settings.getUserStat(member, ctx.message.guild, "XPReserve"))
# Add XP and XP Reserve
stat_embed.add_field(name="XP", value="{:,}".format(newStat), inline=True)
stat_embed.add_field(name="XP Reserve", value="{:,}".format(newState), inline=True)
memName = member.name
# Get member's avatar url
avURL = member.avatar_url
if not len(avURL):
avURL = member.default_avatar_url
if member.nick:
# We have a nickname
msg = "__***{},*** **who currently goes by** ***{}:***__\n\n".format(member.name, member.nick)
# Add to embed
stat_embed.set_author(name='{}, who currently goes by {}'.format(member.name, member.nick), icon_url=avURL)
else:
msg = "__***{}:***__\n\n".format(member.name)
# Add to embed
stat_embed.set_author(name='{}'.format(member.name), icon_url=avURL)
msg = "{}**Joined:** *{}*\n".format(msg, member.joined_at.strftime("%Y-%m-%d %I:%M %p")) # I think this will work
msg = "{}**XP:** *{:,}*\n".format(msg, newStat)
msg = "{}**XP Reserve:** *{:,}*\n".format(msg, newState)
# Add Joined
stat_embed.add_field(name="Joined", value=member.joined_at.strftime("%Y-%m-%d %I:%M %p"), inline=True)
# msg = '*{}* has *{} xp*, and can gift up to *{} xp!*'.format(DisplayName.name(member), newStat, newState)
# Get user's current role
promoArray = self.settings.getServerStat(ctx.message.guild, "PromotionArray")
# promoSorted = sorted(promoArray, key=itemgetter('XP', 'Name'))
promoSorted = sorted(promoArray, key=lambda x:int(x['XP']))
highestRole = None
if len(promoSorted):
nextRole = promoSorted[0]
else:
nextRole = None
for role in promoSorted:
if int(nextRole['XP']) < newStat:
nextRole = role
# We *can* have this role, let's see if we already do
currentRole = None
for aRole in member.roles:
# Get the role that corresponds to the id
if str(aRole.id) == str(role['ID']):
# We found it
highestRole = aRole.name
if len(promoSorted) > (promoSorted.index(role)+1):
# There's more roles above this
nRoleIndex = promoSorted.index(role)+1
nextRole = promoSorted[nRoleIndex]
if highestRole:
msg = '{}**Current Rank:** *{}*\n'.format(msg, highestRole)
# Add Rank
stat_embed.add_field(name="Current Rank", value=highestRole, inline=True)
else:
if len(promoSorted):
# Need to have ranks to acquire one
msg = '{}They have not acquired a rank yet.\n'.format(msg)
# Add Rank
stat_embed.add_field(name="Current Rank", value='None acquired yet', inline=True)
if nextRole and (newStat < int(nextRole['XP'])):
msg = '{}\n*{:,}* more *xp* required to advance to **{}**'.format(msg, int(nextRole['XP']) - newStat, nextRole['Name'])
# Add Next Rank
stat_embed.add_field(name="Next Rank", value='{} ({:,} more xp required)'.format(nextRole['Name'], int(nextRole['XP'])-newStat), inline=True)
if member.activity:
if member.activity.name:
# Playing a game!
stat_embed.add_field(name="Playing", value=str(member.activity.name), inline=True)
# add created_at footer
created = "Created at " + member.created_at.strftime("%Y-%m-%d %I:%M %p") + " UTC"
stat_embed.set_footer(text=created)
#await ctx.message.channel.send(msg)
await ctx.message.channel.send(embed=stat_embed)
@stats.error
async def stats_error(self, ctx, error):
msg = 'stats Error: {}'.format(error)
await ctx.channel.send(msg)
# List the xp and xp reserve of a user
@commands.command(pass_context=True)
async def xpinfo(self, ctx):
"""Gives a quick rundown of the xp system."""
server = ctx.message.guild
channel = ctx.message.channel
# Check if we're suppressing @here and @everyone mentions
if self.settings.getServerStat(server, "SuppressMentions").lower() == "yes":
suppress = True
else:
suppress = False
serverName = self.suppressed(server, server.name)
hourlyXP = int(self.settings.getServerStat(server, "HourlyXP"))
hourlyXPReal = int(self.settings.getServerStat(server, "HourlyXPReal"))
xpPerMessage = int(self.settings.getServerStat(server, "XPPerMessage"))
xpRPerMessage = int(self.settings.getServerStat(server, "XPRPerMessage"))
if not xpPerMessage:
xpPerMessage = 0
if not xpRPerMessage:
xpRPerMessage = 0
if not hourlyXPReal:
hourlyXPReal = 0
if not hourlyXP:
hourlyXP = 0
onlyOnline = self.settings.getServerStat(server, "RequireOnline")
xpProm = self.settings.getServerStat(server, "XPPromote")
xpDem = self.settings.getServerStat(server, "XPDemote")
xpStr = None
if (xpProm.lower() == "yes") and (xpDem.lower() == "yes"):
# Bot promote and demote
xpStr = "This is what I check to handle promotions and demotions.\n"
else:
if xpProm.lower() == "yes":
xpStr = "This is what I check to handle promotions.\n"
elif xpDem.lower() == "yes":
xpStr = "This is what I check to handle demotions.\n"
msg = "__***{}'s*** **XP System**__\n\n__What's What:__\n\n".format(serverName)
msg = "{}**XP:** This is the xp you have *earned.*\nIt comes from other users gifting you xp, or if you're lucky enough to `{}gamble` and win.\n".format(msg, ctx.prefix)
if xpStr:
msg = "{}{}".format(msg, xpStr)
hourStr = None
if hourlyXPReal > 0:
hourStr = "Currently, you receive *{} xp* each hour".format(hourlyXPReal)
if onlyOnline.lower() == "yes":
hourStr = "{} (but *only* if your status is *Online*).".format(hourStr)
else:
hourStr = "{}.".format(hourStr)
if hourStr:
msg = "{}{}\n".format(msg, hourStr)
if xpPerMessage > 0:
msg = "{}Currently, you receive *{} xp* per message.\n".format(msg, xpPerMessage)
msg = "{}This can only be taken away by an *admin*.\n\n".format(msg)
msg = "{}**XP Reserve:** This is the xp you can *gift*, *gamble*, or use to *feed* me.\n".format(msg)
hourStr = None
if hourlyXP > 0:
hourStr = "Currently, you receive *{} xp reserve* each hour".format(hourlyXP)
if onlyOnline.lower() == "yes":
hourStr = "{} (but *only* if your status is *Online*).".format(hourStr)
else:
hourStr = "{}.".format(hourStr)
if hourStr:
msg = "{}{}\n".format(msg, hourStr)
if xpRPerMessage > 0:
msg = "{}Currently, you receive *{} xp reserve* per message.\n".format(msg, xpRPerMessage)
msg = "{}\n__How Do I Use It?:__\n\nYou can gift other users xp by using the `{}xp [user] [amount]` command.\n".format(msg, ctx.prefix)
msg = "{}This pulls from your *xp reserve*, and adds to their *xp*.\n".format(msg)
msg = "{}It does not change the *xp* you have *earned*.\n\n".format(msg)
msg = "{}You can gamble your *xp reserve* to have a chance to win a percentage back as *xp* for yourself.\n".format(msg)
msg = "{}You do so by using the `{}gamble [amount in multiple of 10]` command.\n".format(msg, ctx.prefix)
msg = "{}This pulls from your *xp reserve* - and if you win, adds to your *xp*.\n\n".format(msg)
msg = "{}You can also *feed* me.\n".format(msg)
msg = "{}This is done with the `{}feed [amount]` command.\n".format(msg, ctx.prefix)
msg = "{}This pulls from your *xp reserve* - and doesn't affect your *xp*.\n\n".format(msg)
msg = "{}You can check your *xp*, *xp reserve*, current role, and next role using the `{}stats` command.\n".format(msg, ctx.prefix)
msg = "{}You can check another user's stats with the `{}stats [user]` command.\n\n".format(msg, ctx.prefix)
# Get the required role for using the xp system
role = self.settings.getServerStat(server, "RequiredXPRole")
if role is None or role == "":
msg = '{}Currently, **Everyone** can *give xp*, *gamble*, and *feed* the bot.\n\n'.format(msg)
else:
# Role is set - let's get its name
found = False
for arole in server.roles:
if str(arole.id) == str(role):
found = True
vowels = "aeiou"
if arole.name[:1].lower() in vowels:
msg = '{}Currently, you need to be an **{}** to *give xp*, *gamble*, or *feed* the bot.\n\n'.format(msg, arole.name)
else:
msg = '{}Currently, you need to be a **{}** to *give xp*, *gamble*, or *feed* the bot.\n\n'.format(msg, arole.name)
if not found:
msg = '{}There is no role that matches id: `{}` for using the xp system - consider updating that setting.\n\n'.format(msg, role)
msg = "{}Hopefully that clears things up!".format(msg)
# Check for suppress
if suppress:
msg = Nullify.clean(msg)
await ctx.message.channel.send(msg)
"""Simulation to examine the P(reject) as the parameters for each problem are
varied. What varies will depend on the problem."""
__author__ = 'wittawat'
import kgof
import kgof.data as data
import kgof.glo as glo
import kgof.density as density
import kgof.goftest as gof
import kgof.intertst as tgof
import kgof.mmd as mgof
import kgof.util as util
import kgof.kernel as kernel
# need independent_jobs package
# https://github.com/karlnapf/independent-jobs
# The independent_jobs and kgof packages have to be on the Python search path (e.g., set via .bashrc)
import independent_jobs as inj
from independent_jobs.jobs.IndependentJob import IndependentJob
from independent_jobs.results.SingleResult import SingleResult
from independent_jobs.aggregators.SingleResultAggregator import SingleResultAggregator
from independent_jobs.engines.BatchClusterParameters import BatchClusterParameters
from independent_jobs.engines.SerialComputationEngine import SerialComputationEngine
from independent_jobs.engines.SlurmComputationEngine import SlurmComputationEngine
from independent_jobs.tools.Log import logger
import logging
import math
#import numpy as np
import autograd.numpy as np
import os
import sys
import time
"""
All the job functions return a dictionary with the following keys:
- goftest: the test object (a job may or may not include this key)
- test_result: the result from calling perform_test(te).
- time_secs: run time in seconds
"""
def job_fssdJ1q_med(p, data_source, tr, te, r, J=1, null_sim=None):
"""
FSSD test with a Gaussian kernel, where the test locations are randomized,
and the Gaussian width is set with the median heuristic. Use full sample.
No training/testing splits.
p: an UnnormalizedDensity
data_source: a DataSource
tr, te: Data
r: trial number (positive integer)
"""
if null_sim is None:
null_sim = gof.FSSDH0SimCovObs(n_simulate=2000, seed=r)
# full data
data = tr + te
X = data.data()
with util.ContextTimer() as t:
# median heuristic
med = util.meddistance(X, subsample=1000)
k = kernel.KGauss(med**2)
V = util.fit_gaussian_draw(X, J, seed=r+3)
fssd_med = gof.FSSD(p, k, V, null_sim=null_sim, alpha=alpha)
fssd_med_result = fssd_med.perform_test(data)
return {
'goftest': fssd_med,
'test_result': fssd_med_result, 'time_secs': t.secs}
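# A minimal standalone run of the job above, as a sketch (the particular
# density/data source are assumptions; `alpha` is a module-level global
# defined further below):
#   p = density.IsotropicNormal(np.zeros(2), 1)
#   ds = data.DSIsotropicNormal(np.zeros(2), 1)
#   dat = ds.sample(400, seed=8)
#   tr, te = dat.split_tr_te(tr_proportion=0.5, seed=9)
#   result = job_fssdJ1q_med(p, ds, tr, te, r=8)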
def job_fssdJ5q_med(p, data_source, tr, te, r):
"""
FSSD. J=5
"""
return job_fssdJ1q_med(p, data_source, tr, te, r, J=5)
def job_fssdJ1q_opt(p, data_source, tr, te, r, J=1, null_sim=None):
"""
FSSD with optimization on tr. Test on te. Use a Gaussian kernel.
"""
if null_sim is None:
null_sim = gof.FSSDH0SimCovObs(n_simulate=2000, seed=r)
Xtr = tr.data()
with util.ContextTimer() as t:
# Use grid search to initialize the gwidth
n_gwidth_cand = 5
gwidth_factors = 2.0**np.linspace(-3, 3, n_gwidth_cand)
med2 = util.meddistance(Xtr, 1000)**2
k = kernel.KGauss(med2*2)
# fit a Gaussian to the data and draw to initialize V0
V0 = util.fit_gaussian_draw(Xtr, J, seed=r+1, reg=1e-6)
list_gwidth = np.hstack( ( (med2)*gwidth_factors ) )
besti, objs = gof.GaussFSSD.grid_search_gwidth(p, tr, V0, list_gwidth)
gwidth = list_gwidth[besti]
assert util.is_real_num(gwidth), 'gwidth not real. Was %s'%str(gwidth)
assert gwidth > 0, 'gwidth not positive. Was %.3g'%gwidth
logging.info('After grid search, gwidth=%.3g'%gwidth)
ops = {
'reg': 1e-2,
'max_iter': 40,
'tol_fun': 1e-4,
'disp': True,
'locs_bounds_frac':10.0,
'gwidth_lb': 1e-1,
'gwidth_ub': 1e4,
}
V_opt, gwidth_opt, info = gof.GaussFSSD.optimize_locs_widths(p, tr,
gwidth, V0, **ops)
# Use the optimized parameters to construct a test
k_opt = kernel.KGauss(gwidth_opt)
fssd_opt = gof.FSSD(p, k_opt, V_opt, null_sim=null_sim, alpha=alpha)
fssd_opt_result = fssd_opt.perform_test(te)
return {'test_result': fssd_opt_result, 'time_secs': t.secs,
'goftest': fssd_opt, 'opt_info': info,
}
def job_fssdJ5q_opt(p, data_source, tr, te, r):
return job_fssdJ1q_opt(p, data_source, tr, te, r, J=5)
def job_fssdJ10q_opt(p, data_source, tr, te, r):
return job_fssdJ1q_opt(p, data_source, tr, te, r, J=10)
def job_fssdJ5p_opt(p, data_source, tr, te, r):
"""
The suffix p means that p is sampled to get a sample for computing the
covariance matrix under H0.
"""
null_sim = gof.FSSDH0SimCovDraw(n_draw=2000, n_simulate=2000, seed=r)
return job_fssdJ1q_opt(p, data_source, tr, te, r, J=5, null_sim=null_sim)
def job_fssdJ10p_opt(p, data_source, tr, te, r):
"""
The suffix p means that p is sampled to get a sample for computing the
covariance matrix under H0.
"""
null_sim = gof.FSSDH0SimCovDraw(n_draw=2000, n_simulate=2000, seed=r)
return job_fssdJ1q_opt(p, data_source, tr, te, r, J=10, null_sim=null_sim)
def job_fssdJ1q_imq_optv(p, data_source, tr, te, r, J=1, b=-0.5, null_sim=None):
"""
FSSD with optimization on tr. Test on te. Use an inverse multiquadric
kernel (IMQ). Optimize only the test locations (V). Fix the kernel
parameters to b = -0.5, c=1. These are the recommended values from
Measuring Sample Quality with Kernels
Jackson Gorham, Lester Mackey
"""
if null_sim is None:
null_sim = gof.FSSDH0SimCovObs(n_simulate=2000, seed=r)
Xtr = tr.data()
with util.ContextTimer() as t:
# IMQ kernel parameters: b and c
c = 1.0
# fit a Gaussian to the data and draw to initialize V0
V0 = util.fit_gaussian_draw(Xtr, J, seed=r+1, reg=1e-6)
ops = {
'reg': 1e-5,
'max_iter': 30,
'tol_fun': 1e-6,
'disp': True,
'locs_bounds_frac':20.0,
}
V_opt, info = gof.IMQFSSD.optimize_locs(p, tr, b, c, V0, **ops)
k_imq = kernel.KIMQ(b=b, c=c)
# Use the optimized parameters to construct a test
fssd_imq = gof.FSSD(p, k_imq, V_opt, null_sim=null_sim, alpha=alpha)
fssd_imq_result = fssd_imq.perform_test(te)
return {'test_result': fssd_imq_result, 'time_secs': t.secs,
'goftest': fssd_imq, 'opt_info': info,
}
def job_fssdJ5q_imq_optv(p, data_source, tr, te, r):
return job_fssdJ1q_imq_optv(p, data_source, tr, te, r, J=5)
def job_fssdJ5q_imqb1_optv(p, data_source, tr, te, r):
return job_fssdJ1q_imq_optv(p, data_source, tr, te, r, J=5, b=-1.0)
def job_fssdJ5q_imqb2_optv(p, data_source, tr, te, r):
return job_fssdJ1q_imq_optv(p, data_source, tr, te, r, J=5, b=-2.0)
def job_fssdJ5q_imqb3_optv(p, data_source, tr, te, r):
return job_fssdJ1q_imq_optv(p, data_source, tr, te, r, J=5, b=-3.0)
def job_fssdJ1q_imq_opt(p, data_source, tr, te, r, J=1, null_sim=None):
"""
FSSD with optimization on tr. Test on te. Use an inverse multiquadric
kernel (IMQ). Optimize all parameters: the test locations (V), b and c (in
the kernel).
"""
if null_sim is None:
null_sim = gof.FSSDH0SimCovObs(n_simulate=2000, seed=r)
Xtr = tr.data()
with util.ContextTimer() as t:
# Initial IMQ kernel parameters: b and c
b0 = -0.5
c0 = 1.0
# fit a Gaussian to the data and draw to initialize V0
V0 = util.fit_gaussian_draw(Xtr, J, seed=r+1, reg=1e-6)
ops = {
'reg': 1e-5,
'max_iter': 50,
'tol_fun': 1e-6,
'disp': True,
'locs_bounds_frac':20.0,
# IMQ kernel bounds
'b_lb': -3,
'b_ub': -0.5,
'c_lb': 1e-1,
'c_ub': np.sqrt(10),
}
V_opt, b_opt, c_opt, info = gof.IMQFSSD.optimize_locs_params(p, tr, b0,
c0, V0, **ops)
k_imq = kernel.KIMQ(b=b_opt, c=c_opt)
# Use the optimized parameters to construct a test
fssd_imq = gof.FSSD(p, k_imq, V_opt, null_sim=null_sim, alpha=alpha)
fssd_imq_result = fssd_imq.perform_test(te)
return {'test_result': fssd_imq_result, 'time_secs': t.secs,
'goftest': fssd_imq, 'opt_info': info,
}
def job_fssdJ5q_imq_opt(p, data_source, tr, te, r, null_sim=None):
return job_fssdJ1q_imq_opt(p, data_source, tr, te, r, J=5)
def job_fssdJ1q_imq_optbv(p, data_source, tr, te, r, J=1, null_sim=None):
"""
FSSD with optimization on tr. Test on te. Use an inverse multiquadric
kernel (IMQ). Optimize the test locations (V), and b. Fix c (in the kernel)
"""
if null_sim is None:
null_sim = gof.FSSDH0SimCovObs(n_simulate=2000, seed=r)
Xtr = tr.data()
with util.ContextTimer() as t:
# Initial IMQ kernel parameters: b and c
b0 = -0.5
# Fix c to this value
c = 1.0
c0 = c
# fit a Gaussian to the data and draw to initialize V0
V0 = util.fit_gaussian_draw(Xtr, J, seed=r+1, reg=1e-6)
ops = {
'reg': 1e-5,
'max_iter': 40,
'tol_fun': 1e-6,
'disp': True,
'locs_bounds_frac':20.0,
# IMQ kernel bounds
'b_lb': -20,
'c_lb': c,
'c_ub': c,
}
V_opt, b_opt, c_opt, info = gof.IMQFSSD.optimize_locs_params(p, tr, b0,
c0, V0, **ops)
k_imq = kernel.KIMQ(b=b_opt, c=c_opt)
# Use the optimized parameters to construct a test
fssd_imq = gof.FSSD(p, k_imq, V_opt, null_sim=null_sim, alpha=alpha)
fssd_imq_result = fssd_imq.perform_test(te)
return {'test_result': fssd_imq_result, 'time_secs': t.secs,
'goftest': fssd_imq, 'opt_info': info,
}
def job_fssdJ5q_imq_optbv(p, data_source, tr, te, r, null_sim=None):
return job_fssdJ1q_imq_optbv(p, data_source, tr, te, r, J=5)
def job_me_opt(p, data_source, tr, te, r, J=5):
"""
ME test of Jitkrittum et al., 2016 used as a goodness-of-fit test.
Gaussian kernel. Optimize test locations and Gaussian width.
"""
data = tr + te
X = data.data()
with util.ContextTimer() as t:
# median heuristic
#pds = p.get_datasource()
#datY = pds.sample(data.sample_size(), seed=r+294)
#Y = datY.data()
#XY = np.vstack((X, Y))
#med = util.meddistance(XY, subsample=1000)
op = {'n_test_locs': J, 'seed': r+5, 'max_iter': 40,
'batch_proportion': 1.0, 'locs_step_size': 1.0,
'gwidth_step_size': 0.1, 'tol_fun': 1e-4,
'reg': 1e-4}
# optimize on the training set
me_opt = tgof.GaussMETestOpt(p, n_locs=J, tr_proportion=tr_proportion,
alpha=alpha, seed=r+111)
me_result = me_opt.perform_test(data, op)
return { 'test_result': me_result, 'time_secs': t.secs}
def job_kstein_med(p, data_source, tr, te, r):
"""
Kernel Stein discrepancy test of Liu et al., 2016 and Chwialkowski et al.,
2016. Use full sample. Use Gaussian kernel.
"""
# full data
data = tr + te
X = data.data()
with util.ContextTimer() as t:
# median heuristic
med = util.meddistance(X, subsample=1000)
k = kernel.KGauss(med**2)
kstein = gof.KernelSteinTest(p, k, alpha=alpha, n_simulate=1000, seed=r)
kstein_result = kstein.perform_test(data)
return { 'test_result': kstein_result, 'time_secs': t.secs}
def job_kstein_imq(p, data_source, tr, te, r):
"""
Kernel Stein discrepancy test of Liu et al., 2016 and Chwialkowski et al.,
2016. Use full sample. Use the inverse multiquadric kernel (IMQ) studied
in
Measuring Sample Quality with Kernels
Gorham and Mackey 2017.
Parameters are fixed to the recommended values: beta = b = -0.5, c = 1.
"""
# full data
data = tr + te
X = data.data()
with util.ContextTimer() as t:
k = kernel.KIMQ(b=-0.5, c=1.0)
kstein = gof.KernelSteinTest(p, k, alpha=alpha, n_simulate=1000, seed=r)
kstein_result = kstein.perform_test(data)
return { 'test_result': kstein_result, 'time_secs': t.secs}
def job_lin_kstein_med(p, data_source, tr, te, r):
"""
Linear-time version of the kernel Stein discrepancy test of Liu et al.,
2016 and Chwialkowski et al., 2016. Use full sample.
"""
# full data
data = tr + te
X = data.data()
with util.ContextTimer() as t:
# median heuristic
med = util.meddistance(X, subsample=1000)
k = kernel.KGauss(med**2)
lin_kstein = gof.LinearKernelSteinTest(p, k, alpha=alpha, seed=r)
lin_kstein_result = lin_kstein.perform_test(data)
return { 'test_result': lin_kstein_result, 'time_secs': t.secs}
def job_mmd_med(p, data_source, tr, te, r):
"""
MMD test of Gretton et al., 2012 used as a goodness-of-fit test.
Require the ability to sample from p i.e., the UnnormalizedDensity p has
to be able to return a non-None from get_datasource()
"""
# full data
data = tr + te
X = data.data()
with util.ContextTimer() as t:
# median heuristic
pds = p.get_datasource()
datY = pds.sample(data.sample_size(), seed=r+294)
Y = datY.data()
XY = np.vstack((X, Y))
# If p, q differ very little, the median may be very small, rejecting H0
# when it should not?
medx = util.meddistance(X, subsample=1000)
medy = util.meddistance(Y, subsample=1000)
medxy = util.meddistance(XY, subsample=1000)
med_avg = (medx+medy+medxy)/3.0
k = kernel.KGauss(med_avg**2)
mmd_test = mgof.QuadMMDGof(p, k, n_permute=400, alpha=alpha, seed=r)
mmd_result = mmd_test.perform_test(data)
return { 'test_result': mmd_result, 'time_secs': t.secs}
def job_mmd_opt(p, data_source, tr, te, r):
"""
MMD test of Gretton et al., 2012 used as a goodness-of-fit test.
Require the ability to sample from p i.e., the UnnormalizedDensity p has
to be able to return a non-None from get_datasource()
With optimization. Gaussian kernel.
"""
data = tr + te
X = data.data()
with util.ContextTimer() as t:
# median heuristic
pds = p.get_datasource()
datY = pds.sample(data.sample_size(), seed=r+294)
Y = datY.data()
XY = np.vstack((X, Y))
med = util.meddistance(XY, subsample=1000)
# Construct a list of kernels to try based on multiples of the median
# heuristic
#list_gwidth = np.hstack( (np.linspace(20, 40, 10), (med**2)
# *(2.0**np.linspace(-2, 2, 20) ) ) )
list_gwidth = (med**2)*(2.0**np.linspace(-3, 3, 30) )
list_gwidth.sort()
candidate_kernels = [kernel.KGauss(gw2) for gw2 in list_gwidth]
mmd_opt = mgof.QuadMMDGofOpt(p, n_permute=300, alpha=alpha, seed=r+56)
mmd_result = mmd_opt.perform_test(data,
candidate_kernels=candidate_kernels,
tr_proportion=tr_proportion, reg=1e-3)
return { 'test_result': mmd_result, 'time_secs': t.secs}
# Define our custom Job, which inherits from base class IndependentJob
class Ex2Job(IndependentJob):
def __init__(self, aggregator, p, data_source,
prob_label, rep, job_func, prob_param):
#walltime = 60*59*24
walltime = 60*59
memory = int(tr_proportion*sample_size*1e-2) + 50
IndependentJob.__init__(self, aggregator, walltime=walltime,
memory=memory)
# p: an UnnormalizedDensity
self.p = p
self.data_source = data_source
self.prob_label = prob_label
self.rep = rep
self.job_func = job_func
self.prob_param = prob_param
# we need to define the abstract compute method. It has to return an instance
# of JobResult base class
def compute(self):
p = self.p
data_source = self.data_source
r = self.rep
prob_param = self.prob_param
job_func = self.job_func
# sample_size is a global variable
data = data_source.sample(sample_size, seed=r)
with util.ContextTimer() as t:
tr, te = data.split_tr_te(tr_proportion=tr_proportion, seed=r+21 )
prob_label = self.prob_label
logger.info("computing. %s. prob=%s, r=%d,\
param=%.3g"%(job_func.__name__, prob_label, r, prob_param))
job_result = job_func(p, data_source, tr, te, r)
# create ScalarResult instance
result = SingleResult(job_result)
# submit the result to my own aggregator
self.aggregator.submit_result(result)
func_name = job_func.__name__
logger.info("done. ex2: %s, prob=%s, r=%d, param=%.3g. Took: %.3g s "%(func_name,
prob_label, r, prob_param, t.secs))
# save result
fname = '%s-%s-n%d_r%d_p%g_a%.3f_trp%.2f.p' \
%(prob_label, func_name, sample_size, r, prob_param, alpha,
tr_proportion)
glo.ex_save_result(ex, job_result, prob_label, fname)
# This import is needed so that pickle knows about the class Ex2Job.
# pickle is used when collecting the results from the submitted jobs.
from kgof.ex.ex2_prob_params import Ex2Job
from kgof.ex.ex2_prob_params import job_fssdJ1q_med
from kgof.ex.ex2_prob_params import job_fssdJ5q_med
from kgof.ex.ex2_prob_params import job_fssdJ1q_opt
from kgof.ex.ex2_prob_params import job_fssdJ5q_opt
from kgof.ex.ex2_prob_params import job_fssdJ10q_opt
from kgof.ex.ex2_prob_params import job_fssdJ5p_opt
from kgof.ex.ex2_prob_params import job_fssdJ10p_opt
from kgof.ex.ex2_prob_params import job_fssdJ1q_imq_optv
from kgof.ex.ex2_prob_params import job_fssdJ5q_imq_optv
from kgof.ex.ex2_prob_params import job_fssdJ5q_imqb1_optv
from kgof.ex.ex2_prob_params import job_fssdJ5q_imqb2_optv
from kgof.ex.ex2_prob_params import job_fssdJ5q_imqb3_optv
from kgof.ex.ex2_prob_params import job_fssdJ1q_imq_opt
from kgof.ex.ex2_prob_params import job_fssdJ5q_imq_opt
from kgof.ex.ex2_prob_params import job_fssdJ1q_imq_optbv
from kgof.ex.ex2_prob_params import job_fssdJ5q_imq_optbv
from kgof.ex.ex2_prob_params import job_me_opt
from kgof.ex.ex2_prob_params import job_kstein_med
from kgof.ex.ex2_prob_params import job_kstein_imq
from kgof.ex.ex2_prob_params import job_lin_kstein_med
from kgof.ex.ex2_prob_params import job_mmd_med
from kgof.ex.ex2_prob_params import job_mmd_opt
#--- experimental setting -----
ex = 2
# full sample size = n (split into training/test by tr_proportion below)
sample_size = 1000
# significance level of the tests
alpha = 0.05
# training proportion of the FSSD test, MMD-opt test
tr_proportion = 0.2
# repetitions for each parameter setting
reps = 200
method_job_funcs = [
job_fssdJ5q_opt,
job_fssdJ5q_med,
job_kstein_med,
job_lin_kstein_med,
job_mmd_opt,
job_me_opt,
#job_fssdJ5q_imq_opt,
#job_fssdJ5q_imq_optv,
#job_fssdJ5q_imq_optbv,
#job_fssdJ5q_imqb1_optv,
#job_fssdJ5q_imqb2_optv,
#job_fssdJ5q_imqb3_optv,
#job_fssdJ10q_opt,
#job_fssdJ5p_opt,
#job_fssdJ10p_opt,
#job_kstein_imq,
#job_mmd_med,
]
# If is_rerun==False, do not rerun the experiment if a result file for the current
# setting of (pi, r) already exists.
is_rerun = False
#---------------------------
def gaussbern_rbm_probs(stds_perturb_B, dx=50, dh=10, n=sample_size):
"""
Get a sequence of Gaussian-Bernoulli RBM problems.
We follow the parameter settings as described in section 6 of Liu et al.,
2016.
- stds_perturb_B: a list of Gaussian noise standard deviations for perturbing B.
- dx: observed dimension
- dh: latent dimension
"""
probs = []
for i, std in enumerate(stds_perturb_B):
with util.NumpySeedContext(seed=i+1000):
B = np.random.randint(0, 2, (dx, dh))*2 - 1.0
b = np.random.randn(dx)
c = np.random.randn(dh)
p = density.GaussBernRBM(B, b, c)
if std <= 1e-8:
B_perturb = B
else:
B_perturb = B + np.random.randn(dx, dh)*std
gb_rbm = data.DSGaussBernRBM(B_perturb, b, c, burnin=2000)
probs.append((std, p, gb_rbm))
return probs
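# For instance, gaussbern_rbm_probs([0, 0.02]) yields two (std, p, ds)
# tuples: with std=0 the data source samples from the unperturbed model
# itself (H0 true); with std=0.02 the mixing matrix B is perturbed by
# Gaussian noise before sampling. Note that each tuple draws its own RBM
# (seeded by its index), so the densities differ across tuples.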
def get_pqsource_list(prob_label):
"""
Return [(prob_param, p, ds) for ... ], a list of tuples
where
- prob_param: a problem parameter. Each parameter has to be a
scalar (so that we can plot them later). Parameters are preferably
positive integers.
- p: a Density representing the distribution p
- ds: a DataSource, each corresponding to one parameter setting.
The DataSource generates sample from q.
"""
sg_ds = [1, 5, 10, 15]
gmd_ds = [5, 20, 40, 60]
# vary the mean
gmd_d10_ms = [0, 0.02, 0.04, 0.06]
gvinc_d1_vs = [1, 1.5, 2, 2.5]
gvinc_d5_vs = [1, 1.5, 2, 2.5]
gvsub1_d1_vs = [0.1, 0.3, 0.5, 0.7]
gvd_ds = [1, 5, 10, 15]
#gb_rbm_dx50_dh10_stds = [0, 0.01, 0.02, 0.03]
gb_rbm_dx50_dh10_stds = [0, 0.02, 0.04, 0.06]
#gb_rbm_dx50_dh10_stds = [0]
gb_rbm_dx50_dh40_stds = [0, 0.01, 0.02, 0.04, 0.06]
glaplace_ds = [1, 5, 10, 15]
prob2tuples = {
# H0 is true. vary d. P = Q = N(0, I)
'sg': [(d, density.IsotropicNormal(np.zeros(d), 1),
data.DSIsotropicNormal(np.zeros(d), 1) ) for d in sg_ds],
# vary d. P = N(0, I), Q = N( (c,..0), I)
'gmd': [(d, density.IsotropicNormal(np.zeros(d), 1),
data.DSIsotropicNormal(np.hstack((1, np.zeros(d-1))), 1) )
for d in gmd_ds
],
# P = N(0, I), Q = N( (m, ..0), I). Vary m
'gmd_d10_ms': [(m, density.IsotropicNormal(np.zeros(10), 1),
data.DSIsotropicNormal(np.hstack((m, np.zeros(9))), 1) )
for m in gmd_d10_ms
],
# d=1. Increase the variance. P = N(0, I). Q = N(0, v*I)
'gvinc_d1': [(var, density.IsotropicNormal(np.zeros(1), 1),
data.DSIsotropicNormal(np.zeros(1), var) )
for var in gvinc_d1_vs
],
# d=5. Increase the variance. P = N(0, I). Q = N(0, v*I)
'gvinc_d5': [(var, density.IsotropicNormal(np.zeros(5), 1),
data.DSIsotropicNormal(np.zeros(5), var) )
for var in gvinc_d5_vs
],
# d=1. P = N(0,1), Q = N(0,v). Consider variances below 1.
'gvsub1_d1': [(var, density.IsotropicNormal(np.zeros(1), 1),
data.DSIsotropicNormal(np.zeros(1), var) )
for var in gvsub1_d1_vs
],
# Gaussian variance difference problem. Only the variance
# of the first dimension differs. d varies.
'gvd': [(d, density.Normal(np.zeros(d), np.eye(d) ),
data.DSNormal(np.zeros(d), np.diag(np.hstack((2, np.ones(d-1)))) ))
for d in gvd_ds],
# Gaussian Bernoulli RBM. dx=50, dh=10
'gbrbm_dx50_dh10': gaussbern_rbm_probs(gb_rbm_dx50_dh10_stds,
dx=50, dh=10, n=sample_size),
# Gaussian Bernoulli RBM. dx=50, dh=40
'gbrbm_dx50_dh40': gaussbern_rbm_probs(gb_rbm_dx50_dh40_stds,
dx=50, dh=40, n=sample_size),
# p: N(0, I), q: standard Laplace. Vary d
'glaplace': [(d, density.IsotropicNormal(np.zeros(d), 1),
# Scaling of 1/sqrt(2) will make the variance 1.
data.DSLaplace(d=d, loc=0, scale=1.0/np.sqrt(2))) for d in glaplace_ds],
}
if prob_label not in prob2tuples:
raise ValueError('Unknown problem label. Need to be one of %s'%str(prob2tuples.keys()) )
return prob2tuples[prob_label]
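# For example, get_pqsource_list('sg') returns four (d, p, ds) tuples with
# p = q = N(0, I_d) for d in {1, 5, 10, 15} - the H0-true calibration problem.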
def run_problem(prob_label):
"""Run the experiment"""
L = get_pqsource_list(prob_label)
prob_params, ps, data_sources = zip(*L)
# make them lists
prob_params = list(prob_params)
ps = list(ps)
data_sources = list(data_sources)
# /////// submit jobs //////////
# create folder name string
#result_folder = glo.result_folder()
from kgof.config import expr_configs
tmp_dir = expr_configs['scratch_path']
foldername = os.path.join(tmp_dir, 'kgof_slurm', 'e%d'%ex)
logger.info("Setting engine folder to %s" % foldername)
# create parameter instance that is needed for any batch computation engine
logger.info("Creating batch parameter instance")
batch_parameters = BatchClusterParameters(
foldername=foldername, job_name_base="e%d_"%ex, parameter_prefix="")
# Use the following line if Slurm queue is not used.
#engine = SerialComputationEngine()
engine = SlurmComputationEngine(batch_parameters)
#engine = SlurmComputationEngine(batch_parameters, partition='wrkstn,compute')
n_methods = len(method_job_funcs)
# repetitions x len(prob_params) x #methods
aggregators = np.empty((reps, len(prob_params), n_methods ), dtype=object)
for r in range(reps):
for pi, param in enumerate(prob_params):
for mi, f in enumerate(method_job_funcs):
# name used to save the result
func_name = f.__name__
fname = '%s-%s-n%d_r%d_p%g_a%.3f_trp%.2f.p' \
%(prob_label, func_name, sample_size, r, param, alpha,
tr_proportion)
if not is_rerun and glo.ex_file_exists(ex, prob_label, fname):
logger.info('%s exists. Load and return.'%fname)
job_result = glo.ex_load_result(ex, prob_label, fname)
sra = SingleResultAggregator()
sra.submit_result(SingleResult(job_result))
aggregators[r, pi, mi] = sra
else:
# result not exists or rerun
# p: an UnnormalizedDensity object
p = ps[pi]
job = Ex2Job(SingleResultAggregator(), p, data_sources[pi],
prob_label, r, f, param)
agg = engine.submit_job(job)
aggregators[r, pi, mi] = agg
# let the engine finish its business
logger.info("Wait for all call in engine")
engine.wait_for_all()
# ////// collect the results ///////////
logger.info("Collecting results")
job_results = np.empty((reps, len(prob_params), n_methods), dtype=object)
for r in range(reps):
for pi, param in enumerate(prob_params):
for mi, f in enumerate(method_job_funcs):
logger.info("Collecting result (%s, r=%d, param=%.3g)" %
(f.__name__, r, param))
# let the aggregator finalize things
aggregators[r, pi, mi].finalize()
# aggregators[i].get_final_result() returns a SingleResult instance,
# which we need to extract the actual result
job_result = aggregators[r, pi, mi].get_final_result().result
job_results[r, pi, mi] = job_result
#func_names = [f.__name__ for f in method_job_funcs]
#func2labels = exglobal.get_func2label_map()
#method_labels = [func2labels[f] for f in func_names if f in func2labels]
# save results
results = {'job_results': job_results, 'prob_params': prob_params,
'alpha': alpha, 'repeats': reps,
'ps': ps,
'list_data_source': data_sources,
'tr_proportion': tr_proportion,
'method_job_funcs': method_job_funcs, 'prob_label': prob_label,
'sample_size': sample_size,
}
# aggregated results file name
fname = 'ex%d-%s-me%d_n%d_rs%d_pmi%g_pma%g_a%.3f_trp%.2f.p' \
%(ex, prob_label, n_methods, sample_size, reps, min(prob_params),
max(prob_params), alpha, tr_proportion)
glo.ex_save_result(ex, results, fname)
logger.info('Saved aggregated results to %s'%fname)
def main():
if len(sys.argv) != 2:
print('Usage: %s problem_label'%sys.argv[0])
sys.exit(1)
prob_label = sys.argv[1]
run_problem(prob_label)
if __name__ == '__main__':
main()
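# Example invocation, with one of the problem labels defined in
# get_pqsource_list:
#   python ex2_prob_params.py gmd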
#!/usr/bin/env python3
""" Create a proxy of each records, where each nucleotide within the factor = 1, the rest = 0
"""
from Bio import SeqIO
import sys
import os
import numpy as np
import itertools
__author__ = "Titouan Laessle"
__copyright__ = "Copyright 2017 Titouan Laessle"
__license__ = "MIT"
# Wanted factor:
factor = str(sys.argv[1])
# Species genome path:
species_genome = str(sys.argv[2])
# Species abbreviation:
species = '_'.join(str(species_genome.split('/')[-1]).split('_')[:2])
# Species feature table path, depends on the type of factor:
if factor in ['LCR', 'TE', 'tandem']:
species_table = str(sys.argv[3])
factor_type = 'repeats'
elif factor == 'RNA':
species_table = str(sys.argv[4])
factor_type = 'features'
elif factor in ['CDS', 'intron', 'UTR']:
species_table = str(sys.argv[5])
factor_type = 'genes'
# Tracking file:
follow_up = str(sys.argv[6])
###
# Check if parent directory is present, if not create it
###
def checking_parent(file_path):
# We don't need the file name, so take everything but the last path component
parent_directories = '/'.join(file_path.split('/')[0:(len(file_path.split('/')) - 1)])
# As we run in parallel, one thread may fail to see the directory, attempt to
# create it while another just did the same -> "The file already exists" error
# that stopped everything, hence the try/except below.
try:
if not os.path.exists(parent_directories):
os.makedirs(parent_directories)
except OSError:
# The directory was created by another thread in the meantime
pass
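# Example (hypothetical path): checking_parent('../out/species/chr1.txt')
# creates ../out/species/ if it does not exist yet.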
###
# Fetch a fasta file, and clean it (remove N or n, which stand for "any nucleotide")
# Note that if the fasta file contains multiple sequences, only the first will have its CGR computed!
# Input:
# - fasta_file : Path to the file containing the sequence one wants the CGR computed on
###
def fetch_fasta(fasta_file):
# Parse every record of the fasta file into a list
try:
records = list(SeqIO.parse(fasta_file, "fasta"))
except (IOError, ValueError):
print("Cannot open %s, check path!" % fasta_file)
sys.exit()
return records
###
# Translate a list of integers into a list of all the ranges found in this list of integers
###
def as_ranges(list_of_integers):
for p in itertools.groupby(enumerate(list_of_integers), lambda x_y: x_y[1]-x_y[0]):
b = list(p[1])
yield b[0][1], b[-1][1]
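# Example: list(as_ranges([1, 2, 3, 7, 8])) == [(1, 3), (7, 8)]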
###
# The line format varies between species tables (repeats vs features vs genes)
###
def reading_line(factor_type, feature_table):
if factor_type == 'repeats':
return feature_table.readline().rsplit()
# Same for features or genes
else:
return feature_table.readline().split('\t')
###
# Will output True if the line contains the right factor
# Works differently depending on whether we are handling repeats, features or genes
###
def True_if_right_factor(factor_type, actual_line, feature_column, feature_type):
# Watch out for comment lines (which split to length 1)
if len(actual_line) > 1:
# If bigger than one -> feature line
if factor_type == 'repeats':
return actual_line[feature_column].split('/')[0].strip('?') in feature_type
# Same for features or genes
else:
return actual_line[feature_column] in feature_type
else:
# Otherwise we know it is a comment line = not the right factor...
return False
###
# Merge the overlapping intervals
###
def merge_intervals(intervals):
# Guard against an empty collection (no range found for the factor)
if not intervals:
return []
# Sort the intervals by start position
sorted_interval = sorted(intervals, key=lambda interval: interval[0])
# This list will contain all the merged intervals
merged = [sorted_interval[0]]
for interval in sorted_interval:
# Remove and return last element of the list
last = merged.pop()
# If actual start is lower than this last interval end -> overlap
if last[1] >= interval[0]:
new_interval = (last[0], max(last[1], interval[1]))
merged.append(new_interval)
else:
merged.append(last)
merged.append(interval)
return merged
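# Example: merge_intervals([(4, 8), (1, 5), (10, 12)]) == [(1, 8), (10, 12)]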
###
# As we look at both strands, features on the - strand sometimes have their start/end coordinates in reverse order...
###
def strand_sensitive(actual_line, start_column, end_column):
both = [int(actual_line[start_column]) - 1, int(actual_line[end_column]) - 1]
both.sort()
return tuple(both)
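# Example: a line with start='100' and end='50' (a minus-strand feature)
# becomes the 0-based, ordered tuple (49, 99).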
###
# Compute a proxy of each record composed of 0 (nucleotide != factor) and 1 (nucleotide == factor)
# Inputs:
# - records : fetched sequence (fasta) of the species whole genome
# - factor_type : indicating if wants factor that are either repeats or features or genes
# - feature_type : what are the pattern to look for in the factor/gene column
# - species_table : file containing the wanted factor (either RepeatMasker output or gff file)
# - *_column : various information related to the internal structure of the species_table file
# - output : path to the output directory
# Note: if output is empty, the proxies are returned as a list instead
# Output:
# - Either a list of n (number of records) proxies, if output is empty;
# each proxy contains m (number of non-overlapping ranges) ranges
# - Or n (number of records in the fasta file) files containing the
# non-overlapping ranges of nucleotides which are factors
###
def extract_factor(records, factor_type, feature_type, species_table, id_column, feature_column,
start_column, end_column, output):
# Note: we always add -1 to make it compatible with the pythonic start of counting at 0!
if not output:
# We will store the proxies of records in this list
proxies_records = list()
with open(species_table, 'r') as feature_table:
# Must skip the header (which differs between feature tables and repeat tables):
if factor_type == 'repeats':
feature_table.readline()
feature_table.readline()
feature_table.readline()
actual_line = reading_line(factor_type, feature_table)
else:
line = feature_table.readline()
# Must skip the headers (varying length)
while line.startswith('#'):
line = feature_table.readline()
actual_line = line.split('\t')
for each_record in range(len(records)):
# This set will contain all the ranges of our wanted factor
all_ranges = set()
# We ran into some issues with UTRs: for unknown reasons, their lines are sometimes placed
# after the end of the record...
if factor == 'UTR':
# We must rewind to the start every time (comment lines are not a problem in this case)
feature_table.seek(0, 0)
for each_line in feature_table:
actual_line = each_line.split('\t')
if True_if_right_factor(factor_type, actual_line, feature_column, feature_type) \
and records[each_record].id == actual_line[id_column]:
all_ranges.add(strand_sensitive(actual_line, start_column, end_column))
# Factors other than UTR do not have this problem -> slightly faster version:
else:
# Whenever we are not yet at this chromosome's part of the table -> skip until we reach it
while records[each_record].id != actual_line[id_column]:
actual_line = reading_line(factor_type, feature_table)
# We also have to find the first time the wanted feature appears
while not True_if_right_factor(factor_type, actual_line, feature_column, feature_type):
actual_line = reading_line(factor_type, feature_table)
# This line will be the first result
all_ranges.add(strand_sensitive(actual_line, start_column, end_column))
# Continue the search
actual_line = reading_line(factor_type, feature_table)
# While still on the current record, continue extracting
while records[each_record].id == actual_line[id_column]:
# Only do this for wanted feature
if True_if_right_factor(factor_type, actual_line, feature_column, feature_type):
all_ranges.add(strand_sensitive(actual_line, start_column, end_column))
# Continue searching
actual_line = reading_line(factor_type, feature_table)
# If it is not our factor, just continue the search
else:
actual_line = reading_line(factor_type, feature_table)
# If we reach the last line, actual_line only has one empty entry
try:
actual_line[1]
except IndexError:
break
factor_ranges = merge_intervals(all_ranges)
# Merge two intervals which are next to each other
length_of_list = len(factor_ranges)
i = 0
while i < (length_of_list - 1):
if factor_ranges[i][1] in (factor_ranges[i + 1][0], factor_ranges[i + 1][0] - 1):
factor_ranges[i:i + 2] = [[factor_ranges[i][0], factor_ranges[i + 1][1]]]
length_of_list -= 1
else:
i += 1
# If not output, use the ranges to mark the proxy
if not output:
# Add the proxy of factor to the list
proxies_records.append(factor_ranges)
# Else, write the ranges on a file named as the record id
else:
record_file = output + '/' + records[each_record].id
checking_parent(record_file)
with open(record_file, 'w') as outfile:
for each_range in factor_ranges:
outfile.write(str(each_range[0]) + '\t' + str(each_range[1]) + '\n')
if not output:
return proxies_records
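# A hypothetical in-memory use, with output='' so the proxies are returned
# instead of written to files (the column numbers are the 'genes' settings
# configured below):
#   proxies = extract_factor(records, 'genes', 'CDS', species_table,
#                            0, 2, 3, 4, '')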
if factor_type == 'repeats':
id_column = 4
feature_column = 10
# The feature type depends on the wanted feature
if factor == 'LCR':
feature_type = 'Low_complexity'
elif factor == 'TE':
feature_type = ['DNA', 'LINE', 'LTR', 'SINE', 'Retroposon', 'RC']
elif factor == 'tandem':
feature_type = ['Satellite', 'Simple_repeat']
start_column = 5
end_column = 6
elif factor_type == 'features':
id_column = 6
feature_column = 0
# We want all types of RNA
feature_type = ['misc_RNA', 'ncRNA', 'rRNA', 'tRNA']
start_column = 7
end_column = 8
elif factor_type == 'genes':
id_column = 0
feature_column = 2
# The feature type depends on the wanted feature
if factor == 'CDS':
feature_type = 'CDS'
elif factor == 'intron':
feature_type = 'intron'
elif factor == 'UTR':
feature_type = ['five_prime_UTR', 'three_prime_UTR']
start_column = 3
end_column = 4
# Fetch all the records from this species fasta
records = fetch_fasta(species_genome)
proxies_directory = '/'.join(['../files/factor_proxies', species, factor])
# Compute factor proxy of records
extract_factor(records, factor_type, feature_type, species_table, id_column, feature_column, start_column, end_column,
proxies_directory)
# Follow the progression of the analysis
checking_parent(follow_up)
with open(follow_up, 'w') as file:
file.write('')
# Natural Language Toolkit: Discourse Representation Theory (DRT)
#
# Author: Dan Garrette <dhgarrette@gmail.com>
#
# Copyright (C) 2001-2011 NLTK Project
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
from logic import *
# Import Tkinter-based modules if they are available
try:
from Tkinter import Canvas
from Tkinter import Tk
from tkFont import Font
from nltk.util import in_idle
except ImportError:
# No need to print a warning here, nltk.draw has already printed one.
pass
class DrtTokens(Tokens):
DRS = 'DRS'
DRS_CONC = '+'
PRONOUN = 'PRO'
OPEN_BRACKET = '['
CLOSE_BRACKET = ']'
PUNCT = [DRS_CONC, OPEN_BRACKET, CLOSE_BRACKET]
SYMBOLS = Tokens.SYMBOLS + PUNCT
TOKENS = Tokens.TOKENS + [DRS] + PUNCT
class AbstractDrs(object):
"""
This is the base abstract DRT Expression from which every DRT
Expression extends.
"""
def applyto(self, other):
return DrtApplicationExpression(self, other)
def __neg__(self):
return DrtNegatedExpression(self)
def __and__(self, other):
raise NotImplementedError()
def __or__(self, other):
assert isinstance(other, AbstractDrs)
return DrtOrExpression(self, other)
def __gt__(self, other):
assert isinstance(other, AbstractDrs)
if isinstance(self, DRS):
return DRS(self.refs, self.conds, other)
if isinstance(self, DrtConcatenation):
return DrtConcatenation(self.first, self.second, other)
raise Exception('Antecedent of implication must be a DRS')
def equiv(self, other, prover=None):
"""
Check for logical equivalence.
Pass the expression (self <-> other) to the theorem prover.
If the prover says it is valid, then the self and other are equal.
@param other: an C{AbstractDrs} to check equality against
@param prover: a C{nltk.inference.api.Prover}
"""
assert isinstance(other, AbstractDrs)
f1 = self.simplify().fol()
f2 = other.simplify().fol()
return f1.equiv(f2, prover)
def _get_type(self):
raise AttributeError("'%s' object has no attribute 'type'" %
self.__class__.__name__)
type = property(_get_type)
def typecheck(self, signature=None):
raise NotImplementedError()
def __add__(self, other):
return DrtConcatenation(self, other, None)
def get_refs(self, recursive=False):
"""
Return the set of discourse referents in this DRS.
@param recursive: C{boolean} Also find discourse referents in subterms?
@return: C{list} of C{Variable}s
"""
raise NotImplementedError()
def is_pronoun_function(self):
""" Is self of the form "PRO(x)"? """
return isinstance(self, DrtApplicationExpression) and \
isinstance(self.function, DrtAbstractVariableExpression) and \
self.function.variable.name == DrtTokens.PRONOUN and \
isinstance(self.argument, DrtIndividualVariableExpression)
def make_EqualityExpression(self, first, second):
return DrtEqualityExpression(first, second)
def make_VariableExpression(self, variable):
return DrtVariableExpression(variable)
def resolve_anaphora(self):
return resolve_anaphora(self)
def eliminate_equality(self):
def combinator(a, *additional):
if len(additional) == 0:
return self.__class__(a)
elif len(additional) == 1:
return self.__class__(a, additional[0])
return self.visit(lambda e: e if isinstance(e, Variable)
else e.eliminate_equality(), combinator, set())
def pprint(self):
"""
Print the DRS in its box notation
"""
print self.pretty()
def pretty(self):
"""
Build the box-notation rendering of the DRS
@return: the pretty print string
"""
return '\n'.join(self._pretty())
def draw(self):
DrsDrawer(self).draw()
class DRS(AbstractDrs, Expression):
"""A Discourse Representation Structure."""
def __init__(self, refs, conds, consequent=None):
"""
@param refs: C{list} of C{DrtIndividualVariableExpression} for the
discourse referents
@param conds: C{list} of C{Expression} for the conditions
"""
self.refs = refs
self.conds = conds
self.consequent = consequent
def replace(self, variable, expression, replace_bound=False, alpha_convert=True):
"""Replace all instances of variable v with expression E in self,
where v is free in self."""
if variable in self.refs:
#if a bound variable is the thing being replaced
if not replace_bound:
return self
else:
i = self.refs.index(variable)
if self.consequent:
consequent = self.consequent.replace(variable, expression, True, alpha_convert)
else:
consequent = None
return DRS(self.refs[:i]+[expression.variable]+self.refs[i+1:],
[cond.replace(variable, expression, True, alpha_convert)
for cond in self.conds],
consequent)
else:
if alpha_convert:
# any bound variable that appears in the expression must
# be alpha converted to avoid a conflict
for ref in (set(self.refs) & expression.free()):
newvar = unique_variable(ref)
newvarex = DrtVariableExpression(newvar)
i = self.refs.index(ref)
if self.consequent:
consequent = self.consequent.replace(ref, newvarex, True, alpha_convert)
else:
consequent = None
self = DRS(self.refs[:i]+[newvar]+self.refs[i+1:],
[cond.replace(ref, newvarex, True, alpha_convert)
for cond in self.conds],
consequent)
#replace in the conditions
if self.consequent:
consequent = self.consequent.replace(variable, expression, replace_bound, alpha_convert)
else:
consequent = None
return DRS(self.refs,
[cond.replace(variable, expression, replace_bound, alpha_convert)
for cond in self.conds],
consequent)
def variables(self):
"""@see: Expression.variables()"""
conds_vars = reduce(operator.or_,
[c.variables() for c in self.conds], set())
if self.consequent:
conds_vars.update(self.consequent.variables())
return conds_vars - set(self.refs)
def free(self, indvar_only=True):
"""@see: Expression.free()"""
conds_free = reduce(operator.or_,
[c.free(indvar_only) for c in self.conds], set())
if self.consequent:
conds_free.update(self.consequent.free())
return conds_free - set(self.refs)
def get_refs(self, recursive=False):
"""@see: AbstractExpression.get_refs()"""
if recursive:
conds_refs = self.refs + sum((c.get_refs(True) for c in self.conds), [])
if self.consequent:
conds_refs.extend(self.consequent.get_refs(True))
return conds_refs
else:
return self.refs
def visit(self, function, combinator, default):
"""@see: Expression.visit()"""
parts = self.refs + self.conds
if self.consequent:
parts.append(self.consequent)
return reduce(combinator, map(function, parts), default)
def eliminate_equality(self):
drs = self
i = 0
while i < len(drs.conds):
cond = drs.conds[i]
if isinstance(cond, EqualityExpression) and \
isinstance(cond.first, AbstractVariableExpression) and \
isinstance(cond.second, AbstractVariableExpression):
drs = DRS(list(set(drs.refs)-set([cond.second.variable])),
drs.conds[:i]+drs.conds[i+1:],
drs.consequent)
if cond.second.variable != cond.first.variable:
drs = drs.replace(cond.second.variable, cond.first, False, False)
i = 0
i -= 1
i += 1
conds = []
for cond in drs.conds:
new_cond = cond.eliminate_equality()
new_cond_simp = new_cond.simplify()
if not isinstance(new_cond_simp, DRS) or \
new_cond_simp.refs or new_cond_simp.conds or \
new_cond_simp.consequent:
conds.append(new_cond)
if drs.consequent:
consequent = drs.consequent.eliminate_equality()
else:
consequent = None
return DRS(drs.refs, conds, consequent)
def simplify(self):
if self.consequent:
consequent = self.consequent.simplify()
else:
consequent = None
return DRS(self.refs,
[cond.simplify() for cond in self.conds],
consequent)
def fol(self):
if self.consequent:
accum = None
if self.conds:
accum = reduce(AndExpression, [c.fol() for c in self.conds])
if accum:
accum = ImpExpression(accum, self.consequent.fol())
else:
accum = self.consequent.fol()
for ref in self.refs[::-1]:
accum = AllExpression(ref, accum)
return accum
else:
if not self.conds:
raise Exception("Cannot convert DRS with no conditions to FOL.")
accum = reduce(AndExpression, [c.fol() for c in self.conds])
for ref in map(Variable, self._order_ref_strings(self.refs)[::-1]):
accum = ExistsExpression(ref, accum)
return accum
def _pretty(self):
refs_line = ' '.join(self._order_ref_strings(self.refs))
cond_lines = sum([filter(str.strip, cond._pretty()) for cond in self.conds], [])
length = max([len(refs_line)] + map(len, cond_lines))
drs = [' _' + '_'*length + '_ ',
'| ' + refs_line + ' '*(length-len(refs_line)) + ' |',
'|-' + '-'*length + '-|'] + \
['| ' + line + ' '*(length-len(line)) + ' |' for line in cond_lines] + \
['|_' + '_'*length + '_|']
if self.consequent:
return DrtBinaryExpression._assemble_pretty(drs, DrtTokens.IMP,
self.consequent._pretty())
return drs
def _order_ref_strings(self, refs):
strings = map(str, refs)
ind_vars = []
func_vars = []
event_vars = []
other_vars = []
for s in strings:
if is_indvar(s):
ind_vars.append(s)
elif is_funcvar(s):
func_vars.append(s)
elif is_eventvar(s):
event_vars.append(s)
else:
other_vars.append(s)
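        # Each group sorts alphabetically and then by numeric suffix: the
        # expression int([v[1:],-1][len(v[1:]) == 0]) reads as "the numeric
        # suffix of the name, or -1 when there is none", so e.g. 'x' sorts
        # before 'x1', which sorts before 'x2'.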
return sorted(other_vars) + \
sorted(event_vars, key=lambda v: int([v[2:],-1][len(v[2:]) == 0])) + \
sorted(func_vars, key=lambda v: (v[0], int([v[1:],-1][len(v[1:])==0]))) + \
sorted(ind_vars, key=lambda v: (v[0], int([v[1:],-1][len(v[1:])==0])))
def __eq__(self, other):
r"""Defines equality modulo alphabetic variance.
If we are comparing \x.M and \y.N, then check equality of M and N[x/y]."""
if isinstance(other, DRS):
if len(self.refs) == len(other.refs):
converted_other = other
for (r1, r2) in zip(self.refs, converted_other.refs):
varex = self.make_VariableExpression(r1)
converted_other = converted_other.replace(r2, varex, True)
if self.consequent == converted_other.consequent and \
len(self.conds) == len(converted_other.conds):
for c1, c2 in zip(self.conds, converted_other.conds):
if not (c1 == c2):
return False
return True
return False
def __str__(self):
drs = '([%s],[%s])' % (','.join(self._order_ref_strings(self.refs)),
', '.join(map(str, self.conds)))
if self.consequent:
return DrtTokens.OPEN + drs + ' ' + DrtTokens.IMP + ' ' + \
str(self.consequent) + DrtTokens.CLOSE
return drs
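# An illustrative sketch (not part of the original module): how a parsed DRS
# converts to first-order logic via fol(). It assumes DrtParser, defined later
# in this module, and the string syntax exercised in demo() below.
def _example_drs_to_fol():
    parser = DrtParser()
    drs = parser.parse(r'([x],[man(x), walks(x)])')
    # The discourse referent 'x' becomes an existential quantifier:
    # exists x.(man(x) & walks(x))
    return drs.fol()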
def DrtVariableExpression(variable):
"""
This is a factory method that instantiates and returns a subtype of
C{DrtAbstractVariableExpression} appropriate for the given variable.
"""
if is_indvar(variable.name):
return DrtIndividualVariableExpression(variable)
elif is_funcvar(variable.name):
return DrtFunctionVariableExpression(variable)
elif is_eventvar(variable.name):
return DrtEventVariableExpression(variable)
else:
return DrtConstantExpression(variable)
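# For example (assuming the usual NLTK variable-name conventions): a name like
# 'x' or 'x1' yields a DrtIndividualVariableExpression, 'e1' an event
# variable, 'F' or 'P' a function variable, and anything else (e.g. 'john')
# a constant.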
class DrtAbstractVariableExpression(AbstractDrs, AbstractVariableExpression):
def fol(self):
return self
def get_refs(self, recursive=False):
"""@see: AbstractExpression.get_refs()"""
return []
def _pretty(self):
s = str(self)
blank = ' '*len(s)
return [blank, blank, s, blank]
class DrtIndividualVariableExpression(DrtAbstractVariableExpression, IndividualVariableExpression):
pass
class DrtFunctionVariableExpression(DrtAbstractVariableExpression, FunctionVariableExpression):
pass
class DrtEventVariableExpression(DrtIndividualVariableExpression, EventVariableExpression):
pass
class DrtConstantExpression(DrtAbstractVariableExpression, ConstantExpression):
pass
class DrtProposition(AbstractDrs, Expression):
def __init__(self, variable, drs):
self.variable = variable
self.drs = drs
def replace(self, variable, expression, replace_bound=False, alpha_convert=True):
if self.variable == variable:
assert isinstance(expression, DrtAbstractVariableExpression), "Can only replace a proposition label with a variable"
return DrtProposition(expression.variable, self.drs.replace(variable, expression, replace_bound, alpha_convert))
else:
return DrtProposition(self.variable, self.drs.replace(variable, expression, replace_bound, alpha_convert))
def eliminate_equality(self):
return DrtProposition(self.variable, self.drs.eliminate_equality())
def simplify(self):
return DrtProposition(self.variable, self.drs.simplify())
def get_refs(self, recursive=False):
if recursive:
return self.drs.get_refs(True)
else:
return []
def __eq__(self, other):
return self.__class__ == other.__class__ and \
self.variable == other.variable and \
self.drs == other.drs
def fol(self):
return self.drs.fol()
def _pretty(self):
drs_s = self.drs._pretty()
blank = ' '*(len(str(self.variable))+1)
return [blank + drs_s[0],
str(self.variable) + ':' + drs_s[1]] + \
map(lambda l: blank+l, drs_s[2:])
def visit(self, function, combinator, default):
return combinator(function(self.variable), function(self.drs))
def __str__(self):
return 'prop(%s, %s)' % (self.variable, self.drs)
class DrtNegatedExpression(AbstractDrs, NegatedExpression):
def fol(self):
return NegatedExpression(self.term.fol())
def get_refs(self, recursive=False):
"""@see: AbstractExpression.get_refs()"""
return self.term.get_refs(recursive)
def _pretty(self):
term_lines = self.term._pretty()
return [' ' + line for line in term_lines[:2]] + \
['__ ' + term_lines[2]] + \
[' | ' + term_lines[3]] + \
[' ' + line for line in term_lines[4:]]
class DrtLambdaExpression(AbstractDrs, LambdaExpression):
def alpha_convert(self, newvar):
"""Rename all occurrences of the variable introduced by this variable
        binder in the expression to C{newvar}.
@param newvar: C{Variable}, for the new variable
"""
return self.__class__(newvar, self.term.replace(self.variable,
DrtVariableExpression(newvar), True))
def fol(self):
return LambdaExpression(self.variable, self.term.fol())
def _pretty(self):
variables = [self.variable]
term = self.term
while term.__class__ == self.__class__:
variables.append(term.variable)
term = term.term
var_string = ' '.join(map(str, variables)) + DrtTokens.DOT
term_lines = term._pretty()
return [' ' + ' '*len(var_string) + line for line in term_lines[:1]] + \
[' \ ' + ' '*len(var_string) + term_lines[1]] + \
[' /\ ' + var_string + term_lines[2]] + \
[' ' + ' '*len(var_string) + line for line in term_lines[3:]]
class DrtBinaryExpression(AbstractDrs, BinaryExpression):
def get_refs(self, recursive=False):
"""@see: AbstractExpression.get_refs()"""
if recursive:
return self.first.get_refs(True) + self.second.get_refs(True)
else:
return []
def _pretty(self):
return DrtBinaryExpression._assemble_pretty(self._pretty_subex(self.first), self.getOp(), self._pretty_subex(self.second))
@staticmethod
def _assemble_pretty(first_lines, op, second_lines):
max_lines = max(len(first_lines), len(second_lines))
first_lines = first_lines + [' '*len(first_lines[0])]*(max_lines-len(first_lines))
second_lines = second_lines + [' '*len(second_lines[0])]*(max_lines-len(second_lines))
return [' ' + first_line + ' ' + ' '*len(op) + ' ' + second_line + ' ' for first_line, second_line in zip(first_lines, second_lines)[:2]] + \
['(' + first_lines[2] + ' ' + op + ' ' + second_lines[2] + ')'] + \
[' ' + first_line + ' ' + ' '*len(op) + ' ' + second_line + ' ' for first_line, second_line in zip(first_lines, second_lines)[3:]]
def _pretty_subex(self, subex):
return subex._pretty()
class DrtBooleanExpression(DrtBinaryExpression, BooleanExpression):
pass
class DrtOrExpression(DrtBooleanExpression, OrExpression):
def fol(self):
return OrExpression(self.first.fol(), self.second.fol())
def _pretty_subex(self, subex):
if isinstance(subex, DrtOrExpression):
return [line[1:-1] for line in subex._pretty()]
return DrtBooleanExpression._pretty_subex(self, subex)
class DrtEqualityExpression(DrtBinaryExpression, EqualityExpression):
def fol(self):
return EqualityExpression(self.first.fol(), self.second.fol())
class DrtConcatenation(DrtBooleanExpression):
"""DRS of the form '(DRS + DRS)'"""
def __init__(self, first, second, consequent=None):
DrtBooleanExpression.__init__(self, first, second)
self.consequent = consequent
def replace(self, variable, expression, replace_bound=False, alpha_convert=True):
"""Replace all instances of variable v with expression E in self,
where v is free in self."""
first = self.first
second = self.second
consequent = self.consequent
# If variable is bound
if variable in self.get_refs():
if replace_bound:
first = first.replace(variable, expression, replace_bound, alpha_convert)
second = second.replace(variable, expression, replace_bound, alpha_convert)
if consequent:
consequent = consequent.replace(variable, expression, replace_bound, alpha_convert)
else:
if alpha_convert:
# alpha convert every ref that is free in 'expression'
for ref in (set(self.get_refs(True)) & expression.free()):
v = DrtVariableExpression(unique_variable(ref))
first = first.replace(ref, v, True, alpha_convert)
second = second.replace(ref, v, True, alpha_convert)
if consequent:
consequent = consequent.replace(ref, v, True, alpha_convert)
first = first.replace(variable, expression, replace_bound, alpha_convert)
second = second.replace(variable, expression, replace_bound, alpha_convert)
if consequent:
consequent = consequent.replace(variable, expression, replace_bound, alpha_convert)
return self.__class__(first, second, consequent)
def eliminate_equality(self):
#TODO: at some point. for now, simplify.
drs = self.simplify()
assert not isinstance(drs, DrtConcatenation)
return drs.eliminate_equality()
def simplify(self):
first = self.first.simplify()
second = self.second.simplify()
if self.consequent:
consequent = self.consequent.simplify()
else:
consequent = None
if isinstance(first, DRS) and isinstance(second, DRS):
# For any ref that is in both 'first' and 'second'
for ref in (set(first.get_refs(True)) & set(second.get_refs(True))):
# alpha convert the ref in 'second' to prevent collision
newvar = DrtVariableExpression(unique_variable(ref))
second = second.replace(ref, newvar, True)
return DRS(first.refs + second.refs, first.conds + second.conds, consequent)
else:
return self.__class__(first, second, consequent)
def get_refs(self, recursive=False):
"""@see: AbstractExpression.get_refs()"""
refs = self.first.get_refs(recursive) + self.second.get_refs(recursive)
if self.consequent and recursive:
refs.extend(self.consequent.get_refs(True))
return refs
def getOp(self):
return DrtTokens.DRS_CONC
def __eq__(self, other):
r"""Defines equality modulo alphabetic variance.
If we are comparing \x.M and \y.N, then check equality of M and N[x/y]."""
if isinstance(other, DrtConcatenation):
self_refs = self.get_refs()
other_refs = other.get_refs()
if len(self_refs) == len(other_refs):
converted_other = other
for (r1,r2) in zip(self_refs, other_refs):
varex = self.make_VariableExpression(r1)
converted_other = converted_other.replace(r2, varex, True)
return self.first == converted_other.first and \
self.second == converted_other.second and \
self.consequent == converted_other.consequent
return False
def fol(self):
e = AndExpression(self.first.fol(), self.second.fol())
if self.consequent:
e = ImpExpression(e, self.consequent.fol())
return e
def _pretty(self):
drs = DrtBinaryExpression._assemble_pretty(self._pretty_subex(self.first),
self.getOp(),
self._pretty_subex(self.second))
if self.consequent:
            drs = DrtBinaryExpression._assemble_pretty(drs, DrtTokens.IMP,
                                                       self.consequent._pretty())
return drs
def _pretty_subex(self, subex):
if isinstance(subex, DrtConcatenation):
return [line[1:-1] for line in subex._pretty()]
return DrtBooleanExpression._pretty_subex(self, subex)
def visit(self, function, combinator, default):
"""@see: Expression.visit()"""
if self.consequent:
return combinator(function(self.first), function(self.second), function(self.consequent))
else:
return combinator(function(self.first), function(self.second))
def __str__(self):
first = self._str_subex(self.first)
second = self._str_subex(self.second)
drs = Tokens.OPEN + first + ' ' + self.getOp() \
+ ' ' + second + Tokens.CLOSE
if self.consequent:
return DrtTokens.OPEN + drs + ' ' + DrtTokens.IMP + ' ' + \
str(self.consequent) + DrtTokens.CLOSE
return drs
def _str_subex(self, subex):
s = str(subex)
if isinstance(subex, DrtConcatenation) and subex.consequent is None:
return s[1:-1]
return s
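# For example (see demo() below): simplify() merges '(([x],[walks(x)]) +
# ([y],[runs(y)]))' into the single box '([x,y],[walks(x), runs(y)])',
# alpha-converting referents of the second DRS first if they collide with
# referents of the first.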
class DrtApplicationExpression(AbstractDrs, ApplicationExpression):
def fol(self):
return ApplicationExpression(self.function.fol(), self.argument.fol())
def get_refs(self, recursive=False):
"""@see: AbstractExpression.get_refs()"""
if recursive:
return self.function.get_refs(True) + self.argument.get_refs(True)
else:
return []
def _pretty(self):
function, args = self.uncurry()
function_lines = function._pretty()
args_lines = [arg._pretty() for arg in args]
max_lines = max(map(len, [function_lines] + args_lines))
function_lines = function_lines + [' '*len(function_lines[0])]*(max_lines-len(function_lines))
args_lines = [arg_lines + [' '*len(arg_lines[0])]*(max_lines-len(arg_lines)) for arg_lines in args_lines]
return [func_line + ' ' + ' '.join(args_line) + ' ' for func_line, args_line in zip(function_lines, zip(*args_lines))[:2]] + \
[function_lines[2] + '(' + ','.join(zip(*args_lines)[2]) + ')'] + \
[func_line + ' ' + ' '.join(args_line) + ' ' for func_line, args_line in zip(function_lines, zip(*args_lines))[3:]]
class PossibleAntecedents(list, AbstractDrs, Expression):
def free(self, indvar_only=True):
"""Set of free variables."""
return set(self)
def replace(self, variable, expression, replace_bound=False, alpha_convert=True):
"""Replace all instances of variable v with expression E in self,
where v is free in self."""
result = PossibleAntecedents()
        for item in self:
            if item == variable:
                result.append(expression)
            else:
                result.append(item)
return result
def _pretty(self):
s = str(self)
blank = ' '*len(s)
return [blank,blank,s]
def __str__(self):
return '[' + ','.join(map(str, self)) + ']'
class AnaphoraResolutionException(Exception):
pass
def resolve_anaphora(expression, trail=[]):
if isinstance(expression, ApplicationExpression):
if expression.is_pronoun_function():
possible_antecedents = PossibleAntecedents()
for ancestor in trail:
for ref in ancestor.get_refs():
refex = expression.make_VariableExpression(ref)
#==========================================================
# Don't allow resolution to itself or other types
#==========================================================
if refex.__class__ == expression.argument.__class__ and \
not (refex == expression.argument):
possible_antecedents.append(refex)
if len(possible_antecedents) == 1:
resolution = possible_antecedents[0]
else:
resolution = possible_antecedents
return expression.make_EqualityExpression(expression.argument, resolution)
else:
r_function = resolve_anaphora(expression.function, trail + [expression])
r_argument = resolve_anaphora(expression.argument, trail + [expression])
return expression.__class__(r_function, r_argument)
elif isinstance(expression, DRS):
r_conds = []
for cond in expression.conds:
r_cond = resolve_anaphora(cond, trail + [expression])
# if the condition is of the form '(x = [])' then raise exception
if isinstance(r_cond, EqualityExpression):
if isinstance(r_cond.first, PossibleAntecedents):
#Reverse the order so that the variable is on the left
temp = r_cond.first
r_cond.first = r_cond.second
r_cond.second = temp
if isinstance(r_cond.second, PossibleAntecedents):
if not r_cond.second:
raise AnaphoraResolutionException("Variable '%s' does not "
"resolve to anything." % r_cond.first)
r_conds.append(r_cond)
if expression.consequent:
consequent = resolve_anaphora(expression.consequent, trail + [expression])
else:
consequent = None
return expression.__class__(expression.refs, r_conds, consequent)
elif isinstance(expression, AbstractVariableExpression):
return expression
elif isinstance(expression, NegatedExpression):
return expression.__class__(resolve_anaphora(expression.term, trail + [expression]))
elif isinstance(expression, DrtConcatenation):
if expression.consequent:
consequent = resolve_anaphora(expression.consequent, trail + [expression])
else:
consequent = None
return expression.__class__(resolve_anaphora(expression.first, trail + [expression]),
resolve_anaphora(expression.second, trail + [expression]),
consequent)
elif isinstance(expression, BinaryExpression):
return expression.__class__(resolve_anaphora(expression.first, trail + [expression]),
resolve_anaphora(expression.second, trail + [expression]))
elif isinstance(expression, LambdaExpression):
return expression.__class__(expression.variable, resolve_anaphora(expression.term, trail + [expression]))
class DrsDrawer(object):
BUFFER = 3 #Space between elements
TOPSPACE = 10 #Space above whole DRS
    OUTERSPACE = 6 #Space to the left, right, and bottom of the whole DRS
def __init__(self, drs, size_canvas=True, canvas=None):
"""
@param drs: C{AbstractDrs}, The DRS to be drawn
@param size_canvas: C{boolean}, True if the canvas size should be the exact size of the DRS
@param canvas: C{Canvas} The canvas on which to draw the DRS. If none is given, create a new canvas.
"""
master = None
if not canvas:
master = Tk()
master.title("DRT")
font = Font(family='helvetica', size=12)
if size_canvas:
canvas = Canvas(master, width=0, height=0)
canvas.font = font
self.canvas = canvas
(right, bottom) = self._visit(drs, self.OUTERSPACE, self.TOPSPACE)
width = max(right+self.OUTERSPACE, 100)
height = bottom+self.OUTERSPACE
canvas = Canvas(master, width=width, height=height)#, bg='white')
else:
canvas = Canvas(master, width=300, height=300)
canvas.pack()
canvas.font = font
self.canvas = canvas
self.drs = drs
self.master = master
def _get_text_height(self):
"""Get the height of a line of text"""
return self.canvas.font.metrics("linespace")
def draw(self, x=OUTERSPACE, y=TOPSPACE):
"""Draw the DRS"""
self._handle(self.drs, self._draw_command, x, y)
if self.master and not in_idle():
self.master.mainloop()
else:
return self._visit(self.drs, x, y)
def _visit(self, expression, x, y):
"""
Return the bottom-rightmost point without actually drawing the item
@param expression: the item to visit
        @param x: the left side of the current drawing area
        @param y: the top of the current drawing area
@return: the bottom-rightmost point
"""
return self._handle(expression, self._visit_command, x, y)
def _draw_command(self, item, x, y):
"""
Draw the given item at the given location
@param item: the item to draw
        @param x: the left side of the current drawing area
        @param y: the top of the current drawing area
@return: the bottom-rightmost point
"""
if isinstance(item, str):
self.canvas.create_text(x, y, anchor='nw', font=self.canvas.font, text=item)
elif isinstance(item, tuple):
# item is the lower-right of a box
(right, bottom) = item
self.canvas.create_rectangle(x, y, right, bottom)
horiz_line_y = y + self._get_text_height() + (self.BUFFER * 2) #the line separating refs from conds
self.canvas.create_line(x, horiz_line_y, right, horiz_line_y)
return self._visit_command(item, x, y)
def _visit_command(self, item, x, y):
"""
Return the bottom-rightmost point without actually drawing the item
@param item: the item to visit
        @param x: the left side of the current drawing area
        @param y: the top of the current drawing area
@return: the bottom-rightmost point
"""
if isinstance(item, str):
return (x + self.canvas.font.measure(item), y + self._get_text_height())
elif isinstance(item, tuple):
return item
def _handle(self, expression, command, x=0, y=0):
"""
@param expression: the expression to handle
@param command: the function to apply, either _draw_command or _visit_command
        @param x: the left side of the current drawing area
        @param y: the top of the current drawing area
@return: the bottom-rightmost point
"""
if command == self._visit_command:
#if we don't need to draw the item, then we can use the cached values
try:
#attempt to retrieve cached values
right = expression._drawing_width + x
bottom = expression._drawing_height + y
return (right, bottom)
except AttributeError:
#the values have not been cached yet, so compute them
pass
if isinstance(expression, DrtAbstractVariableExpression):
factory = self._handle_VariableExpression
elif isinstance(expression, DRS):
factory = self._handle_DRS
elif isinstance(expression, DrtNegatedExpression):
factory = self._handle_NegatedExpression
elif isinstance(expression, DrtLambdaExpression):
factory = self._handle_LambdaExpression
elif isinstance(expression, BinaryExpression):
factory = self._handle_BinaryExpression
elif isinstance(expression, DrtApplicationExpression):
factory = self._handle_ApplicationExpression
elif isinstance(expression, PossibleAntecedents):
factory = self._handle_VariableExpression
elif isinstance(expression, DrtProposition):
factory = self._handle_DrtProposition
else:
            raise Exception(expression.__class__.__name__)
(right, bottom) = factory(expression, command, x, y)
#cache the values
expression._drawing_width = right - x
expression._drawing_height = bottom - y
return (right, bottom)
def _handle_VariableExpression(self, expression, command, x, y):
return command(str(expression), x, y)
def _handle_NegatedExpression(self, expression, command, x, y):
# Find the width of the negation symbol
right = self._visit_command(DrtTokens.NOT, x, y)[0]
# Handle term
(right, bottom) = self._handle(expression.term, command, right, y)
# Handle variables now that we know the y-coordinate
command(DrtTokens.NOT, x, self._get_centered_top(y, bottom - y, self._get_text_height()))
return (right, bottom)
def _handle_DRS(self, expression, command, x, y):
left = x + self.BUFFER #indent the left side
bottom = y + self.BUFFER #indent the top
# Handle Discourse Referents
if expression.refs:
refs = ' '.join(map(str, expression.refs))
else:
refs = ' '
(max_right, bottom) = command(refs, left, bottom)
bottom += (self.BUFFER * 2)
# Handle Conditions
if expression.conds:
for cond in expression.conds:
(right, bottom) = self._handle(cond, command, left, bottom)
max_right = max(max_right, right)
bottom += self.BUFFER
else:
bottom += self._get_text_height() + self.BUFFER
# Handle Box
max_right += self.BUFFER
return command((max_right, bottom), x, y)
def _handle_ApplicationExpression(self, expression, command, x, y):
function, args = expression.uncurry()
if not isinstance(function, DrtAbstractVariableExpression):
#It's not a predicate expression ("P(x,y)"), so leave arguments curried
function = expression.function
args = [expression.argument]
# Get the max bottom of any element on the line
function_bottom = self._visit(function, x, y)[1]
max_bottom = max([function_bottom] + [self._visit(arg, x, y)[1] for arg in args])
line_height = max_bottom - y
# Handle 'function'
function_drawing_top = self._get_centered_top(y, line_height, function._drawing_height)
right = self._handle(function, command, x, function_drawing_top)[0]
# Handle open paren
centred_string_top = self._get_centered_top(y, line_height, self._get_text_height())
right = command(DrtTokens.OPEN, right, centred_string_top)[0]
# Handle each arg
for (i,arg) in enumerate(args):
arg_drawing_top = self._get_centered_top(y, line_height, arg._drawing_height)
right = self._handle(arg, command, right, arg_drawing_top)[0]
if i+1 < len(args):
#since it's not the last arg, add a comma
right = command(DrtTokens.COMMA + ' ', right, centred_string_top)[0]
# Handle close paren
right = command(DrtTokens.CLOSE, right, centred_string_top)[0]
return (right, max_bottom)
def _handle_LambdaExpression(self, expression, command, x, y):
# Find the width of the lambda symbol and abstracted variables
variables = DrtTokens.LAMBDA + str(expression.variable) + DrtTokens.DOT
right = self._visit_command(variables, x, y)[0]
# Handle term
(right, bottom) = self._handle(expression.term, command, right, y)
# Handle variables now that we know the y-coordinate
command(variables, x, self._get_centered_top(y, bottom - y, self._get_text_height()))
return (right, bottom)
def _handle_BinaryExpression(self, expression, command, x, y):
# Get the full height of the line, based on the operands
first_height = self._visit(expression.first, 0, 0)[1]
second_height = self._visit(expression.second, 0, 0)[1]
line_height = max(first_height, second_height)
# Handle open paren
centred_string_top = self._get_centered_top(y, line_height, self._get_text_height())
right = command(DrtTokens.OPEN, x, centred_string_top)[0]
# Handle the first operand
first_height = expression.first._drawing_height
(right, first_bottom) = self._handle(expression.first, command, right, self._get_centered_top(y, line_height, first_height))
# Handle the operator
right = command(' %s ' % expression.getOp(), right, centred_string_top)[0]
# Handle the second operand
second_height = expression.second._drawing_height
(right, second_bottom) = self._handle(expression.second, command, right, self._get_centered_top(y, line_height, second_height))
# Handle close paren
right = command(DrtTokens.CLOSE, right, centred_string_top)[0]
return (right, max(first_bottom, second_bottom))
    def _handle_DrtProposition(self, expression, command, x, y):
        # Find the width of the proposition label
        right = command(str(expression.variable), x, y)[0]
        # Handle the proposition's DRS
        (right, bottom) = self._handle(expression.drs, command, right, y)
        return (right, bottom)
def _get_centered_top(self, top, full_height, item_height):
"""Get the y-coordinate of the point that a figure should start at if
its height is 'item_height' and it needs to be centered in an area that
starts at 'top' and is 'full_height' tall."""
return top + (full_height - item_height) / 2
class DrtParser(LogicParser):
"""A lambda calculus expression parser."""
def __init__(self):
LogicParser.__init__(self)
self.operator_precedence = dict(
[(x,1) for x in DrtTokens.LAMBDA_LIST] + \
[(x,2) for x in DrtTokens.NOT_LIST] + \
[(APP,3)] + \
            [(x,4) for x in DrtTokens.EQ_LIST+DrtTokens.NEQ_LIST] + \
[(DrtTokens.DRS_CONC,5)] + \
[(x,6) for x in DrtTokens.OR_LIST] + \
[(x,7) for x in DrtTokens.IMP_LIST] + \
[(None,8)])
def get_all_symbols(self):
"""This method exists to be overridden"""
return DrtTokens.SYMBOLS
def isvariable(self, tok):
return tok not in DrtTokens.TOKENS
def handle(self, tok, context):
"""This method is intended to be overridden for logics that
use different operators or expressions"""
if tok in DrtTokens.NOT_LIST:
return self.handle_negation(tok, context)
elif tok in DrtTokens.LAMBDA_LIST:
return self.handle_lambda(tok, context)
elif tok == DrtTokens.OPEN:
if self.inRange(0) and self.token(0) == DrtTokens.OPEN_BRACKET:
return self.handle_DRS(tok, context)
else:
return self.handle_open(tok, context)
elif tok.upper() == DrtTokens.DRS:
self.assertNextToken(DrtTokens.OPEN)
return self.handle_DRS(tok, context)
elif self.isvariable(tok):
return self.handle_variable(tok, context)
def make_NegatedExpression(self, expression):
return DrtNegatedExpression(expression)
def handle_DRS(self, tok, context):
# a DRS
refs = self.handle_refs()
if self.inRange(0) and self.token(0) == DrtTokens.COMMA: #if there is a comma (it's optional)
self.token() # swallow the comma
conds = self.handle_conds(context)
self.assertNextToken(DrtTokens.CLOSE)
return DRS(refs, conds, None)
def handle_refs(self):
self.assertNextToken(DrtTokens.OPEN_BRACKET)
refs = []
while self.inRange(0) and self.token(0) != DrtTokens.CLOSE_BRACKET:
# Support expressions like: DRS([x y],C) == DRS([x,y],C)
if refs and self.token(0) == DrtTokens.COMMA:
self.token() # swallow the comma
refs.append(self.get_next_token_variable('quantified'))
self.assertNextToken(DrtTokens.CLOSE_BRACKET)
return refs
def handle_conds(self, context):
self.assertNextToken(DrtTokens.OPEN_BRACKET)
conds = []
while self.inRange(0) and self.token(0) != DrtTokens.CLOSE_BRACKET:
# Support expressions like: DRS([x y],C) == DRS([x, y],C)
if conds and self.token(0) == DrtTokens.COMMA:
self.token() # swallow the comma
conds.append(self.parse_Expression(context))
self.assertNextToken(DrtTokens.CLOSE_BRACKET)
return conds
def make_EqualityExpression(self, first, second):
"""This method serves as a hook for other logic parsers that
have different equality expression classes"""
return DrtEqualityExpression(first, second)
def get_BooleanExpression_factory(self, tok):
"""This method serves as a hook for other logic parsers that
have different boolean operators"""
if tok == DrtTokens.DRS_CONC:
return lambda first, second: DrtConcatenation(first, second, None)
elif tok in DrtTokens.OR_LIST:
return DrtOrExpression
elif tok in DrtTokens.IMP_LIST:
def make_imp_expression(first, second):
if isinstance(first, DRS):
return DRS(first.refs, first.conds, second)
if isinstance(first, DrtConcatenation):
return DrtConcatenation(first.first, first.second, second)
raise Exception('Antecedent of implication must be a DRS')
return make_imp_expression
else:
return None
def make_BooleanExpression(self, factory, first, second):
return factory(first, second)
def make_ApplicationExpression(self, function, argument):
return DrtApplicationExpression(function, argument)
def make_VariableExpression(self, name):
return DrtVariableExpression(Variable(name))
def make_LambdaExpression(self, variables, term):
return DrtLambdaExpression(variables, term)
def demo():
print '='*20 + 'TEST PARSE' + '='*20
parser = DrtParser()
print parser.parse(r'([x,y],[sees(x,y)])')
print parser.parse(r'([x],[man(x), walks(x)])')
print parser.parse(r'\x.\y.([],[sees(x,y)])')
print parser.parse(r'\x.([],[walks(x)])(john)')
print parser.parse(r'(([x],[walks(x)]) + ([y],[runs(y)]))')
print parser.parse(r'(([],[walks(x)]) -> ([],[runs(x)]))')
print parser.parse(r'([x],[PRO(x), sees(John,x)])')
print parser.parse(r'([x],[man(x), -([],[walks(x)])])')
print parser.parse(r'([],[(([x],[man(x)]) -> ([],[walks(x)]))])')
print '='*20 + 'Test fol()' + '='*20
print parser.parse(r'([x,y],[sees(x,y)])').fol()
print '='*20 + 'Test alpha conversion and lambda expression equality' + '='*20
e1 = parser.parse(r'\x.([],[P(x)])')
print e1
e2 = e1.alpha_convert(Variable('z'))
print e2
print e1 == e2
print '='*20 + 'Test resolve_anaphora()' + '='*20
print resolve_anaphora(parser.parse(r'([x,y,z],[dog(x), cat(y), walks(z), PRO(z)])'))
print resolve_anaphora(parser.parse(r'([],[(([x],[dog(x)]) -> ([y],[walks(y), PRO(y)]))])'))
print resolve_anaphora(parser.parse(r'(([x,y],[]) + ([],[PRO(x)]))'))
print '='*20 + 'Test pprint()' + '='*20
parser.parse(r"([],[])").pprint()
parser.parse(r"([],[([x],[big(x), dog(x)]) -> ([],[bark(x)]) -([x],[walk(x)])])").pprint()
parser.parse(r"([x,y],[x=y]) + ([z],[dog(z), walk(z)])").pprint()
parser.parse(r"([],[([x],[]) | ([y],[]) | ([z],[dog(z), walk(z)])])").pprint()
parser.parse(r"\P.\Q.(([x],[]) + P(x) + Q(x))(\x.([],[dog(x)]))").pprint()
def test_draw():
expressions = [
r'x',
r'([],[])',
r'([x],[])',
r'([x],[man(x)])',
r'([x,y],[sees(x,y)])',
r'([x],[man(x), walks(x)])',
r'\x.([],[man(x), walks(x)])',
r'\x y.([],[sees(x,y)])',
r'([],[(([],[walks(x)]) + ([],[runs(x)]))])',
r'([x],[man(x), -([],[walks(x)])])',
r'([],[(([x],[man(x)]) -> ([],[walks(x)]))])'
]
for e in expressions:
d = DrtParser().parse(e)
d.draw()
if __name__ == '__main__':
demo()
|
|
# -*- coding: utf-8 -*-
# See LICENSE.txt for licensing terms
#$URL$
#$Date$
#$Revision$
'''
This module provides one useful class: NodeHandler
The NodeHandler class is designed to be subclassed. Each subclass
should support the processing that createpdf.RstToPdf needs to do
on a particular type of node that could appear in a document tree.
When the subclass is defined, it should reference NodeHandler as
the first base class, and one or more docutils node classes as
subsequent base classes.
These docutils node classes will not actually wind up in the
base classes of the subclass. Instead, they will be used as
keys in a dispatch dictionary which is used to find the correct
NodeHandler subclass to use to process an instance of a given
docutils node class.
When an instance of createpdf.RstToPdf is created, a NodeHandler
instance will be called to return dispatchers for gather_elements
and gather_pdftext, wrapped up as methods of the createpdf.RstToPdf
class.
When a dispatcher is called, it will dispatch to the correct subclass
to handle the given docutils node instance.
If no NodeHandler subclass has been created to handle that particular
type of docutils node, then default processing will occur and a warning
will be logged.
'''
import types
import inspect
from log import log, nodeid
from smartypants import smartyPants
import docutils.nodes
from flowables import BoundByWidth, TocEntry
class MetaHelper(type):
''' MetaHelper is designed to generically enable a few of the benefits of
using metaclasses by encapsulating some of the complexity of setting
them up.
If a base class uses MetaHelper (by assigning __metaclass__ = MetaHelper),
then that class (and its metaclass inheriting subclasses) can control
class creation behavior by defining a couple of helper functions.
1) A base class can define a _classpreinit function. This function
is called during __new__ processing of the class object itself,
but only during subclass creation (not when the class defining
the _classpreinit is itself created).
The subclass object does not yet exist at the time _classpreinit
is called. _classpreinit accepts all the parameters of the
__new__ function for the class itself (not the same as the __new__
function for the instantiation of class objects!) and must return
a tuple of the same objects. A typical use of this would be to
modify the class bases before class creation.
2) Either a base class or a subclass can define a _classinit() function.
This function will be called immediately after the actual class has
been created, and can do whatever setup is required for the class.
Note that every base class (but not every subclass) which uses
MetaHelper MUST define _classinit, even if that definition is None.
MetaHelper also places an attribute into each class created with it.
_baseclass is set to None if this class has no superclasses which
also use MetaHelper, or to the first such MetaHelper-using baseclass.
_baseclass can be explicitly set inside the class definition, in
which case MetaHelper will not override it.
'''
def __new__(clstype, name, bases, clsdict):
# Our base class is the first base in the class definition which
# uses MetaHelper, or None if no such base exists.
base = ([x for x in bases if type(x) is MetaHelper] + [None])[0]
# Only set our base into the class if it has not been explicitly
# set
clsdict.setdefault('_baseclass', base)
        # See if the base class defined a preinit function, and call it
# if so.
preinit = getattr(base, '_classpreinit', None)
if preinit is not None:
clstype, name, bases, clsdict = preinit(clstype, name, bases, clsdict)
# Delegate the real work to type
return type.__new__(clstype, name, bases, clsdict)
def __init__(cls, name, bases, clsdict):
# Let type build the class for us
type.__init__(cls, name, bases, clsdict)
# Call the class's initialization function if defined
if cls._classinit is not None:
cls._classinit()
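# An illustrative sketch (not part of the original module) of the MetaHelper
# contract described in the docstring above: the hypothetical base class below
# defines _classinit, which runs once for the base and once per subclass, and
# a subclass may opt out by setting _classinit to None.
class _ExampleRegistry(object):
    __metaclass__ = MetaHelper
    names = []
    @classmethod
    def _classinit(cls):
        # Record every class created in this small hierarchy.
        cls.names.append(cls.__name__)
class _ExampleChild(_ExampleRegistry):
    _classinit = None  # disable the hook for this subclass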
class NodeHandler(object):
''' NodeHandler classes are used to dispatch
to the correct class to handle some node class
type, via a dispatchdict in the main class.
'''
__metaclass__ = MetaHelper
@classmethod
def _classpreinit(baseclass, clstype, name, bases, clsdict):
# _classpreinit is called before the actual class is built
# Perform triage on the class bases to separate actual
# inheritable bases from the target docutils node classes
# which we want to dispatch for.
new_bases = []
targets = []
for target in bases:
if target is not object:
(targets, new_bases)[issubclass(target, NodeHandler)].append(target)
clsdict['_targets'] = targets
return clstype, name, tuple(new_bases), clsdict
@classmethod
def _classinit(cls):
# _classinit() is called once the subclass has actually
# been created.
# For the base class, just add a dispatch dictionary
if cls._baseclass is None:
cls.dispatchdict = {}
return
# for subclasses, instantiate them, and then add
# the class to the dispatch dictionary for each of its targets.
self = cls()
for target in cls._targets:
if cls.dispatchdict.setdefault(target, self) is not self:
t = repr(target)
old = repr(cls.dispatchdict[target])
new = repr(self)
log.debug('Dispatch handler %s for node type %s overridden by %s' %
(old, t, new))
cls.dispatchdict[target] = self
@staticmethod
def getclassname(obj):
cln = repr(obj.__class__)
info = cln.split("'")
if len(info) == 3:
return info[1]
return cln
def log_unknown(self, node, during):
if not hasattr(self, 'unkn_node'):
self.unkn_node = set()
        cln = self.getclassname(node)
        if cln not in self.unkn_node:
self.unkn_node.add(cln)
log.warning("Unkn. node (self.%s): %s [%s]",
during, cln, nodeid(node))
try:
log.debug(node)
except (UnicodeDecodeError, UnicodeEncodeError):
log.debug(repr(node))
def findsubclass(self, node, during):
handlerinfo = '%s.%s' % (self.getclassname(self), during)
log.debug("%s: %s", handlerinfo, self.getclassname(node))
log.debug("%s: [%s]", handlerinfo, nodeid(node))
try:
log.debug("%s: %s", handlerinfo, node)
except (UnicodeDecodeError, UnicodeEncodeError):
log.debug("%s: %r", handlerninfo, node)
log.debug("")
# Dispatch to the first matching class in the MRO
dispatchdict = self.dispatchdict
for baseclass in inspect.getmro(node.__class__):
result = dispatchdict.get(baseclass)
if result is not None:
break
else:
self.log_unknown(node, during)
result = self
return result
def __call__(self, client):
''' Get the dispatchers, wrapped up as methods for the client'''
textdispatch = types.MethodType(self.textdispatch, client)
elemdispatch = types.MethodType(self.elemdispatch, client)
return textdispatch, elemdispatch
# This overridable attribute will be set true in the instance
# if handling a sphinx document
sphinxmode = False
# Begin overridable attributes and methods for elemdispatch
def gather_elements(self, client, node, style):
return client.gather_elements(node, style=style)
def getstyle(self, client, node, style):
try:
if node['classes'] and node['classes'][0]:
# FIXME: Supports only one class, sorry ;-)
                if node['classes'][0] in client.styles.StyleSheet:
style = client.styles[node['classes'][0]]
else:
log.info("Unknown class %s, ignoring. [%s]",
node['classes'][0], nodeid(node))
except TypeError: # Happens when a docutils.node.Text reaches here
pass
if style is None or style == client.styles['bodytext']:
style = client.styles.styleForNode(node)
return style
def getelements(self, client, node, style):
style = self.getstyle(client, node, style)
elements = self.gather_elements(client, node, style)
# Make all the sidebar cruft unreachable
#if style.__dict__.get('float','None').lower() !='none':
#node.elements=[Sidebar(node.elements,style)]
#elif 'width' in style.__dict__:
if 'width' in style.__dict__:
elements = [BoundByWidth(style.width,
elements, style, mode="shrink")]
return elements
# End overridable attributes and methods for elemdispatch
def elemdispatch(self, client, node, style=None):
self = self.findsubclass(node, 'elemdispatch')
# set anchors for internal references
try:
for i in node['ids']:
client.pending_targets.append(i)
except TypeError: #Happens with docutils.node.Text
pass
elements = self.getelements(client, node, style)
if node.line and client.debugLinesPdf:
elements.insert(0,TocEntry(client.depth-1,'LINE-%s'%node.line))
node.elements = elements
return elements
# Begin overridable attributes and methods for textdispatch
pre = ''
post = ''
def get_pre_post(self, client, node, replaceEnt):
return self.pre, self.post
def get_text(self, client, node, replaceEnt):
return client.gather_pdftext(node)
def apply_smartypants(self, text, smarty, node):
# Try to be clever about when to use smartypants
if node.__class__ in (docutils.nodes.paragraph,
docutils.nodes.block_quote, docutils.nodes.title):
return smartyPants(text, smarty)
return text
# End overridable attributes and methods for textdispatch
def textdispatch(self, client, node, replaceEnt=True):
self = self.findsubclass(node, 'textdispatch')
pre, post = self.get_pre_post(client, node, replaceEnt)
text = self.get_text(client, node, replaceEnt)
text = pre + text + post
try:
log.debug("%s.textdispatch: %s" % (self.getclassname(self), text))
except UnicodeDecodeError:
pass
text = self.apply_smartypants(text, client.smarty, node)
node.pdftext = text
return text
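# An illustrative sketch (not part of the original module): per the module
# docstring, a concrete handler lists NodeHandler first and the docutils node
# classes it dispatches for after it. _classpreinit strips the node classes
# from the bases and records them in _targets, and _classinit registers an
# instance in the dispatch dictionary; a real handler module would override
# this example.
class _ExampleParagraphHandler(NodeHandler, docutils.nodes.paragraph):
    # textdispatch would wrap a paragraph's text with these.
    pre = ''
    post = '\n'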
|
|
#!/usr/bin/env python
#
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example shows how to upload offline data for store sales transactions.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
import datetime
import hashlib
from googleads import adwords
import pytz
_DT_FORMAT = '%Y%m%d %H%M%S'
# User identifier types whose values must be hashed.
_HASHED_IDENTIFIER_TYPES = ('HASHED_EMAIL', 'HASHED_FIRST_NAME',
'HASHED_LAST_NAME', 'HASHED_PHONE')
# The timezone to be used in this example. For valid timezone IDs, see:
# https://developers.google.com/adwords/api/docs/appendix/codes-formats#timezone-ids
_TIMEZONE = pytz.timezone('America/New_York')
# Name of the conversion tracker to upload to.
CONVERSION_NAME = 'INSERT_CONVERSION_NAME_HERE'
# Insert email addresses below for creating user identifiers.
EMAIL_ADDRESSES = ['EMAIL_ADDRESS_1', 'EMAIL_ADDRESS_2']
# The external upload ID can be any number that you use to keep track of your
# uploads.
EXTERNAL_UPLOAD_ID = 'INSERT_EXTERNAL_UPLOAD_ID'
# Store sales upload common metadata types
METADATA_TYPE_1P = 'FirstPartyUploadMetadata'
METADATA_TYPE_3P = 'ThirdPartyUploadMetadata'
# Set the below constant to METADATA_TYPE_3P if uploading third-party data.
STORE_SALES_UPLOAD_COMMON_METADATA_TYPE = METADATA_TYPE_1P
# The three constants below are needed when uploading third-party data. They
# are not used when uploading first-party data.
# Advertiser upload time to partner.
# For times, use the format yyyyMMdd HHmmss tz. For more details on formats,
# see:
# https://developers.google.com/adwords/api/docs/appendix/codes-formats#timezone-ids
ADVERTISER_UPLOAD_TIME = 'INSERT_ADVERTISER_UPLOAD_TIME_HERE'
# Indicates the version of the bridge map.
BRIDGE_MAP_VERSION_ID = 'INSERT_BRIDGE_MAP_VERSION_ID_HERE'
# The ID of the third party uploading the transaction feed.
PARTNER_ID = 'INSERT_PARTNER_ID_HERE'
def main(client, conversion_name, external_upload_id,
store_sales_upload_common_metadata_type, email_addresses,
advertiser_upload_time=None, bridge_map_version_id=None,
partner_id=None):
# Set partial failure to True since this example demonstrates how to handle
# partial errors.
client.partial_failure = True
# Initialize appropriate services.
offline_data_upload_service = client.GetService(
'OfflineDataUploadService', version='v201809')
# Create the first offline data for upload.
# This transaction occurred 7 days ago with an amount of $200 USD.
transaction_time_1 = (datetime.datetime.now(tz=_TIMEZONE) -
datetime.timedelta(days=7))
offline_data_1 = {
'StoreSalesTransaction': {
'userIdentifiers': [
_CreateUserIdentifier(identifier_type='HASHED_EMAIL',
value=email_addresses[0]),
_CreateUserIdentifier(identifier_type='STATE', value='New York')
],
'transactionTime': _GetFormattedDateTime(transaction_time_1),
'transactionAmount': {
'currencyCode': 'USD',
'money': {
'microAmount': 200000000
}
},
'conversionName': conversion_name
}
}
# Create the second offline data for upload.
# This transaction occurred 14 days ago with amount of 450 EUR.
transaction_time_2 = (datetime.datetime.now(tz=_TIMEZONE) -
datetime.timedelta(days=14))
offline_data_2 = {
'StoreSalesTransaction': {
'userIdentifiers': [
_CreateUserIdentifier(identifier_type='HASHED_EMAIL',
value=email_addresses[1]),
_CreateUserIdentifier(identifier_type='STATE', value='California')
],
'transactionTime': _GetFormattedDateTime(transaction_time_2),
'transactionAmount': {
'currencyCode': 'EUR',
'money': {
'microAmount': 450000000
}
},
'conversionName': conversion_name
}
}
# Set the type and metadata of this upload.
upload_metadata = {
'StoreSalesUploadCommonMetadata': {
'xsi_type': store_sales_upload_common_metadata_type,
'loyaltyRate': 1.0,
'transactionUploadRate': 1.0,
}
}
if store_sales_upload_common_metadata_type == METADATA_TYPE_1P:
upload_type = 'STORE_SALES_UPLOAD_FIRST_PARTY'
elif store_sales_upload_common_metadata_type == METADATA_TYPE_3P:
upload_type = 'STORE_SALES_UPLOAD_THIRD_PARTY'
upload_metadata['StoreSalesUploadCommonMetadata'].update({
'advertiserUploadTime': advertiser_upload_time,
'validTransactionRate': 1.0,
'partnerMatchRate': 1.0,
'partnerUploadRate': 1.0,
'bridgeMapVersionId': bridge_map_version_id,
'partnerId': partner_id
})
else:
raise ValueError('Unknown metadata type.')
# Create offline data upload
offline_data_upload = {
'externalUploadId': external_upload_id,
'offlineDataList': [offline_data_1, offline_data_2],
# Set the type of this upload.
'uploadType': upload_type,
'uploadMetadata': upload_metadata
}
# Create an offline data upload operation.
operations = [{
'operator': 'ADD',
'operand': offline_data_upload
}]
  # Upload offline data on the server and print the result.
result = offline_data_upload_service.mutate(operations)
offline_data_upload = result['value'][0]
print('Uploaded offline data with external upload ID "%d" and upload status '
'"%s".' % (offline_data_upload['externalUploadId'],
offline_data_upload['uploadStatus']))
# Print any partial data errors from the response.
if result['partialFailureErrors']:
for api_error in result['partialFailureErrors']:
# Get the index of the failed operation from the error's field path
# elements.
      operation_index = _GetFieldPathElementIndex(api_error, 'operations')
      if operation_index is not None:
failed_offline_data_upload = operations[operation_index]['operand']
# Get the index of the entry in the offline data list from the error's
# field path elements.
offline_data_list_index = _GetFieldPathElementIndex(
api_error, 'offlineDataList')
print('Offline data list entry "%d" in operation "%d" with external '
'upload ID "%d" and type "%s" has triggered failure for the '
'following reason: "%s"' % (
offline_data_list_index, operation_index,
failed_offline_data_upload['externalUploadId'],
failed_offline_data_upload['uploadType'],
api_error['errorString']))
else:
print('A failure has occurred for the following reason: "%s".' % (
api_error['errorString']))
def _CreateUserIdentifier(identifier_type=None, value=None):
"""Creates a user identifier from the specified type and value.
Args:
identifier_type: a str specifying the type of user identifier.
value: a str value of the identifier; to be hashed using SHA-256 if needed.
Returns:
A dict specifying a user identifier, with a value hashed using SHA-256 if
needed.
"""
  if identifier_type in _HASHED_IDENTIFIER_TYPES:
    # If the user identifier type is a hashed type, normalize and hash the
    # value. Encoding to UTF-8 keeps this working on Python 3, where hashlib
    # requires bytes.
    value = hashlib.sha256(value.strip().lower().encode('utf-8')).hexdigest()
user_identifier = {
'userIdentifierType': identifier_type,
'value': value
}
return user_identifier
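# For example (a hedged illustration; the address is a placeholder):
#   _CreateUserIdentifier(identifier_type='HASHED_EMAIL',
#                         value=' Alice@Example.com ')
# strips and lowercases the value to 'alice@example.com' before hashing,
# whereas a non-hashed type such as 'STATE' passes its value through
# unchanged.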
def _GetFieldPathElementIndex(api_error, field):
"""Retrieve the index of a given field in the api_error's fieldPathElements.
Args:
api_error: a dict containing a partialFailureError returned from the AdWords
API.
field: a str field for which this determines the index in the api_error's
fieldPathElements.
Returns:
An int index of the field path element, or None if the specified field can't
be found in the api_error.
"""
field_path_elements = api_error['fieldPathElements']
if field_path_elements:
found_index = [field_path_element['index']
for field_path_element in field_path_elements
if field_path_element['field'] == field]
    if found_index:
      # Return the first matching index as an int, per the docstring.
      return found_index[0]
return None
def _GetFormattedDateTime(dt):
"""Formats the given datetime and timezone for use with AdWords.
Args:
dt: a datetime instance.
Returns:
A str representation of the datetime in the correct format for AdWords.
"""
return '%s %s' % (datetime.datetime.strftime(dt, _DT_FORMAT), _TIMEZONE.zone)
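# For example, a transaction at noon on 2017-01-01 New York time is rendered
# as '20170101 120000 America/New_York': _DT_FORMAT followed by the timezone
# ID, matching the "yyyyMMdd HHmmss tz" format noted above.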
if __name__ == '__main__':
# Initialize client object.
adwords_client = adwords.AdWordsClient.LoadFromStorage()
main(adwords_client, CONVERSION_NAME, EXTERNAL_UPLOAD_ID,
STORE_SALES_UPLOAD_COMMON_METADATA_TYPE, EMAIL_ADDRESSES,
ADVERTISER_UPLOAD_TIME, BRIDGE_MAP_VERSION_ID, PARTNER_ID)
|
|
#! python
# -*- coding: utf-8 -*-
"""
This program translates WAV audio files to a plain-text dat format.
:author: Daniel C. Pizetta
:contact: daniel.pizetta@usp.br
:since: 2013/05/17
"""
import array
import os
import struct
import sys
import wave
from qtpy.QtCore import QObject, Qt
from qtpy.QtWidgets import QDialog, QFileDialog, QMessageBox, QApplication
from wavytool.dlg_wav2dat import Ui_Dialog
qobject = QObject()
class ConvertWave2Data(QDialog):
def __init__(self, parent=None):
"""Constructor."""
# graphical interface
super(ConvertWave2Data, self).__init__(parent)
self.setAttribute(Qt.WA_DeleteOnClose)
self.ui = Ui_Dialog()
self.ui.setupUi(self)
# properties
self.path_to_read = ''
self.path_to_write = ''
self.number_of_channels = 0
self.sample_width = 0
self.frame_rate = 0
self.number_of_frames = 0
self.compression_name = None
self.frames = []
# link actions and signals to methods and slots
# to read
self.ui.pushButton_path_to_read.clicked.connect(lambda: self.setPathToReadWave(''))
# options
self.ui.checkBox_same_name.stateChanged.connect(self.changePathToWrite)
# to write
self.ui.pushButton_path_to_write.clicked.connect(lambda: self.setPathToWriteData(''))
self.ui.pushButtonConvert.clicked.connect(lambda: self.writeData(self.path_to_write))
self.ui.pushButtonCancel.clicked.connect(self.close)
def changePathToWrite(self, use_same_path=0):
"""Change the path on the interface."""
# construct the path based on the path to read
        if use_same_path == 2:  # 2 == Qt.Checked
path = os.path.join(os.path.dirname(self.path_to_read),
os.path.splitext(os.path.basename(self.path_to_read))[0]) + '.dat'
self.setPathToWriteData(os.path.abspath(path))
self.ui.lineEdit_path_to_write.setText(self.path_to_write)
self.ui.lineEdit_path_to_write.setEnabled(False)
self.ui.pushButton_path_to_write.setEnabled(False)
else:
self.ui.lineEdit_path_to_write.setText('')
self.ui.lineEdit_path_to_write.setEnabled(True)
self.ui.pushButton_path_to_write.setEnabled(True)
def setPathToReadWave(self, path=''):
"""Set path to read."""
        if not path:
            path = QFileDialog.getOpenFileName(self,
                                               self.tr("Load WAVE file"),
                                               '',
                                               self.tr("WAVE File (*.wav)"))[0]
# set path
self.path_to_read = os.path.abspath(path)
# set path on the graphical interface
self.ui.lineEdit_path_to_read.setText(self.path_to_read)
# read the wav file
self.readWave()
def setPathToWriteData(self, path=''):
"""Set path to write."""
        if not path:
            path = QFileDialog.getSaveFileName(self,
                                               self.tr("Create DATA file"),
                                               self.path_to_read,
                                               self.tr("DATA File (*.dat)"))[0]
# set path
self.path_to_write = os.path.abspath(path)
# set path on the graphical interface
self.ui.lineEdit_path_to_write.setText(self.path_to_write)
def readWave(self):
"""Read the wav file."""
# Reading the file and load the values
        try:
            wav_file = wave.open(self.path_to_read, 'rb')
        except Exception as e:
            QMessageBox.critical(QApplication.topLevelWidgets()[0],
                                 qobject.tr('Problem to read WAVE file.'),
                                 qobject.tr('Error: {}').format(str(e)),
                                 QMessageBox.Ok)
            return
        # Extracting information
try:
self.number_of_channels = wav_file.getnchannels()
self.sample_width = wav_file.getsampwidth()
self.frame_rate = wav_file.getframerate()
self.number_of_frames = wav_file.getnframes()
self.compression_name = wav_file.getcompname()
except Exception as e:
QMessageBox.critical(QApplication.topLevelWidgets()[0],
qobject.tr('Problem with WAVE properties.'),
qobject.tr('Error: {}').format(str(e)),
QMessageBox.Ok)
# Extracting data
try:
self.frames = wav_file.readframes(self.number_of_frames)
except Exception as e:
QMessageBox.critical(QApplication.topLevelWidgets()[0],
qobject.tr('Problem with WAVE data.'),
qobject.tr('Error: {}').format(str(e)),
QMessageBox.Ok)
# set information on the interface
self.ui.label_file_name.setText(str(os.path.splitext(os.path.basename(self.path_to_read))[0]))
self.ui.label_size.setText(str(self.sample_width * self.number_of_frames + 44))
self.ui.label_number_of_channels.setText(str(self.number_of_channels))
self.ui.label_frame_rate.setText(str(self.frame_rate))
self.ui.label_number_of_frames.setText(str(self.number_of_frames))
self.ui.label_sample_width.setText(str(self.sample_width))
self.ui.label_compression.setText(str(self.compression_name))
wav_file.close()
def information(self):
"""Get information about the file."""
dic_information = {}
dic_information['Number of Channels'] = self.number_of_channels
dic_information['Sample Width'] = self.sample_width
dic_information['Frame Rate'] = self.frame_rate
dic_information['Number of Frames'] = self.number_of_frames
dic_information['Compression Name'] = self.compression_name
return dic_information
    def data(self):
        """Get data in the file as signed 16-bit samples."""
        return array.array('h', self.frames)
    def writeData(self, path=''):
        """Write dat file."""
        self.ui.progressBar.setRange(0, self.number_of_frames)
        if self.path_to_write == '':
            self.setPathToWriteData()
        file_ = open(self.path_to_write, 'w')
        # Each 16-bit sample occupies two bytes: unpack it as a little-endian
        # signed short and write one "time value" pair per line.
        for index in range(0, len(self.frames) - 1, 2):
            self.ui.progressBar.setValue(index // 2)
            sample = struct.unpack('<h', self.frames[index:index + 2])[0]
            file_.write(str((index // 2) * (1.0 / self.frame_rate)) +
                        ' ' + str(sample) + '\n')
        file_.close()
        QMessageBox.information(QApplication.topLevelWidgets()[0],
                                qobject.tr('Information.'),
                                qobject.tr('Conversion concluded successfully.'),
                                QMessageBox.Ok)
        return True
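# An illustrative sketch (not part of the original program): the same
# conversion without the GUI, assuming a mono 16-bit PCM WAVE file. It writes
# one "time value" pair per line, like writeData() above.
def convert_wave_to_dat(path_in, path_out):
    wav_file = wave.open(path_in, 'rb')
    frame_rate = wav_file.getframerate()
    frames = wav_file.readframes(wav_file.getnframes())
    wav_file.close()
    with open(path_out, 'w') as out:
        # array 'h' decodes the byte string as signed 16-bit samples
        for i, sample in enumerate(array.array('h', frames)):
            out.write('%f %d\n' % (i / float(frame_rate), sample))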
def main():
app = QApplication(sys.argv)
window = ConvertWave2Data()
window.show()
sys.exit(app.exec_())
if __name__ == "__main__":
main()
|
|
#!/usr/bin/env python
from __future__ import print_function
import argparse
import datetime
import dateutil.parser
import dateutil.tz
import gzip
import json
import logging
import pickle
import sys
from collections import OrderedDict
from functools import partial
from itertools import count, groupby
from six.moves.urllib.request import urlopen
logging.basicConfig(level=logging.INFO)
API_PARAMS = {
'base_url': 'https://api.github.com/repos',
'owner': 'bokeh',
'repo': 'bokeh',
}
IGNORE_ISSUE_TYPE = {
'type: discussion',
'type: tracker',
}
LOG_SECTION = OrderedDict([ # issue type label -> log section heading
('type: bug', 'bugfixes'),
('type: feature', 'features'),
('type: task', 'tasks'),
])
ISSUES_SORT_KEY = lambda issue: (issue_section_order(issue), int(issue['number']))
ISSUES_BY_SECTION = lambda issue: issue_section(issue)
#######################################
# Object Storage
#######################################
def save_object(filename, obj):
"""Compresses and pickles given object to the given filename."""
logging.info('saving {}...'.format(filename))
try:
with gzip.GzipFile(filename, 'wb') as f:
f.write(pickle.dumps(obj, 1))
except Exception as e:
logging.error('save failure: {}'.format(e))
raise
def load_object(filename):
"""Unpickles and decompresses the given filename and returns the created object."""
logging.info('loading {}...'.format(filename))
try:
        with gzip.GzipFile(filename, 'rb') as f:
            buf = b''
            while True:
                data = f.read()
                if not data:
                    break
                buf += data
            return pickle.loads(buf)
except Exception as e:
logging.error('load failure: {}'.format(e))
raise
#######################################
# Issues
#######################################
def issue_section_order(issue):
"""Returns the section order for the given issue."""
    try:
        return list(LOG_SECTION.values()).index(issue_section(issue))
    except ValueError:
        return -1
def issue_completed(issue):
"""Returns True iff this issue is has been resolved as completed."""
labels = issue.get('labels', [])
return any(label['name'] == 'reso: completed' for label in labels)
def issue_section(issue):
"""Returns the section heading for the issue, or None if this issue should be ignored."""
labels = issue.get('labels', [])
for label in labels:
if not label['name'].startswith('type: '):
continue
if label['name'] in LOG_SECTION:
return LOG_SECTION[label['name']]
elif label['name'] in IGNORE_ISSUE_TYPE:
return None
else:
            logging.warning('unknown issue type: "{}" for: {}'.format(label['name'], issue_line(issue)))
return None
def issue_tags(issue):
"""Returns list of tags for this issue."""
labels = issue.get('labels', [])
return [label['name'].replace('tag: ', '') for label in labels if label['name'].startswith('tag: ')]
def closed_issue(issue, after=None):
"""Returns True iff this issue was closed after given date. If after not given, only checks if issue is closed."""
if issue['state'] == 'closed':
if after is None or parse_timestamp(issue['closed_at']) > after:
return True
return False
def relevant_issue(issue, after):
"""Returns True iff this issue is something we should show in the changelog."""
return (closed_issue(issue, after) and
issue_completed(issue) and
issue_section(issue))
def relevant_issues(issues, after):
"""Yields relevant closed issues (closed after a given datetime) given a list of issues."""
logging.info('finding relevant issues after {}...'.format(after))
seen = set()
for issue in issues:
        if relevant_issue(issue, after) and issue['title'] not in seen:
seen.add(issue['title'])
yield issue
def closed_issues(issues, after):
"""Yields closed issues (closed after a given datetime) given a list of issues."""
logging.info('finding closed issues after {}...'.format(after))
seen = set()
for issue in issues:
if closed_issue(issue, after) and issue['title'] not in seen:
seen.add(issue['title'])
yield issue
def all_issues(issues):
"""Yields unique set of issues given a list of issues."""
logging.info('finding issues...')
seen = set()
for issue in issues:
if issue['title'] not in seen:
seen.add(issue['title'])
yield issue
#######################################
# GitHub API
#######################################
def get_labels_url():
"""Returns github API URL for querying labels."""
return '{base_url}/{owner}/{repo}/labels'.format(**API_PARAMS)
def get_issues_url(page, after):
"""Returns github API URL for querying tags."""
template = '{base_url}/{owner}/{repo}/issues?state=closed&per_page=100&page={page}&since={after}'
return template.format(page=page, after=after.isoformat(), **API_PARAMS)
def get_tags_url():
"""Returns github API URL for querying tags."""
return '{base_url}/{owner}/{repo}/tags'.format(**API_PARAMS)
def parse_timestamp(timestamp):
"""Parse ISO8601 timestamps given by github API."""
dt = dateutil.parser.parse(timestamp)
return dt.astimezone(dateutil.tz.tzutc())
def read_url(url):
"""Reads given URL as JSON and returns data as loaded python object."""
logging.debug('reading {url} ...'.format(url=url))
r = urlopen(url).read()
return json.loads(r.decode("UTF-8"))
def query_tags():
"""Hits the github API for repository tags and returns the data."""
return read_url(get_tags_url())
def query_issues(page, after):
"""Hits the github API for a single page of closed issues and returns the data."""
return read_url(get_issues_url(page, after))
def query_all_issues(after):
"""Hits the github API for all closed issues after the given date, returns the data."""
page = count(1)
data = []
while True:
page_data = query_issues(next(page), after)
if not page_data:
break
data.extend(page_data)
return data
def dateof(tag_name, tags):
"""Given a list of tags, returns the datetime of the tag with the given name; Otherwise None."""
for tag in tags:
if tag['name'] == tag_name:
commit = read_url(tag['commit']['url'])
return parse_timestamp(commit['commit']['committer']['date'])
return None
def get_data(query_func, load_data=False, save_data=False):
"""Gets data from query_func, optionally saving that data to a file; or loads data from a file."""
if hasattr(query_func, '__name__'):
func_name = query_func.__name__
elif hasattr(query_func, 'func'):
func_name = query_func.func.__name__
pickle_file = '{}.pickle'.format(func_name)
if load_data:
data = load_object(pickle_file)
else:
data = query_func()
if save_data:
save_object(pickle_file, data)
return data
#######################################
# Validation
#######################################
def check_issue(issue, after):
labels = issue.get('labels', [])
if not any(label['name'].startswith('type: ') for label in labels):
        logging.warning('issue with no type label: {}'.format(issue_line(issue)))
if closed_issue(issue, after):
if not any(label['name'].startswith('reso: ') for label in labels):
if not any(label['name'] in IGNORE_ISSUE_TYPE for label in labels):
                logging.warning('closed issue with no reso label: {}'.format(issue_line(issue)))
if 'pull_request' in issue:
if not any(label['name'].startswith('status: ') for label in labels):
            logging.warning('pull request without status label: {}'.format(issue_line(issue)))
def check_issues(issues, after=None):
"""Checks issues for BEP 1 compliance."""
issues = closed_issues(issues, after) if after else all_issues(issues)
issues = sorted(issues, key=ISSUES_SORT_KEY)
for section, issue_group in groupby(issues, key=ISSUES_BY_SECTION):
for issue in issue_group:
check_issue(issue, after)
#######################################
# Changelog
#######################################
def issue_line(issue):
"""Returns log line for given issue."""
template = '#{number} {tags}{title}'
tags = issue_tags(issue)
params = {
'title': issue['title'].capitalize().rstrip('.'),
'number': issue['number'],
'tags': ' '.join('[{}]'.format(tag) for tag in tags) + (' ' if tags else '')
}
return template.format(**params)
def generate_changelog(issues, after, heading, rtag=False):
"""Prints out changelog."""
    relevant = relevant_issues(issues, after)
    relevant = sorted(relevant, key=ISSUES_BY_SECTION)
def write(func, endofline="", append=""):
func(heading + '\n' + '-' * 20 + endofline)
        for section, issue_group in groupby(relevant, key=ISSUES_BY_SECTION):
func(' * {}:'.format(section) + endofline)
for issue in reversed(list(issue_group)):
func(' - {}'.format(issue_line(issue)) + endofline)
func(endofline + append)
if rtag is not False:
with open("../CHANGELOG", "r+") as f:
content = f.read()
f.seek(0)
write(f.write, '\n', content)
# Insert the summary points from the <tag>.rst file into the CHANGELOG
flines = []
with open("../CHANGELOG", "r") as f:
flines = f.readlines()
with open("../sphinx/source/docs/releases/" + rtag + ".rst", "r") as f:
flines[2:2] = [" " + line for line in f.readlines() if line.startswith("*")]
with open("../CHANGELOG", "w") as f:
f.writelines(flines)
else:
write(print)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Creates a bokeh changelog using the github API.')
limit_group = parser.add_mutually_exclusive_group(required=True)
limit_group.add_argument('-d', '--since-date', metavar='DATE',
help='select issues that occurred after the given ISO8601 date')
limit_group.add_argument('-p', '--since-tag', metavar='TAG',
help='select issues that occurred after the given git tag')
parser.add_argument('-c', '--check', action='store_true', default=False,
help='check closed issues for BEP 1 compliance')
parser.add_argument('-r', '--release-tag', metavar='RELEASE',
help='the proposed new release tag.\n'
'NOTE: this will automatically write the output to the CHANGELOG')
data_group = parser.add_mutually_exclusive_group()
data_group.add_argument('-s', '--save-data', action='store_true', default=False,
help='save api query result data; useful for testing')
data_group.add_argument('-l', '--load-data', action='store_true', default=False,
help='load api data from previously saved data; useful for testing')
args = parser.parse_args()
if args.since_tag:
tags = get_data(query_tags, load_data=args.load_data, save_data=args.save_data)
after = dateof(args.since_tag, tags)
heading = 'Since {:>14}:'.format(args.since_tag)
elif args.since_date:
after = dateutil.parser.parse(args.since_date)
after = after.replace(tzinfo=dateutil.tz.tzlocal())
heading = 'Since {:>14}:'.format(after.date().isoformat())
issues = get_data(partial(query_all_issues, after), load_data=args.load_data, save_data=args.save_data)
if args.check:
check_issues(issues)
if args.release_tag:
heading = '{} {:>8}:'.format(str(datetime.date.today()), args.release_tag)
generate_changelog(issues, after, heading, args.release_tag)
else:
generate_changelog(issues, after, heading)
|
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_utils import uuidutils
import webob
from nova.api.openstack.compute import servers as servers_v21
from nova.compute import api as compute_api
from nova.compute import task_states
from nova.compute import vm_states
import nova.conf
from nova import exception
from nova.image import glance
from nova import objects
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_block_device
from nova.tests.unit import fake_instance
from nova.tests.unit.image import fake
from nova.tests import uuidsentinel as uuids
CONF = nova.conf.CONF
FAKE_UUID = fakes.FAKE_UUID
INSTANCE_IDS = {FAKE_UUID: 1}
def return_server_not_found(*arg, **kwarg):
raise exception.InstanceNotFound(instance_id=FAKE_UUID)
def instance_update_and_get_original(context, instance_uuid, values,
columns_to_join=None,
):
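    # Mimic the DB API contract of returning (old_ref, new_ref); this fake
    # returns the same updated dict twice.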
inst = fakes.stub_instance(INSTANCE_IDS[instance_uuid], host='fake_host')
inst = dict(inst, **values)
return (inst, inst)
def instance_update(context, instance_uuid, kwargs):
inst = fakes.stub_instance(INSTANCE_IDS[instance_uuid], host='fake_host')
return inst
class MockSetAdminPassword(object):
def __init__(self):
self.instance_id = None
self.password = None
def __call__(self, context, instance, password):
self.instance_id = instance['uuid']
self.password = password
class ServerActionsControllerTestV21(test.TestCase):
image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
image_base_url = 'http://localhost:9292/images/'
image_href = image_base_url + '/' + image_uuid
servers = servers_v21
validation_error = exception.ValidationError
request_too_large_error = exception.ValidationError
image_url = None
def setUp(self):
super(ServerActionsControllerTestV21, self).setUp()
self.flags(group='glance', api_servers=['http://localhost:9292'])
self.stub_out('nova.db.instance_get_by_uuid',
fakes.fake_instance_get(vm_state=vm_states.ACTIVE,
host='fake_host'))
self.stub_out('nova.db.instance_update_and_get_original',
instance_update_and_get_original)
fakes.stub_out_nw_api(self)
fakes.stub_out_compute_api_snapshot(self.stubs)
fake.stub_out_image_service(self)
self.flags(allow_instance_snapshots=True,
enable_instance_password=True,
group='api')
self._image_href = '155d900f-4e14-4e4c-a73d-069cbf4541e6'
self.controller = self._get_controller()
self.compute_api = self.controller.compute_api
self.req = fakes.HTTPRequest.blank('')
self.context = self.req.environ['nova.context']
def _get_controller(self):
return self.servers.ServersController()
def _set_fake_extension(self):
pass
def _test_locked_instance(self, action, method=None, body_map=None,
compute_api_args_map=None):
if body_map is None:
body_map = {}
if compute_api_args_map is None:
compute_api_args_map = {}
args, kwargs = compute_api_args_map.get(action, ((), {}))
uuid = uuidutils.generate_uuid()
context = self.req.environ['nova.context']
instance = fake_instance.fake_db_instance(
id=1, uuid=uuid, vm_state=vm_states.ACTIVE, task_state=None,
project_id=context.project_id,
user_id=context.user_id)
instance = objects.Instance._from_db_object(
self.context, objects.Instance(), instance)
with test.nested(
mock.patch.object(compute_api.API, 'get',
return_value=instance),
mock.patch.object(compute_api.API, method,
side_effect=exception.InstanceIsLocked(
instance_uuid=instance['uuid'])),
) as (mock_get, mock_method):
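            # Resolve the controller action by name so this single helper can
            # drive every entry of the table-driven test below.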
controller_function = 'self.controller.' + action
self.assertRaises(webob.exc.HTTPConflict,
eval(controller_function),
self.req, instance['uuid'],
body=body_map.get(action))
mock_get.assert_called_once_with(self.context, uuid,
expected_attrs=['flavor', 'numa_topology'])
mock_method.assert_called_once_with(self.context, instance,
*args, **kwargs)
def test_actions_with_locked_instance(self):
actions = ['_action_resize', '_action_confirm_resize',
'_action_revert_resize', '_action_reboot',
'_action_rebuild']
method_translations = {'_action_resize': 'resize',
'_action_confirm_resize': 'confirm_resize',
'_action_revert_resize': 'revert_resize',
'_action_reboot': 'reboot',
'_action_rebuild': 'rebuild'}
body_map = {'_action_resize': {'resize': {'flavorRef': '2'}},
'_action_reboot': {'reboot': {'type': 'HARD'}},
'_action_rebuild': {'rebuild': {
'imageRef': self.image_uuid,
'adminPass': 'TNc53Dr8s7vw'}}}
        args_map = {'_action_resize': (('2',), {}),
'_action_confirm_resize': ((), {}),
'_action_reboot': (('HARD',), {}),
'_action_rebuild': ((self.image_uuid,
'TNc53Dr8s7vw'), {})}
for action in actions:
method = method_translations.get(action)
self._test_locked_instance(action, method=method,
body_map=body_map,
compute_api_args_map=args_map)
def test_reboot_hard(self):
body = dict(reboot=dict(type="HARD"))
self.controller._action_reboot(self.req, FAKE_UUID, body=body)
def test_reboot_soft(self):
body = dict(reboot=dict(type="SOFT"))
self.controller._action_reboot(self.req, FAKE_UUID, body=body)
def test_reboot_incorrect_type(self):
body = dict(reboot=dict(type="NOT_A_TYPE"))
self.assertRaises(self.validation_error,
self.controller._action_reboot,
self.req, FAKE_UUID, body=body)
def test_reboot_missing_type(self):
body = dict(reboot=dict())
self.assertRaises(self.validation_error,
self.controller._action_reboot,
self.req, FAKE_UUID, body=body)
def test_reboot_none(self):
body = dict(reboot=dict(type=None))
self.assertRaises(self.validation_error,
self.controller._action_reboot,
self.req, FAKE_UUID, body=body)
def test_reboot_not_found(self):
self.stub_out('nova.db.instance_get_by_uuid',
return_server_not_found)
body = dict(reboot=dict(type="HARD"))
self.assertRaises(webob.exc.HTTPNotFound,
self.controller._action_reboot,
self.req, uuids.fake, body=body)
def test_reboot_raises_conflict_on_invalid_state(self):
body = dict(reboot=dict(type="HARD"))
def fake_reboot(*args, **kwargs):
raise exception.InstanceInvalidState(attr='fake_attr',
state='fake_state', method='fake_method',
instance_uuid='fake')
self.stub_out('nova.compute.api.API.reboot', fake_reboot)
self.assertRaises(webob.exc.HTTPConflict,
self.controller._action_reboot,
self.req, FAKE_UUID, body=body)
def test_reboot_soft_with_soft_in_progress_raises_conflict(self):
body = dict(reboot=dict(type="SOFT"))
self.stub_out('nova.db.instance_get_by_uuid',
fakes.fake_instance_get(vm_state=vm_states.ACTIVE,
task_state=task_states.REBOOTING))
self.assertRaises(webob.exc.HTTPConflict,
self.controller._action_reboot,
self.req, FAKE_UUID, body=body)
def test_reboot_hard_with_soft_in_progress_does_not_raise(self):
body = dict(reboot=dict(type="HARD"))
self.stub_out('nova.db.instance_get_by_uuid',
fakes.fake_instance_get(vm_state=vm_states.ACTIVE,
task_state=task_states.REBOOTING))
self.controller._action_reboot(self.req, FAKE_UUID, body=body)
def test_reboot_hard_with_hard_in_progress(self):
body = dict(reboot=dict(type="HARD"))
self.stub_out('nova.db.instance_get_by_uuid',
fakes.fake_instance_get(vm_state=vm_states.ACTIVE,
task_state=task_states.REBOOTING_HARD))
self.controller._action_reboot(self.req, FAKE_UUID, body=body)
def test_reboot_soft_with_hard_in_progress_raises_conflict(self):
body = dict(reboot=dict(type="SOFT"))
self.stub_out('nova.db.instance_get_by_uuid',
fakes.fake_instance_get(vm_state=vm_states.ACTIVE,
task_state=task_states.REBOOTING_HARD))
self.assertRaises(webob.exc.HTTPConflict,
self.controller._action_reboot,
self.req, FAKE_UUID, body=body)
def _test_rebuild_preserve_ephemeral(self, value=None):
self._set_fake_extension()
return_server = fakes.fake_instance_get(image_ref='2',
vm_state=vm_states.ACTIVE,
host='fake_host')
self.stub_out('nova.db.instance_get_by_uuid', return_server)
body = {
"rebuild": {
"imageRef": self._image_href,
},
}
if value is not None:
body['rebuild']['preserve_ephemeral'] = value
with mock.patch.object(compute_api.API, 'rebuild') as mock_rebuild:
self.controller._action_rebuild(self.req, FAKE_UUID, body=body)
if value is not None:
mock_rebuild.assert_called_once_with(self.context, mock.ANY,
self._image_href, mock.ANY, preserve_ephemeral=value)
else:
mock_rebuild.assert_called_once_with(self.context, mock.ANY,
self._image_href, mock.ANY)
def test_rebuild_preserve_ephemeral_true(self):
self._test_rebuild_preserve_ephemeral(True)
def test_rebuild_preserve_ephemeral_false(self):
self._test_rebuild_preserve_ephemeral(False)
def test_rebuild_preserve_ephemeral_default(self):
self._test_rebuild_preserve_ephemeral()
def test_rebuild_accepted_minimum(self):
return_server = fakes.fake_instance_get(image_ref='2',
vm_state=vm_states.ACTIVE, host='fake_host')
self.stub_out('nova.db.instance_get_by_uuid', return_server)
self_href = 'http://localhost/v2/servers/%s' % FAKE_UUID
body = {
"rebuild": {
"imageRef": self._image_href,
},
}
robj = self.controller._action_rebuild(self.req, FAKE_UUID, body=body)
body = robj.obj
self.assertEqual(body['server']['image']['id'], '2')
self.assertEqual(len(body['server']['adminPass']),
CONF.password_length)
self.assertEqual(robj['location'], self_href.encode('utf-8'))
def test_rebuild_instance_with_image_uuid(self):
info = dict(image_href_in_call=None)
def rebuild(self2, context, instance, image_href, *args, **kwargs):
info['image_href_in_call'] = image_href
self.stub_out('nova.db.instance_get',
fakes.fake_instance_get(vm_state=vm_states.ACTIVE))
self.stub_out('nova.compute.api.API.rebuild', rebuild)
# proper local hrefs must start with 'http://localhost/v2/'
body = {
'rebuild': {
'imageRef': self.image_uuid,
},
}
self.controller._action_rebuild(self.req, FAKE_UUID, body=body)
self.assertEqual(info['image_href_in_call'], self.image_uuid)
def test_rebuild_instance_with_image_href_uses_uuid(self):
# proper local hrefs must start with 'http://localhost/v2/'
body = {
'rebuild': {
'imageRef': self.image_href,
},
}
self.assertRaises(exception.ValidationError,
self.controller._action_rebuild,
self.req, FAKE_UUID, body=body)
def test_rebuild_accepted_minimum_pass_disabled(self):
# run with enable_instance_password disabled to verify adminPass
# is missing from response. See lp bug 921814
self.flags(enable_instance_password=False, group='api')
return_server = fakes.fake_instance_get(image_ref='2',
vm_state=vm_states.ACTIVE, host='fake_host')
self.stub_out('nova.db.instance_get_by_uuid', return_server)
self_href = 'http://localhost/v2/servers/%s' % FAKE_UUID
body = {
"rebuild": {
"imageRef": self._image_href,
},
}
robj = self.controller._action_rebuild(self.req, FAKE_UUID, body=body)
body = robj.obj
self.assertEqual(body['server']['image']['id'], '2')
self.assertNotIn("adminPass", body['server'])
self.assertEqual(robj['location'], self_href.encode('utf-8'))
def test_rebuild_raises_conflict_on_invalid_state(self):
body = {
"rebuild": {
"imageRef": self._image_href,
},
}
def fake_rebuild(*args, **kwargs):
raise exception.InstanceInvalidState(attr='fake_attr',
state='fake_state', method='fake_method',
instance_uuid='fake')
self.stub_out('nova.compute.api.API.rebuild', fake_rebuild)
self.assertRaises(webob.exc.HTTPConflict,
self.controller._action_rebuild,
self.req, FAKE_UUID, body=body)
def test_rebuild_accepted_with_metadata(self):
metadata = {'new': 'metadata'}
return_server = fakes.fake_instance_get(metadata=metadata,
vm_state=vm_states.ACTIVE, host='fake_host')
self.stub_out('nova.db.instance_get_by_uuid', return_server)
body = {
"rebuild": {
"imageRef": self._image_href,
"metadata": metadata,
},
}
body = self.controller._action_rebuild(self.req, FAKE_UUID,
body=body).obj
self.assertEqual(body['server']['metadata'], metadata)
def test_rebuild_accepted_with_bad_metadata(self):
body = {
"rebuild": {
"imageRef": self._image_href,
"metadata": "stack",
},
}
self.assertRaises(self.validation_error,
self.controller._action_rebuild,
self.req, FAKE_UUID, body=body)
def test_rebuild_with_too_large_metadata(self):
body = {
"rebuild": {
"imageRef": self._image_href,
"metadata": {
256 * "k": "value"
}
}
}
self.assertRaises(self.request_too_large_error,
self.controller._action_rebuild, self.req,
FAKE_UUID, body=body)
def test_rebuild_bad_entity(self):
body = {
"rebuild": {
"imageId": self._image_href,
},
}
self.assertRaises(self.validation_error,
self.controller._action_rebuild,
self.req, FAKE_UUID, body=body)
def test_rebuild_admin_pass(self):
return_server = fakes.fake_instance_get(image_ref='2',
vm_state=vm_states.ACTIVE, host='fake_host')
self.stub_out('nova.db.instance_get_by_uuid', return_server)
body = {
"rebuild": {
"imageRef": self._image_href,
"adminPass": "asdf",
},
}
body = self.controller._action_rebuild(self.req, FAKE_UUID,
body=body).obj
self.assertEqual(body['server']['image']['id'], '2')
self.assertEqual(body['server']['adminPass'], 'asdf')
def test_rebuild_admin_pass_pass_disabled(self):
# run with enable_instance_password disabled to verify adminPass
# is missing from response. See lp bug 921814
self.flags(enable_instance_password=False, group='api')
return_server = fakes.fake_instance_get(image_ref='2',
vm_state=vm_states.ACTIVE, host='fake_host')
self.stub_out('nova.db.instance_get_by_uuid', return_server)
body = {
"rebuild": {
"imageRef": self._image_href,
"adminPass": "asdf",
},
}
body = self.controller._action_rebuild(self.req, FAKE_UUID,
body=body).obj
self.assertEqual(body['server']['image']['id'], '2')
self.assertNotIn('adminPass', body['server'])
def test_rebuild_server_not_found(self):
def server_not_found(self, instance_id,
columns_to_join=None, use_slave=False):
raise exception.InstanceNotFound(instance_id=instance_id)
self.stub_out('nova.db.instance_get_by_uuid', server_not_found)
body = {
"rebuild": {
"imageRef": self._image_href,
},
}
self.assertRaises(webob.exc.HTTPNotFound,
self.controller._action_rebuild,
self.req, FAKE_UUID, body=body)
def test_rebuild_with_bad_image(self):
body = {
"rebuild": {
"imageRef": "foo",
},
}
self.assertRaises(exception.ValidationError,
self.controller._action_rebuild,
self.req, FAKE_UUID, body=body)
def test_rebuild_accessIP(self):
attributes = {
'access_ip_v4': '172.19.0.1',
'access_ip_v6': 'fe80::1',
}
body = {
"rebuild": {
"imageRef": self._image_href,
"accessIPv4": "172.19.0.1",
"accessIPv6": "fe80::1",
},
}
data = {'changes': {}}
orig_get = compute_api.API.get
def wrap_get(*args, **kwargs):
data['instance'] = orig_get(*args, **kwargs)
return data['instance']
def fake_save(context, **kwargs):
data['changes'].update(data['instance'].obj_get_changes())
self.stub_out('nova.compute.api.API.get', wrap_get)
self.stub_out('nova.objects.Instance.save', fake_save)
self.controller._action_rebuild(self.req, FAKE_UUID, body=body)
self.assertEqual(self._image_href, data['changes']['image_ref'])
self.assertEqual("", data['changes']['kernel_id'])
self.assertEqual("", data['changes']['ramdisk_id'])
self.assertEqual(task_states.REBUILDING, data['changes']['task_state'])
self.assertEqual(0, data['changes']['progress'])
for attr, value in attributes.items():
self.assertEqual(value, str(data['changes'][attr]))
def test_rebuild_when_kernel_not_exists(self):
def return_image_meta(*args, **kwargs):
image_meta_table = {
'2': {'id': 2, 'status': 'active', 'container_format': 'ari'},
'155d900f-4e14-4e4c-a73d-069cbf4541e6':
{'id': 3, 'status': 'active', 'container_format': 'raw',
'properties': {'kernel_id': 1, 'ramdisk_id': 2}},
}
image_id = args[2]
try:
image_meta = image_meta_table[str(image_id)]
except KeyError:
raise exception.ImageNotFound(image_id=image_id)
return image_meta
self.stub_out('nova.tests.unit.image.fake._FakeImageService.show',
return_image_meta)
body = {
"rebuild": {
"imageRef": "155d900f-4e14-4e4c-a73d-069cbf4541e6",
},
}
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_rebuild,
self.req, FAKE_UUID, body=body)
def test_rebuild_proper_kernel_ram(self):
instance_meta = {'kernel_id': None, 'ramdisk_id': None}
orig_get = compute_api.API.get
def wrap_get(*args, **kwargs):
inst = orig_get(*args, **kwargs)
instance_meta['instance'] = inst
return inst
def fake_save(context, **kwargs):
instance = instance_meta['instance']
for key in instance_meta.keys():
if key in instance.obj_what_changed():
instance_meta[key] = instance[key]
def return_image_meta(*args, **kwargs):
image_meta_table = {
'1': {'id': 1, 'status': 'active', 'container_format': 'aki'},
'2': {'id': 2, 'status': 'active', 'container_format': 'ari'},
'155d900f-4e14-4e4c-a73d-069cbf4541e6':
{'id': 3, 'status': 'active', 'container_format': 'raw',
'properties': {'kernel_id': 1, 'ramdisk_id': 2}},
}
image_id = args[2]
try:
image_meta = image_meta_table[str(image_id)]
except KeyError:
raise exception.ImageNotFound(image_id=image_id)
return image_meta
self.stub_out('nova.tests.unit.image.fake._FakeImageService.show',
return_image_meta)
self.stub_out('nova.compute.api.API.get', wrap_get)
self.stub_out('nova.objects.Instance.save', fake_save)
body = {
"rebuild": {
"imageRef": "155d900f-4e14-4e4c-a73d-069cbf4541e6",
},
}
self.controller._action_rebuild(self.req, FAKE_UUID, body=body).obj
self.assertEqual(instance_meta['kernel_id'], '1')
self.assertEqual(instance_meta['ramdisk_id'], '2')
@mock.patch.object(compute_api.API, 'rebuild')
def test_rebuild_instance_raise_auto_disk_config_exc(self, mock_rebuild):
body = {
"rebuild": {
"imageRef": self._image_href,
},
}
mock_rebuild.side_effect = exception.AutoDiskConfigDisabledByImage(
image='dummy')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_rebuild,
self.req, FAKE_UUID, body=body)
def test_resize_server(self):
body = dict(resize=dict(flavorRef="http://localhost/3"))
self.resize_called = False
def resize_mock(*args):
self.resize_called = True
self.stub_out('nova.compute.api.API.resize', resize_mock)
self.controller._action_resize(self.req, FAKE_UUID, body=body)
self.assertTrue(self.resize_called)
def test_resize_server_no_flavor(self):
body = dict(resize=dict())
self.assertRaises(self.validation_error,
self.controller._action_resize,
self.req, FAKE_UUID, body=body)
def test_resize_server_no_flavor_ref(self):
body = dict(resize=dict(flavorRef=None))
self.assertRaises(self.validation_error,
self.controller._action_resize,
self.req, FAKE_UUID, body=body)
def test_resize_server_with_extra_arg(self):
body = dict(resize=dict(favorRef="http://localhost/3",
extra_arg="extra_arg"))
self.assertRaises(self.validation_error,
self.controller._action_resize,
self.req, FAKE_UUID, body=body)
def test_resize_server_invalid_flavor_ref(self):
body = dict(resize=dict(flavorRef=1.2))
self.assertRaises(self.validation_error,
self.controller._action_resize,
self.req, FAKE_UUID, body=body)
def test_resize_with_server_not_found(self):
body = dict(resize=dict(flavorRef="http://localhost/3"))
self.stub_out('nova.compute.api.API.get', return_server_not_found)
self.assertRaises(webob.exc.HTTPNotFound,
self.controller._action_resize,
self.req, FAKE_UUID, body=body)
def test_resize_with_image_exceptions(self):
body = dict(resize=dict(flavorRef="http://localhost/3"))
self.resize_called = 0
image_id = 'fake_image_id'
exceptions = [
(exception.ImageNotAuthorized(image_id=image_id),
webob.exc.HTTPUnauthorized),
(exception.ImageNotFound(image_id=image_id),
webob.exc.HTTPBadRequest),
(exception.Invalid, webob.exc.HTTPBadRequest),
(exception.NoValidHost(reason='Bad host'),
webob.exc.HTTPBadRequest),
(exception.AutoDiskConfigDisabledByImage(image=image_id),
webob.exc.HTTPBadRequest),
]
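        # Split the (exception, expected HTTP error) pairs into two parallel
        # iterators; each resize call below raises the next exception in turn.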
raised, expected = map(iter, zip(*exceptions))
def _fake_resize(obj, context, instance, flavor_id):
self.resize_called += 1
raise next(raised)
self.stub_out('nova.compute.api.API.resize', _fake_resize)
for call_no in range(len(exceptions)):
next_exception = next(expected)
actual = self.assertRaises(next_exception,
self.controller._action_resize,
self.req, FAKE_UUID, body=body)
if (isinstance(exceptions[call_no][0],
exception.NoValidHost)):
self.assertEqual(actual.explanation,
'No valid host was found. Bad host')
elif (isinstance(exceptions[call_no][0],
exception.AutoDiskConfigDisabledByImage)):
self.assertEqual(actual.explanation,
'Requested image fake_image_id has automatic'
' disk resize disabled.')
self.assertEqual(self.resize_called, call_no + 1)
@mock.patch('nova.compute.api.API.resize',
side_effect=exception.CannotResizeDisk(reason=''))
def test_resize_raises_cannot_resize_disk(self, mock_resize):
body = dict(resize=dict(flavorRef="http://localhost/3"))
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_resize,
self.req, FAKE_UUID, body=body)
@mock.patch('nova.compute.api.API.resize',
side_effect=exception.FlavorNotFound(reason='',
flavor_id='fake_id'))
def test_resize_raises_flavor_not_found(self, mock_resize):
body = dict(resize=dict(flavorRef="http://localhost/3"))
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_resize,
self.req, FAKE_UUID, body=body)
def test_resize_with_too_many_instances(self):
body = dict(resize=dict(flavorRef="http://localhost/3"))
def fake_resize(*args, **kwargs):
raise exception.TooManyInstances(message="TooManyInstance")
self.stub_out('nova.compute.api.API.resize', fake_resize)
self.assertRaises(webob.exc.HTTPForbidden,
self.controller._action_resize,
self.req, FAKE_UUID, body=body)
def test_resize_raises_conflict_on_invalid_state(self):
body = dict(resize=dict(flavorRef="http://localhost/3"))
def fake_resize(*args, **kwargs):
raise exception.InstanceInvalidState(attr='fake_attr',
state='fake_state', method='fake_method',
instance_uuid='fake')
self.stub_out('nova.compute.api.API.resize', fake_resize)
self.assertRaises(webob.exc.HTTPConflict,
self.controller._action_resize,
self.req, FAKE_UUID, body=body)
@mock.patch('nova.compute.api.API.resize',
side_effect=exception.NoValidHost(reason=''))
def test_resize_raises_no_valid_host(self, mock_resize):
body = dict(resize=dict(flavorRef="http://localhost/3"))
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_resize,
self.req, FAKE_UUID, body=body)
@mock.patch.object(compute_api.API, 'resize')
def test_resize_instance_raise_auto_disk_config_exc(self, mock_resize):
mock_resize.side_effect = exception.AutoDiskConfigDisabledByImage(
image='dummy')
body = dict(resize=dict(flavorRef="http://localhost/3"))
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_resize,
self.req, FAKE_UUID, body=body)
@mock.patch('nova.compute.api.API.resize',
side_effect=exception.PciRequestAliasNotDefined(
alias='fake_name'))
def test_resize_pci_alias_not_defined(self, mock_resize):
# Tests that PciRequestAliasNotDefined is translated to a 400 error.
body = dict(resize=dict(flavorRef="http://localhost/3"))
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_resize,
self.req, FAKE_UUID, body=body)
def test_confirm_resize_server(self):
body = dict(confirmResize=None)
self.confirm_resize_called = False
def cr_mock(*args):
self.confirm_resize_called = True
self.stub_out('nova.compute.api.API.confirm_resize', cr_mock)
self.controller._action_confirm_resize(self.req, FAKE_UUID, body=body)
self.assertTrue(self.confirm_resize_called)
def test_confirm_resize_migration_not_found(self):
body = dict(confirmResize=None)
def confirm_resize_mock(*args):
raise exception.MigrationNotFoundByStatus(instance_id=1,
status='finished')
self.stub_out('nova.compute.api.API.confirm_resize',
confirm_resize_mock)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_confirm_resize,
self.req, FAKE_UUID, body=body)
def test_confirm_resize_raises_conflict_on_invalid_state(self):
body = dict(confirmResize=None)
def fake_confirm_resize(*args, **kwargs):
raise exception.InstanceInvalidState(attr='fake_attr',
state='fake_state', method='fake_method',
instance_uuid='fake')
self.stub_out('nova.compute.api.API.confirm_resize',
fake_confirm_resize)
self.assertRaises(webob.exc.HTTPConflict,
self.controller._action_confirm_resize,
self.req, FAKE_UUID, body=body)
def test_revert_resize_migration_not_found(self):
body = dict(revertResize=None)
def revert_resize_mock(*args):
raise exception.MigrationNotFoundByStatus(instance_id=1,
status='finished')
self.stub_out('nova.compute.api.API.revert_resize',
revert_resize_mock)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_revert_resize,
self.req, FAKE_UUID, body=body)
def test_revert_resize_server_not_found(self):
body = dict(revertResize=None)
        self.assertRaises(webob.exc.HTTPNotFound,
self.controller._action_revert_resize,
self.req, "bad_server_id", body=body)
def test_revert_resize_server(self):
body = dict(revertResize=None)
self.revert_resize_called = False
def revert_mock(*args):
self.revert_resize_called = True
self.stub_out('nova.compute.api.API.revert_resize', revert_mock)
body = self.controller._action_revert_resize(self.req, FAKE_UUID,
body=body)
self.assertTrue(self.revert_resize_called)
def test_revert_resize_raises_conflict_on_invalid_state(self):
body = dict(revertResize=None)
def fake_revert_resize(*args, **kwargs):
raise exception.InstanceInvalidState(attr='fake_attr',
state='fake_state', method='fake_method',
instance_uuid='fake')
self.stub_out('nova.compute.api.API.revert_resize',
fake_revert_resize)
self.assertRaises(webob.exc.HTTPConflict,
self.controller._action_revert_resize,
self.req, FAKE_UUID, body=body)
def test_create_image(self):
body = {
'createImage': {
'name': 'Snapshot 1',
},
}
response = self.controller._action_create_image(self.req, FAKE_UUID,
body=body)
location = response.headers['Location']
self.assertEqual(self.image_url + '123' if self.image_url else
glance.generate_image_url('123'),
location)
def test_create_image_v2_45(self):
"""Tests the createImage server action API with the 2.45 microversion
where there is a response body but no Location header.
"""
body = {
'createImage': {
'name': 'Snapshot 1',
},
}
req = fakes.HTTPRequest.blank('', version='2.45')
response = self.controller._action_create_image(req, FAKE_UUID,
body=body)
self.assertIsInstance(response, dict)
self.assertEqual('123', response['image_id'])
def test_create_image_name_too_long(self):
long_name = 'a' * 260
body = {
'createImage': {
'name': long_name,
},
}
self.assertRaises(self.validation_error,
self.controller._action_create_image, self.req,
FAKE_UUID, body=body)
def _do_test_create_volume_backed_image(
self, extra_properties, mock_vol_create_side_effect=None):
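        # Build a deterministic UUID-shaped string from one character,
        # e.g. _fake_id('a') -> 'aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa'.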
def _fake_id(x):
return '%s-%s-%s-%s' % (x * 8, x * 4, x * 4, x * 12)
body = dict(createImage=dict(name='snapshot_of_volume_backed'))
if extra_properties:
body['createImage']['metadata'] = extra_properties
image_service = glance.get_default_image_service()
bdm = [dict(volume_id=_fake_id('a'),
volume_size=1,
device_name='vda',
delete_on_termination=False)]
def fake_block_device_mapping_get_all_by_instance(context, inst_id,
use_slave=False):
return [fake_block_device.FakeDbBlockDeviceDict(
{'volume_id': _fake_id('a'),
'source_type': 'snapshot',
'destination_type': 'volume',
'volume_size': 1,
'device_name': 'vda',
'snapshot_id': 1,
'boot_index': 0,
'delete_on_termination': False,
'no_device': None})]
self.stub_out('nova.db.block_device_mapping_get_all_by_instance',
fake_block_device_mapping_get_all_by_instance)
system_metadata = dict(image_kernel_id=_fake_id('b'),
image_ramdisk_id=_fake_id('c'),
image_root_device_name='/dev/vda',
image_block_device_mapping=str(bdm),
image_container_format='ami')
instance = fakes.fake_instance_get(image_ref=uuids.fake,
vm_state=vm_states.ACTIVE,
root_device_name='/dev/vda',
system_metadata=system_metadata)
self.stub_out('nova.db.instance_get_by_uuid', instance)
volume = dict(id=_fake_id('a'),
size=1,
host='fake',
display_description='fake')
snapshot = dict(id=_fake_id('d'))
with test.nested(
mock.patch.object(self.controller.compute_api.compute_rpcapi,
'quiesce_instance',
side_effect=exception.InstanceQuiesceNotSupported(
instance_id='fake', reason='test')),
mock.patch.object(self.controller.compute_api.volume_api, 'get',
return_value=volume),
mock.patch.object(self.controller.compute_api.volume_api,
'create_snapshot_force',
return_value=snapshot),
) as (mock_quiesce, mock_vol_get, mock_vol_create):
if mock_vol_create_side_effect:
mock_vol_create.side_effect = mock_vol_create_side_effect
response = self.controller._action_create_image(self.req,
FAKE_UUID, body=body)
location = response.headers['Location']
image_id = location.replace(self.image_url or
glance.generate_image_url(''), '')
image = image_service.show(None, image_id)
self.assertEqual(image['name'], 'snapshot_of_volume_backed')
properties = image['properties']
self.assertEqual(properties['kernel_id'], _fake_id('b'))
self.assertEqual(properties['ramdisk_id'], _fake_id('c'))
self.assertEqual(properties['root_device_name'], '/dev/vda')
self.assertTrue(properties['bdm_v2'])
bdms = properties['block_device_mapping']
self.assertEqual(len(bdms), 1)
self.assertEqual(bdms[0]['boot_index'], 0)
self.assertEqual(bdms[0]['source_type'], 'snapshot')
self.assertEqual(bdms[0]['destination_type'], 'volume')
self.assertEqual(bdms[0]['snapshot_id'], snapshot['id'])
self.assertEqual('/dev/vda', bdms[0]['device_name'])
for fld in ('connection_info', 'id', 'instance_uuid'):
self.assertNotIn(fld, bdms[0])
for k in extra_properties.keys():
self.assertEqual(properties[k], extra_properties[k])
mock_quiesce.assert_called_once_with(mock.ANY, mock.ANY)
mock_vol_get.assert_called_once_with(mock.ANY, volume['id'])
mock_vol_create.assert_called_once_with(mock.ANY, volume['id'],
mock.ANY, mock.ANY)
def test_create_volume_backed_image_no_metadata(self):
self._do_test_create_volume_backed_image({})
def test_create_volume_backed_image_with_metadata(self):
self._do_test_create_volume_backed_image(dict(ImageType='Gold',
ImageVersion='2.0'))
def test_create_volume_backed_image_cinder_over_quota(self):
self.assertRaises(
webob.exc.HTTPForbidden,
self._do_test_create_volume_backed_image, {},
mock_vol_create_side_effect=exception.OverQuota(
overs='snapshot'))
def _test_create_volume_backed_image_with_metadata_from_volume(
self, extra_metadata=None):
def _fake_id(x):
return '%s-%s-%s-%s' % (x * 8, x * 4, x * 4, x * 12)
body = dict(createImage=dict(name='snapshot_of_volume_backed'))
if extra_metadata:
body['createImage']['metadata'] = extra_metadata
image_service = glance.get_default_image_service()
def fake_block_device_mapping_get_all_by_instance(context, inst_id,
use_slave=False):
return [fake_block_device.FakeDbBlockDeviceDict(
{'volume_id': _fake_id('a'),
'source_type': 'snapshot',
'destination_type': 'volume',
'volume_size': 1,
'device_name': 'vda',
'snapshot_id': 1,
'boot_index': 0,
'delete_on_termination': False,
'no_device': None})]
self.stub_out('nova.db.block_device_mapping_get_all_by_instance',
fake_block_device_mapping_get_all_by_instance)
instance = fakes.fake_instance_get(
image_ref='',
vm_state=vm_states.ACTIVE,
root_device_name='/dev/vda',
system_metadata={'image_test_key1': 'test_value1',
'image_test_key2': 'test_value2'})
self.stub_out('nova.db.instance_get_by_uuid', instance)
volume = dict(id=_fake_id('a'),
size=1,
host='fake',
display_description='fake')
snapshot = dict(id=_fake_id('d'))
with test.nested(
mock.patch.object(self.controller.compute_api.compute_rpcapi,
'quiesce_instance',
side_effect=exception.InstanceQuiesceNotSupported(
instance_id='fake', reason='test')),
mock.patch.object(self.controller.compute_api.volume_api, 'get',
return_value=volume),
mock.patch.object(self.controller.compute_api.volume_api,
'create_snapshot_force',
return_value=snapshot),
) as (mock_quiesce, mock_vol_get, mock_vol_create):
response = self.controller._action_create_image(self.req,
FAKE_UUID, body=body)
location = response.headers['Location']
image_id = location.replace(self.image_base_url, '')
image = image_service.show(None, image_id)
properties = image['properties']
self.assertEqual(properties['test_key1'], 'test_value1')
self.assertEqual(properties['test_key2'], 'test_value2')
if extra_metadata:
for key, val in extra_metadata.items():
self.assertEqual(properties[key], val)
mock_quiesce.assert_called_once_with(mock.ANY, mock.ANY)
mock_vol_get.assert_called_once_with(mock.ANY, volume['id'])
mock_vol_create.assert_called_once_with(mock.ANY, volume['id'],
mock.ANY, mock.ANY)
def test_create_vol_backed_img_with_meta_from_vol_without_extra_meta(self):
self._test_create_volume_backed_image_with_metadata_from_volume()
def test_create_vol_backed_img_with_meta_from_vol_with_extra_meta(self):
self._test_create_volume_backed_image_with_metadata_from_volume(
extra_metadata={'a': 'b'})
def test_create_image_snapshots_disabled(self):
"""Don't permit a snapshot if the allow_instance_snapshots flag is
False
"""
self.flags(allow_instance_snapshots=False, group='api')
body = {
'createImage': {
'name': 'Snapshot 1',
},
}
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_create_image,
self.req, FAKE_UUID, body=body)
def test_create_image_with_metadata(self):
body = {
'createImage': {
'name': 'Snapshot 1',
'metadata': {'key': 'asdf'},
},
}
response = self.controller._action_create_image(self.req, FAKE_UUID,
body=body)
location = response.headers['Location']
self.assertEqual(self.image_url + '123' if self.image_url else
glance.generate_image_url('123'), location)
def test_create_image_with_too_much_metadata(self):
body = {
'createImage': {
'name': 'Snapshot 1',
'metadata': {},
},
}
for num in range(CONF.quota.metadata_items + 1):
body['createImage']['metadata']['foo%i' % num] = "bar"
self.assertRaises(webob.exc.HTTPForbidden,
self.controller._action_create_image,
self.req, FAKE_UUID, body=body)
def test_create_image_no_name(self):
body = {
'createImage': {},
}
self.assertRaises(self.validation_error,
self.controller._action_create_image,
self.req, FAKE_UUID, body=body)
def test_create_image_blank_name(self):
body = {
'createImage': {
'name': '',
}
}
self.assertRaises(self.validation_error,
self.controller._action_create_image,
self.req, FAKE_UUID, body=body)
def test_create_image_bad_metadata(self):
body = {
'createImage': {
'name': 'geoff',
'metadata': 'henry',
},
}
self.assertRaises(self.validation_error,
self.controller._action_create_image,
self.req, FAKE_UUID, body=body)
def test_create_image_raises_conflict_on_invalid_state(self):
def snapshot(*args, **kwargs):
raise exception.InstanceInvalidState(attr='fake_attr',
state='fake_state', method='fake_method',
instance_uuid='fake')
self.stub_out('nova.compute.api.API.snapshot', snapshot)
body = {
"createImage": {
"name": "test_snapshot",
},
}
self.assertRaises(webob.exc.HTTPConflict,
self.controller._action_create_image,
self.req, FAKE_UUID, body=body)
|
|
# Copyright 2011 Nicholas Bray
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from .decompiler_common import TestDecompiler, Dummy
class TestTupleDecompile(TestDecompiler):
s = """
def f(a, b, c):
return c, b, a
"""
inputs = [[1, 2, 3], [True, False, False]]
class TestSwapDecompile(TestDecompiler):
s = """
def f(a, b, c):
t = (a, b, c)
d, e, f = t
return d+e-f
"""
inputs = [[1, 2, 3], [2, 3, 1], [3, 1, 2]]
class TestListMutate1Decompile(TestDecompiler):
s = """
def f():
l = [-3, -2, -1]
l[0] = 3
l[1] = 4
l[2] = 5
return l[2], l[1], l[0]
"""
inputs = [[]]
class TestListMutate2Decompile(TestDecompiler):
s = """
def f(a, b, c):
temp = (b+b)
l = [-(c+b), -temp, -(b+c)]
l[a+a] = b+c
l[a+b] = b+b
l[b+b] = c+c+b
return l[2], l[1], l[0]
"""
inputs = [[0, 1, 2]]
class TestSubscriptMungeDecompile(TestDecompiler):
s = """
def f(a, b, c):
temp = ((0, 1), (2, 3))
return temp[a+b][a+a]
"""
inputs = [[0, 1, 2]]
class TestDictDecompile(TestDecompiler):
s = """
def f(s):
d = {'a':1, 'b':2, 'c':3, 'd':4}
return d[s]
"""
inputs = [['a'],['b'],['c'],['d']]
class TestDeleteSubscriptDecompile(TestDecompiler):
s = """
def f(s):
d = {'a':1, 'b':2, 'c':3, 'd':4}
a = s in d
del d[s]
b = s in d
return a, b
"""
inputs = [['a'],['b'],['c'],['d']]
class TestDictFlip(TestDecompiler):
s = """
def f(d):
nd = {}
for key, value in d.iteritems():
nd[value] = key
return nd
"""
inputs = [[{'a':'1', 'b':'2', 'c':'3', 'd':'4'}]]
class TestGetSlice_3_1_Decompile(TestDecompiler):
s = """
def f(a, b, c):
l = range(20)
return l[a:b:c]
"""
inputs = [[0, 20, 2], [1, 20, 2]]
class TestGetSlice_3_2_Decompile(TestDecompiler):
s = """
def f(a, c):
l = range(20)
return l[a::c]
"""
inputs = [[0, 2], [1, 2]]
class TestGetSlice_2_1_Decompile(TestDecompiler):
s = """
def f(a, c):
l = range(20)
return l[a:c]
"""
inputs = [[3, 11], [7, 13]]
class TestGetSlice_1_1_Decompile(TestDecompiler):
s = """
def f(a):
l = range(20)
return l[a:]
"""
inputs = [[3], [13]]
class TestGetSlice_1_2_Decompile(TestDecompiler):
s = """
def f(a):
l = range(20)
return l[:a]
"""
inputs = [[3], [13]]
class TestGetSlice_0_1_Decompile(TestDecompiler):
s = """
def f():
l = range(20)
return l[:]
"""
inputs = [[]]
class TestSetSlice_3_1_Decompile(TestDecompiler):
s = """
def f():
l = range(20)
l[1:20:2] = range(10)
return l
"""
inputs = [[]]
class TestSetSlice_2_1_Decompile(TestDecompiler):
s = """
def f():
l = range(20)
l[1:5] = range(10)
return l
"""
inputs = [[]]
class TestSetSlice_2_2_Decompile(TestDecompiler):
s = """
def f():
l = range(20)
l[3:] = range(10)
return l
"""
inputs = [[]]
class TestSetSlice_2_3_Decompile(TestDecompiler):
s = """
def f():
l = range(20)
l[:15] = range(10)
return l
"""
inputs = [[]]
class TestSetSlice_1_1_Decompile(TestDecompiler):
s = """
def f():
l = range(20)
l[:] = range(10)
return l
"""
inputs = [[]]
class TestDeleteSlice_3_1(TestDecompiler):
s = """
def f(a, b, c):
l = range(20)
del l[a:b:c]
return l
"""
inputs = [[1, 10, 2], [5, 19, 3]]
class TestDeleteSlice_2_1(TestDecompiler):
s = """
def f(a, b):
l = range(20)
del l[a:b]
return l
"""
inputs = [[1, 10], [5, 15]]
class TestDeleteSlice_1_1(TestDecompiler):
s = """
def f(a):
l = range(20)
del l[a:]
return l
"""
inputs = [[3], [15]]
class TestDeleteSlice_1_2(TestDecompiler):
s = """
def f(a):
l = range(20)
del l[:a]
return l
"""
inputs = [[3], [15]]
class TestDeleteSlice_0_1(TestDecompiler):
s = """
def f():
l = range(20)
del l[:]
return l
"""
inputs = []
# Note: the LIST_APPEND will be on a separate line, even when collapsed, due to order of operations.
class TestListComprehension(TestDecompiler):
s = """
def listcomp(a):
l = [i*i for i in a]
return l
"""
    inputs = [[[3, 7, 5]], [[11, -3, 2]]]
# Note: tuple comprehensions create generators.
class TestTupleComprehension(TestDecompiler):
s = """
def tuplecomp(a):
l = (i*i for i in a)
return list(l)
"""
    inputs = [[(3, 7, 5)], [(11, -3, 2)]]
# Tuple comprehension?
|
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
"""
Boot session from cache or build
Session bootstraps info needed by common client side activities including
permission, homepage, default variables, system defaults etc
"""
import frappe, json
from frappe import _
import frappe.utils
from frappe.utils import cint, cstr
import frappe.model.meta
import frappe.defaults
import frappe.translate
from frappe.utils.change_log import get_change_log
import redis
from urllib import unquote
@frappe.whitelist()
def clear(user=None):
frappe.local.session_obj.update(force=True)
frappe.local.db.commit()
clear_cache(frappe.session.user)
clear_global_cache()
frappe.response['message'] = _("Cache Cleared")
def clear_cache(user=None):
cache = frappe.cache()
groups = ("bootinfo", "user_recent", "user_roles", "user_doc", "lang",
"defaults", "user_permissions", "roles", "home_page")
if user:
for name in groups:
cache.hdel(name, user)
cache.delete_keys("user:" + user)
frappe.defaults.clear_cache(user)
else:
for name in groups:
cache.delete_key(name, user)
clear_global_cache()
frappe.defaults.clear_cache()
def clear_global_cache():
frappe.model.meta.clear_cache()
frappe.cache().delete_value(["app_hooks", "installed_apps",
"app_modules", "module_app", "time_zone", "notification_config"])
frappe.setup_module_map()
def clear_sessions(user=None, keep_current=False):
if not user:
user = frappe.session.user
for sid in frappe.db.sql("""select sid from tabSessions where user=%s and device=%s""",
(user, frappe.session.data.device or "desktop")):
if keep_current and frappe.session.sid==sid[0]:
continue
else:
delete_session(sid[0])
def delete_session(sid=None, user=None):
if not user:
user = hasattr(frappe.local, "session") and frappe.session.user or "Guest"
frappe.cache().hdel("session", sid)
frappe.cache().hdel("last_db_session_update", sid)
frappe.db.sql("""delete from tabSessions where sid=%s""", sid)
def clear_all_sessions():
"""This effectively logs out all users"""
frappe.only_for("Administrator")
for sid in frappe.db.sql_list("select sid from `tabSessions`"):
delete_session(sid)
def clear_expired_sessions():
"""This function is meant to be called from scheduler"""
for device in ("desktop", "mobile"):
		for sid in frappe.db.sql_list("""select sid from tabSessions
			where TIMEDIFF(NOW(), lastupdate) > TIME(%s)
			and device = %s""", (get_expiry_period(device), device)):
delete_session(sid)
def get():
"""get session boot info"""
from frappe.desk.notifications import \
get_notification_info_for_boot, get_notifications
from frappe.boot import get_bootinfo
bootinfo = None
if not getattr(frappe.conf,'disable_session_cache', None):
# check if cache exists
bootinfo = frappe.cache().hget("bootinfo", frappe.session.user)
if bootinfo:
bootinfo['from_cache'] = 1
bootinfo["notification_info"].update(get_notifications())
bootinfo["user"]["recent"] = json.dumps(\
frappe.cache().hget("user_recent", frappe.session.user))
if not bootinfo:
# if not create it
bootinfo = get_bootinfo()
bootinfo["notification_info"] = get_notification_info_for_boot()
frappe.cache().hset("bootinfo", frappe.session.user, bootinfo)
try:
frappe.cache().ping()
except redis.exceptions.ConnectionError:
message = _("Redis cache server not running. Please contact Administrator / Tech support")
if 'messages' in bootinfo:
bootinfo['messages'].append(message)
else:
bootinfo['messages'] = [message]
# check only when clear cache is done, and don't cache this
if frappe.local.request:
bootinfo["change_log"] = get_change_log()
bootinfo["metadata_version"] = frappe.cache().get_value("metadata_version")
if not bootinfo["metadata_version"]:
bootinfo["metadata_version"] = frappe.reset_metadata_version()
for hook in frappe.get_hooks("extend_bootinfo"):
frappe.get_attr(hook)(bootinfo=bootinfo)
bootinfo["lang"] = frappe.translate.get_user_lang()
return bootinfo
class Session:
def __init__(self, user, resume=False, full_name=None, user_type=None):
self.sid = cstr(frappe.form_dict.get('sid') or unquote(frappe.request.cookies.get('sid', 'Guest')))
self.user = user
self.device = frappe.form_dict.get("device") or "desktop"
self.user_type = user_type
self.full_name = full_name
self.data = frappe._dict({'data': frappe._dict({})})
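		# self.data mirrors a tabSessions row; the nested "data" dict is what
		# gets serialized (via str) into the sessiondata column.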
self.time_diff = None
# set local session
frappe.local.session = self.data
if resume:
self.resume()
else:
self.start()
def start(self):
"""start a new session"""
# generate sid
if self.user=='Guest':
sid = 'Guest'
else:
sid = frappe.generate_hash()
self.data.user = self.user
self.data.sid = sid
self.data.data.user = self.user
self.data.data.session_ip = frappe.local.request_ip
if self.user != "Guest":
self.data.data.update({
"last_updated": frappe.utils.now(),
"session_expiry": get_expiry_period(self.device),
"full_name": self.full_name,
"user_type": self.user_type,
"device": self.device,
"session_country": get_geo_ip_country(frappe.local.request_ip)
})
# insert session
if self.user!="Guest":
self.insert_session_record()
# update user
frappe.db.sql("""UPDATE tabUser SET last_login = %s, last_ip = %s
where name=%s""", (frappe.utils.now(), frappe.local.request_ip, self.data['user']))
frappe.db.commit()
def insert_session_record(self):
frappe.db.sql("""insert into tabSessions
(sessiondata, user, lastupdate, sid, status, device)
values (%s , %s, NOW(), %s, 'Active', %s)""",
(str(self.data['data']), self.data['user'], self.data['sid'], self.device))
# also add to memcache
frappe.cache().hset("session", self.data.sid, self.data)
def resume(self):
"""non-login request: load a session"""
import frappe
data = self.get_session_record()
if data:
# set language
self.data.update({'data': data, 'user':data.user, 'sid': self.sid})
self.user = data.user
else:
self.start_as_guest()
if self.sid != "Guest":
frappe.local.user_lang = frappe.translate.get_user_lang(self.data.user)
frappe.local.lang = frappe.local.user_lang
def get_session_record(self):
"""get session record, or return the standard Guest Record"""
from frappe.auth import clear_cookies
r = self.get_session_data()
if not r:
frappe.response["session_expired"] = 1
clear_cookies()
self.sid = "Guest"
r = self.get_session_data()
return r
def get_session_data(self):
if self.sid=="Guest":
return frappe._dict({"user":"Guest"})
data = self.get_session_data_from_cache()
if not data:
data = self.get_session_data_from_db()
return data
def get_session_data_from_cache(self):
data = frappe.cache().hget("session", self.sid)
if data:
data = frappe._dict(data)
session_data = data.get("data", {})
# set user for correct timezone
self.time_diff = frappe.utils.time_diff_in_seconds(frappe.utils.now(),
session_data.get("last_updated"))
expiry = self.get_expiry_in_seconds(session_data.get("session_expiry"))
if self.time_diff > expiry:
self.delete_session()
data = None
return data and data.data
def get_session_data_from_db(self):
rec = frappe.db.sql("""select user, sessiondata
from tabSessions where sid=%s and
TIMEDIFF(NOW(), lastupdate) < TIME(%s)""", (self.sid,
get_expiry_period(self.device)))
if rec:
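			# sessiondata holds the repr of the dict written by
			# insert_session_record, so eval here only sees framework-written data.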
data = frappe._dict(eval(rec and rec[0][1] or '{}'))
data.user = rec[0][0]
else:
self.delete_session()
data = None
return data
def get_expiry_in_seconds(self, expiry):
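		# expiry is an "HH:MM:SS" string; default to one hour when unset.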
if not expiry: return 3600
parts = expiry.split(":")
return (cint(parts[0]) * 3600) + (cint(parts[1]) * 60) + cint(parts[2])
def delete_session(self):
delete_session(self.sid, user=self.user)
def start_as_guest(self):
"""all guests share the same 'Guest' session"""
self.user = "Guest"
self.start()
def update(self, force=False):
"""extend session expiry"""
if (frappe.session['user'] == "Guest" or frappe.form_dict.cmd=="logout"):
return
now = frappe.utils.now()
self.data['data']['last_updated'] = now
self.data['data']['lang'] = unicode(frappe.lang)
# update session in db
last_updated = frappe.cache().hget("last_db_session_update", self.sid)
time_diff = frappe.utils.time_diff_in_seconds(now, last_updated) if last_updated else None
# database persistence is secondary, don't update it too often
updated_in_db = False
		if force or (time_diff is None) or (time_diff > 600):
frappe.db.sql("""update tabSessions set sessiondata=%s,
lastupdate=NOW() where sid=%s""" , (str(self.data['data']),
self.data['sid']))
frappe.cache().hset("last_db_session_update", self.sid, now)
updated_in_db = True
# set in memcache
frappe.cache().hset("session", self.sid, self.data)
return updated_in_db
def get_expiry_period(device="desktop"):
if device=="desktop":
key = "session_expiry"
default = "06:00:00"
else:
key = "session_expiry_mobile"
default = "720:00:00"
exp_sec = frappe.defaults.get_global_default(key) or default
	# in case the seconds component is missing
if len(exp_sec.split(':')) == 2:
exp_sec = exp_sec + ':00'
return exp_sec
def get_geo_from_ip(ip_addr):
try:
from geoip import geolite2
return geolite2.lookup(ip_addr)
except ImportError:
return
except ValueError:
return
def get_geo_ip_country(ip_addr):
match = get_geo_from_ip(ip_addr)
if match:
return match.country
|
|
#!/usr/bin/env python
# encoding: utf-8
################################################################################
#
# RMG - Reaction Mechanism Generator
#
# Copyright (c) 2009-2011 by the RMG Team (rmg_dev@mit.edu)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the 'Software'),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
"""
This module contains classes and functions for working with chemical species.
From the `IUPAC Compendium of Chemical Terminology
<http://dx.doi.org/10.1351/goldbook>`_, a chemical species is "an
ensemble of chemically identical molecular entities that can explore the same
set of molecular energy levels on the time scale of the experiment". This
definition is purposefully vague to allow the user flexibility in application.
In RMG Py, a chemical species -- a local minimum on a potential energy surface
-- is represented in memory as a :class:`Species` object. This module also
contains the :class:`TransitionState` class for representing chemical reaction
transition states (first-order saddle points on a potential energy surface).
"""
import numpy
import cython
import logging
import rmgpy.quantity as quantity
from rmgpy.molecule import Molecule
from rmgpy.pdep import SingleExponentialDown
from rmgpy.statmech.conformer import Conformer
from rmgpy.thermo import Wilhoit, NASA, ThermoData
#: This dictionary is used to add multiplicity to species label
_multiplicity_labels = {1:'S',2:'D',3:'T',4:'Q',5:'V',}
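# For reference, these letters follow spectroscopic convention: multiplicity 1
# is a singlet ('S'), 2 a doublet ('D'), 3 a triplet ('T'), 4 a quartet ('Q'),
# and 5 a quintet (labelled 'V' here so it does not clash with quartet).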
################################################################################
class SpeciesError(Exception):
"""
An exception class for exceptional behavior that occurs while working with
chemical species. Pass a string describing the circumstances that caused the
exceptional behavior.
"""
pass
################################################################################
class Species(object):
"""
A chemical species, representing a local minimum on a potential energy
surface. The attributes are:
======================= ====================================================
Attribute Description
======================= ====================================================
`index` A unique nonnegative integer index
`label` A descriptive string label
`thermo` The heat capacity model for the species
`conformer` The molecular conformer for the species
`molecule` A list of the :class:`Molecule` objects describing the molecular structure
`transportData` A set of transport collision parameters
`molecularWeight` The molecular weight of the species
`energyTransferModel` The collisional energy transfer model to use
`reactive` ``True`` if the species participates in reaction families, ``False`` if not
Reaction libraries and seed mechanisms that include the species are
always considered regardless of this variable
`props` A generic 'properties' dictionary to store user-defined flags
`aug_inchi` Unique augmented inchi
======================= ====================================================
note: :class:`rmg.model.Species` inherits from this class, and adds some extra methods.
"""
def __init__(self, index=-1, label='', thermo=None, conformer=None,
molecule=None, transportData=None, molecularWeight=None,
energyTransferModel=None, reactive=True, props=None, aug_inchi=None):
self.index = index
self.label = label
self.thermo = thermo
self.conformer = conformer
self.molecule = molecule or []
self.transportData = transportData
self.reactive = reactive
self.molecularWeight = molecularWeight
self.energyTransferModel = energyTransferModel
self.props = props or {}
self.aug_inchi = aug_inchi
# Check multiplicity of each molecule is the same
if molecule is not None and len(molecule)>1:
mult = molecule[0].multiplicity
for m in molecule[1:]:
if mult != m.multiplicity:
raise SpeciesError('Multiplicities of molecules in species {species} do not match.'.format(species=label))
def __repr__(self):
"""
Return a string representation that can be used to reconstruct the
object.
"""
string = 'Species('
if self.index != -1: string += 'index={0:d}, '.format(self.index)
        if self.label != '': string += 'label="{0}", '.format(self.label)
if self.thermo is not None: string += 'thermo={0!r}, '.format(self.thermo)
if self.conformer is not None: string += 'conformer={0!r}, '.format(self.conformer)
if len(self.molecule) > 0: string += 'molecule={0!r}, '.format(self.molecule)
if self.transportData is not None: string += 'transportData={0!r}, '.format(self.transportData)
if not self.reactive: string += 'reactive={0}, '.format(self.reactive)
if self.molecularWeight is not None: string += 'molecularWeight={0!r}, '.format(self.molecularWeight)
if self.energyTransferModel is not None: string += 'energyTransferModel={0!r}, '.format(self.energyTransferModel)
string = string[:-2] + ')'
return string
def _repr_png_(self):
if len(self.molecule) > 0:
return self.molecule[0]._repr_png_()
else:
return None
def __str__(self):
"""
Return a string representation of the species, in the form 'label(id)'.
"""
if self.index == -1: return self.label
else: return '{0}({1:d})'.format(self.label, self.index)
def __reduce__(self):
"""
A helper function used when pickling an object.
"""
return (Species, (self.index, self.label, self.thermo, self.conformer, self.molecule, self.transportData, self.molecularWeight, self.energyTransferModel, self.reactive, self.props))
def getMolecularWeight(self):
return self._molecularWeight
def setMolecularWeight(self, value):
self._molecularWeight = quantity.Mass(value)
molecularWeight = property(getMolecularWeight, setMolecularWeight, """The molecular weight of the species.""")
def generateResonanceIsomers(self):
"""
Generate all of the resonance isomers of this species. The isomers are
stored as a list in the `molecule` attribute. If the length of
`molecule` is already greater than one, it is assumed that all of the
resonance isomers have already been generated.
"""
if len(self.molecule) == 1:
self.molecule = self.molecule[0].generateResonanceIsomers()
def isIsomorphic(self, other):
"""
Return ``True`` if the species is isomorphic to `other`, which can be
either a :class:`Molecule` object or a :class:`Species` object.
"""
if isinstance(other, Molecule):
for molecule in self.molecule:
if molecule.isIsomorphic(other):
return True
elif isinstance(other, Species):
for molecule1 in self.molecule:
for molecule2 in other.molecule:
if molecule1.isIsomorphic(molecule2):
return True
else:
raise ValueError('Unexpected value "{0!r}" for other parameter; should be a Molecule or Species object.'.format(other))
return False
def fromAdjacencyList(self, adjlist):
"""
Load the structure of a species as a :class:`Molecule` object from the
given adjacency list `adjlist` and store it as the first entry of a
list in the `molecule` attribute. Does not generate resonance isomers
of the loaded molecule.
"""
self.molecule = [Molecule().fromAdjacencyList(adjlist)]
# If the first line is a label, then save it to the label attribute
for label in adjlist.splitlines():
if label.strip():
break
else:
label = ''
if len(label.split()) > 0 and not label.split()[0].isdigit():
self.label = label.strip()
# Return a reference to itself so we can use e.g. Species().fromAdjacencyList()
return self
def fromSMILES(self, smiles):
"""
Load the structure of a species as a :class:`Molecule` object from the
given SMILES string `smiles` and store it as the first entry of a
list in the `molecule` attribute. Does not generate resonance isomers
of the loaded molecule.
"""
self.molecule = [Molecule().fromSMILES(smiles)]
# Return a reference to itself so we can use e.g. Species().fromAdjacencyList()
return self
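    # Hedged usage sketch of the chaining pattern mentioned above (the label
    # and SMILES string are assumed examples):
    #
    #   spec = Species(label='ethane').fromSMILES('CC')
    #   spec.generateResonanceIsomers()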
def toAdjacencyList(self):
"""
Return a string containing each of the molecules' adjacency lists.
"""
output = '\n\n'.join([m.toAdjacencyList(label=self.label, removeH=False) for m in self.molecule])
return output
def toChemkin(self):
"""
Return the chemkin-formatted string for this species.
"""
from rmgpy.chemkin import getSpeciesIdentifier
return getSpeciesIdentifier(self)
def toCantera(self):
"""
Converts the RMG Species object to a Cantera Species object
with the appropriate thermo data.
"""
import cantera as ct
# Determine the number of each type of element in the molecule
elementDict = {} # elementCounts = [0,0,0,0]
for atom in self.molecule[0].atoms:
# The atom itself
symbol = atom.element.symbol
if symbol not in elementDict:
elementDict[symbol] = 1
else:
elementDict[symbol] += 1
ctSpecies = ct.Species(self.toChemkin(), elementDict)
if self.thermo:
try:
ctSpecies.thermo = self.thermo.toCantera()
except Exception, e:
print e
raise Exception('Could not convert thermo to create Cantera Species object. Check that thermo is a NASA polynomial.')
if self.transportData:
ctSpecies.transport = self.transportData.toCantera()
return ctSpecies
def hasStatMech(self):
"""
Return ``True`` if the species has statistical mechanical parameters,
or ``False`` otherwise.
"""
return self.conformer is not None and (len(self.conformer.modes) > 0 or (len(self.molecule) > 0 and len(self.molecule[0].atoms) == 1))
def hasThermo(self):
"""
Return ``True`` if the species has thermodynamic parameters, or
``False`` otherwise.
"""
return self.thermo is not None
def getPartitionFunction(self, T):
"""
Return the partition function for the species at the specified
temperature `T` in K.
"""
cython.declare(Q=cython.double)
if self.hasStatMech():
Q = self.conformer.getPartitionFunction(T)
else:
raise Exception('Unable to calculate partition function for species {0!r}: no statmech data available.'.format(self.label))
return Q
def getHeatCapacity(self, T):
"""
Return the heat capacity in J/mol*K for the species at the specified
temperature `T` in K.
"""
cython.declare(Cp=cython.double)
Cp = 0.0
if self.hasThermo():
Cp = self.getThermoData().getHeatCapacity(T)
elif self.hasStatMech():
Cp = self.conformer.getHeatCapacity(T)
else:
raise Exception('Unable to calculate heat capacity for species {0!r}: no thermo or statmech data available.'.format(self.label))
return Cp
def getEnthalpy(self, T):
"""
Return the enthalpy in J/mol for the species at the specified
temperature `T` in K.
"""
cython.declare(H=cython.double)
H = 0.0
if self.hasThermo():
H = self.getThermoData().getEnthalpy(T)
elif self.hasStatMech():
H = self.conformer.getEnthalpy(T) + self.conformer.E0.value_si
else:
raise Exception('Unable to calculate enthalpy for species {0!r}: no thermo or statmech data available.'.format(self.label))
return H
def getEntropy(self, T):
"""
Return the entropy in J/mol*K for the species at the specified
temperature `T` in K.
"""
cython.declare(S=cython.double)
S = 0.0
if self.hasThermo():
S = self.getThermoData().getEntropy(T)
elif self.hasStatMech():
S = self.conformer.getEntropy(T)
else:
raise Exception('Unable to calculate entropy for species {0!r}: no thermo or statmech data available.'.format(self.label))
return S
def getFreeEnergy(self, T):
"""
Return the Gibbs free energy in J/mol for the species at the specified
temperature `T` in K.
"""
cython.declare(G=cython.double)
G = 0.0
if self.hasThermo():
G = self.getThermoData().getFreeEnergy(T)
elif self.hasStatMech():
G = self.conformer.getFreeEnergy(T) + self.conformer.E0.value_si
else:
raise Exception('Unable to calculate free energy for species {0!r}: no thermo or statmech data available.'.format(self.label))
return G
def getSumOfStates(self, Elist):
"""
Return the sum of states :math:`N(E)` at the specified energies `Elist`
in J/mol.
"""
if self.hasStatMech():
return self.conformer.getSumOfStates(Elist)
else:
raise Exception('Unable to calculate sum of states for species {0!r}: no statmech data available.'.format(self.label))
def getDensityOfStates(self, Elist):
"""
Return the density of states :math:`\\rho(E) \\ dE` at the specified
energies `Elist` in J/mol above the ground state.
"""
if self.hasStatMech():
return self.conformer.getDensityOfStates(Elist)
else:
raise Exception('Unable to calculate density of states for species {0!r}: no statmech data available.'.format(self.label))
def getSymmetryNumber(self):
"""
Get the symmetry number for the species, which is the highest symmetry number amongst
        its resonance isomers. This function is currently used for website purposes and testing
        only, as it requires additional calculateSymmetryNumber calls.
"""
cython.declare(symmetryNumber=cython.int)
symmetryNumber = numpy.max([mol.getSymmetryNumber() for mol in self.molecule])
return symmetryNumber
def calculateCp0(self):
"""
Return the value of the heat capacity at zero temperature in J/mol*K.
"""
return self.molecule[0].calculateCp0()
def calculateCpInf(self):
"""
Return the value of the heat capacity at infinite temperature in J/mol*K.
"""
return self.molecule[0].calculateCpInf()
def copy(self, deep=False):
"""
Create a copy of the current species. If the
kw argument 'deep' is True, then a deep copy will be made of the
Molecule objects in self.molecule.
For other complex attributes, a deep copy will always be made.
"""
from copy import deepcopy
cython.declare(other=Species)
other = Species.__new__(Species)
other.index = self.index
other.label = self.label
other.thermo = deepcopy(self.thermo)
other.molecule = []
for mol in self.molecule:
other.molecule.append(mol.copy(deep=deep))
other.conformer = deepcopy(self.conformer)
other.transportData = deepcopy(self.transportData)
other.molecularWeight = deepcopy(self.molecularWeight)
other.energyTransferModel = deepcopy(self.energyTransferModel)
other.reactive = self.reactive
other.props = deepcopy(self.props)
return other
def getAugmentedInChI(self):
        if self.aug_inchi is None:
            self.aug_inchi = self.generate_aug_inchi()
        return self.aug_inchi
def generate_aug_inchi(self):
candidates = []
self.generateResonanceIsomers()
for mol in self.molecule:
cand = mol.toAugmentedInChI()
candidates.append(cand)
candidates.sort()
return candidates[0]
def getThermoData(self):
"""
        Return the thermo object of the current Species object.
        If the thermo object already exists, it is either of (Wilhoit, ThermoData)
        type, or it is a Future.
        If the thermo attribute is of Wilhoit or ThermoData type,
        it is converted into NASA format.
        If it is a Future, a blocking call is made to retrieve the NASA object.
        If the thermo object does not exist yet, it is generated.
"""
from rmgpy.thermo.thermoengine import submit
if self.thermo:
if not isinstance(self.thermo, (NASA, Wilhoit, ThermoData)):
self.thermo = self.thermo.result()
else:
submit(self)
if not isinstance(self.thermo, (NASA, Wilhoit, ThermoData)):
self.thermo = self.thermo.result()
return self.thermo
def generateTransportData(self):
"""
Generate the transportData parameters for the species.
"""
from rmgpy.data.rmg import getDB
try:
transportDB = getDB('transport')
if not transportDB: raise Exception
except Exception, e:
logging.debug('Could not obtain the transport database. Not generating transport...')
raise e
#count = sum([1 for atom in self.molecule[0].vertices if atom.isNonHydrogen()])
self.transportData = transportDB.getTransportProperties(self)[0]
def getTransportData(self):
"""
Returns the transport data associated with this species, and
calculates it if it is not yet available.
"""
if not self.transportData:
self.generateTransportData()
return self.transportData
def generateStatMech(self):
"""
Generate molecular degree of freedom data for the species. You must
have already provided a thermodynamics model using e.g.
:meth:`generateThermoData()`.
"""
from rmgpy.data.rmg import getDB
try:
statmechDB = getDB('statmech')
if not statmechDB: raise Exception
except Exception, e:
logging.debug('Could not obtain the stat. mech database. Not generating stat. mech...')
raise e
molecule = self.molecule[0]
conformer = statmechDB.getStatmechData(molecule, self.getThermoData())
if self.conformer is None:
self.conformer = Conformer()
self.conformer.E0 = self.getThermoData().E0
self.conformer.modes = conformer.modes
self.conformer.spinMultiplicity = conformer.spinMultiplicity
def generateEnergyTransferModel(self):
"""
Generate the collisional energy transfer model parameters for the
species. This "algorithm" is *very* much in need of improvement.
"""
self.energyTransferModel = SingleExponentialDown(
alpha0 = (300*0.011962,"kJ/mol"),
T0 = (300,"K"),
n = 0.85,
)
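    # Note on the numbers above: alpha0 = 300 * 0.011962 = 3.5886 kJ/mol at the
    # reference temperature T0 = 300 K, so the average energy transferred in a
    # deactivating collision scales roughly as (T / 300 K)**0.85 under this
    # single-exponential-down model.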
################################################################################
class TransitionState():
"""
A chemical transition state, representing a first-order saddle point on a
potential energy surface. The attributes are:
=============== ============================================================
    Attribute       Description
=============== ============================================================
`label` A descriptive string label
`conformer` The molecular degrees of freedom model for the species
`frequency` The negative frequency of the first-order saddle point
`tunneling` The type of tunneling model to use for tunneling through the reaction barrier
`degeneracy` The reaction path degeneracy
=============== ============================================================
"""
def __init__(self, label='', conformer=None, frequency=None, tunneling=None, degeneracy=1):
self.label = label
self.conformer = conformer
self.frequency = frequency
self.tunneling = tunneling
self.degeneracy = degeneracy
def __repr__(self):
"""
Return a string representation that can be used to reconstruct the
object.
"""
string = 'TransitionState('
if self.label != '': string += 'label="{0}", '.format(self.label)
if self.conformer is not None: string += 'conformer={0!r}, '.format(self.conformer)
if self.frequency is not None: string += 'frequency={0!r}, '.format(self.frequency)
if self.tunneling is not None: string += 'tunneling={0!r}, '.format(self.tunneling)
if self.degeneracy != 1: string += 'degeneracy={0}, '.format(self.degeneracy)
string = string[:-2] + ')'
return string
def __reduce__(self):
"""
A helper function used when pickling an object.
"""
return (TransitionState, (self.label, self.conformer, self.frequency, self.tunneling, self.degeneracy))
def getFrequency(self):
return self._frequency
def setFrequency(self, value):
self._frequency = quantity.Frequency(value)
frequency = property(getFrequency, setFrequency, """The negative frequency of the first-order saddle point.""")
def getPartitionFunction(self, T):
"""
Return the partition function for the transition state at the
specified temperature `T` in K.
"""
cython.declare(Q=cython.double)
if self.conformer is not None and len(self.conformer.modes) > 0:
Q = self.conformer.getPartitionFunction(T)
else:
raise Exception('Unable to calculate partition function for transition state {0!r}: no statmech data available.'.format(self.label))
return Q
def getHeatCapacity(self, T):
"""
Return the heat capacity in J/mol*K for the transition state at the
specified temperature `T` in K.
"""
cython.declare(Cp=cython.double)
Cp = 0.0
if self.getThermoData() is not None:
Cp = self.getThermoData().getHeatCapacity(T)
elif self.conformer is not None and len(self.conformer.modes) > 0:
Cp = self.conformer.getHeatCapacity(T)
else:
raise Exception('Unable to calculate heat capacity for transition state {0!r}: no thermo or statmech data available.'.format(self.label))
return Cp
def getEnthalpy(self, T):
"""
Return the enthalpy in J/mol for the transition state at the
specified temperature `T` in K.
"""
cython.declare(H=cython.double)
H = 0.0
if self.getThermoData() is not None:
H = self.getThermoData().getEnthalpy(T)
elif self.conformer is not None and len(self.conformer.modes) > 0:
H = self.conformer.getEnthalpy(T)
else:
raise Exception('Unable to calculate enthalpy for transition state {0!r}: no thermo or statmech data available.'.format(self.label))
return H
def getEntropy(self, T):
"""
Return the entropy in J/mol*K for the transition state at the
specified temperature `T` in K.
"""
cython.declare(S=cython.double)
S = 0.0
if self.getThermoData() is not None:
S = self.getThermoData().getEntropy(T)
elif self.conformer is not None and len(self.conformer.modes) > 0:
S = self.conformer.getEntropy(T)
else:
raise Exception('Unable to calculate entropy for transition state {0!r}: no thermo or statmech data available.'.format(self.label))
return S
def getFreeEnergy(self, T):
"""
Return the Gibbs free energy in J/mol for the transition state at the
specified temperature `T` in K.
"""
cython.declare(G=cython.double)
G = 0.0
if self.getThermoData() is not None:
G = self.getThermoData().getFreeEnergy(T)
elif self.conformer is not None and len(self.conformer.modes) > 0:
G = self.conformer.getFreeEnergy(T)
else:
raise Exception('Unable to calculate free energy for transition state {0!r}: no thermo or statmech data available.'.format(self.label))
return G
def getSumOfStates(self, Elist):
"""
Return the sum of states :math:`N(E)` at the specified energies `Elist`
in J/mol.
"""
if self.conformer is not None and len(self.conformer.modes) > 0:
return self.conformer.getSumOfStates(Elist)
else:
raise Exception('Unable to calculate sum of states for transition state {0!r}: no statmech data available.'.format(self.label))
def getDensityOfStates(self, Elist):
"""
Return the density of states :math:`\\rho(E) \\ dE` at the specified
energies `Elist` in J/mol above the ground state.
"""
if self.conformer is not None and len(self.conformer.modes) > 0:
return self.conformer.getDensityOfStates(Elist)
else:
raise Exception('Unable to calculate density of states for transition state {0!r}: no statmech data available.'.format(self.label))
def calculateTunnelingFactor(self, T):
"""
Calculate and return the value of the canonical tunneling correction
factor for the reaction at the given temperature `T` in K.
"""
if self.tunneling is not None:
return self.tunneling.calculateTunnelingFactor(T)
else:
# Return unity
return 1.0
def calculateTunnelingFunction(self, Elist):
"""
Calculate and return the value of the microcanonical tunneling
correction for the reaction at the given energies `Elist` in J/mol.
"""
if self.tunneling is not None:
return self.tunneling.calculateTunnelingFunction(Elist)
else:
# Return step function
kappa = numpy.ones_like(Elist)
E0 = float(self.conformer.E0.value_si)
for r in range(Elist.shape[0]):
if Elist[r] >= E0:
break
kappa[r] = 0.0
return kappa
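# Hedged usage sketch (label and temperature are assumed examples): a
# TransitionState with no tunneling model falls back to no correction,
#
#   ts = TransitionState(label='TS1')
#   ts.calculateTunnelingFactor(300.0)   # -> 1.0
#
# while calculateTunnelingFunction() then needs conformer.E0 to build the
# step function returned above.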
|
|
#!/usr/bin/env python2.7
import os
from sqlalchemy import *
from sqlalchemy.pool import NullPool
from flask import Flask, request, render_template, g, redirect, Response
from Conversation.conversation import entry
import json
tmpl_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates')
app = Flask(__name__, template_folder=tmpl_dir)
DATABASEURI = "postgresql://postgres:postgres@localhost/columbiaconnect"
engine = create_engine(DATABASEURI)
@app.before_request
def before_request():
"""
This function is run at the beginning of every web request
(every time you enter an address in the web browser).
We use it to setup a database connection that can be used throughout the request.
The variable g is globally accessible.
"""
try:
g.conn = engine.connect()
print "Database connected"
print g.conn
except:
print "uh oh, problem connecting to database"
import traceback; traceback.print_exc()
g.conn = None
@app.teardown_request
def teardown_request(exception):
"""
At the end of the web request, this makes sure to close the database connection.
If you don't, the database could run out of memory!
"""
try:
g.conn.close()
except Exception as e:
pass
@app.route('/')
def index():
"""
request is a special object that Flask provides to access web request information:
request.method: "GET" or "POST"
request.form: if the browser submitted a form, this contains the data in the form
request.args: dictionary of URL arguments, e.g., {a:1, b:2} for http://localhost?a=1&b=2
See its API: http://flask.pocoo.org/docs/0.10/api/#incoming-request-data
"""
return render_template("index.html")
@app.route('/search',methods=['GET'])
def search():
intent_map = {"get_location":"Get location","location_lecture":"Get location of lecture", "location_office_hours":"Get location of office hours",\
"get_time":"Get timing","time_lecture":"Get timing of lecture", "time_office_hours":"Get timing of office hours",\
"reviews":"Get reviews", "suggest_course":"Get suggestion for interests","goodbye":"goodbye","hello":"hello"}
text = str(request.args["query"])
response = entry(text)
if response["found"] == True:
page = response["page"]
alt_intents = response["response"]["intents"][1:]
score = response["response"]["intents"][0]["confidence"]
for i,a in enumerate(alt_intents):
alt_intents[i]["intent"] = intent_map[a["intent"]]
page_new = intent_map[page]
if page == "hello":
message = [
"What are the office hours for professor X?",
"What are the office hours for course Y?",
"Review for professor X?",
"Review for course Y?",
"Where does professor X sit?",
"Where is course Y held?",
"Can you suggest some course for \"keyword\"? (e.g. Database, ML) ",
"Tell me something about course Y?"
]
context = dict(data=message)
return render_template("hello.html", **context)
elif page == "reviews":
# intent = "location"
# alt_intents = response["response"]["intents"][1:]
# alt_intents = [{"confidence":0.004, "intent" : "get_location"},{"confidence":0.2, "intent" : "get_course"}]
value = response["value"]
entity = response["response"]["entities"]
total =0
positive = 0
for review in value:
total += 1
if review["sentiment"] == "\"positive\"":
positive += 1
pos_percent = format(((positive/float(total)) * 100),'.2f')
neg_percent = format(100 - ((positive/float(total)) * 100),'.2f')
total =0
approving = 0
disapproving = 0
neutral = 0
vindictive = 0
for review in value:
total += 1
if review["tone"] == "Approving":
approving += 1
elif review["tone"] == "Disapproving":
disapproving += 1
elif review["tone"] == "Vindictive":
vindictive += 1
elif review["tone"] == "Neutral":
neutral += 1
app_percent = format(((approving/float(total)) * 100),'.2f')
dis_percent = format(((disapproving/float(total)) * 100),'.2f')
vin_percent = format(((vindictive/float(total)) * 100),'.2f')
neutral_percent = format(((neutral/float(total)) * 100),'.2f')
context = dict(data=alt_intents, data1=value, data2=pos_percent, data3=neg_percent, data4=entity, data5=[app_percent, dis_percent, vin_percent, neutral_percent],text=text,intent=page_new,score=score)
render_file = str(page) + ".html"
print json.dumps(context, indent=2)
return render_template(render_file, **context)
elif page == "location_office_hours":
value = response["value"]
for val in value:
val["timing"] = val["timing"].decode("utf-8")
print str(val["building"])
val["location"] = str(val["building"]).replace(' ', '+').replace('&', 'and')
print str(val["location"])
val["src"] = "https://www.google.com/maps/embed/v1/place?key=AIzaSyDK0K9X-rKtBwacjisV8vFPTuJBNoM8wFs&q=" + str(val["location"]) + "+of+Columbia+University"
# alt_intents = response["response"]["intents"][1:]
entity = response["response"]["entities"]
context = dict(data=alt_intents, data1=value, data2=entity,text=text,intent=page_new,score=score)
render_file = str(page) + ".html"
return render_template(render_file, **context)
elif page == "location_lecture":
value = response["value"]
for val in value:
val["timing"] = val["timing"].decode("utf-8")
print str(val["building"])
val["location"] = str(val["building"]).replace(' ', '+').replace('&', 'and')
print str(val["location"])
val["src"] = "https://www.google.com/maps/embed/v1/place?key=AIzaSyDK0K9X-rKtBwacjisV8vFPTuJBNoM8wFs&q=" + str(val["location"]) + "+of+Columbia+University"
# alt_intents = response["response"]["intents"][1:]
entity = response["response"]["entities"]
context = dict(data=alt_intents, data1=value, data2=entity,text=text,intent=page_new,score=score)
render_file = str(page) + ".html"
return render_template(render_file, **context)
elif page == "time_office_hours":
value = response["value"]
for val in value:
val["time"] = val["time"].decode("utf-8")
print str(val["building"])
val["location"] = str(val["building"]).replace(' ', '+').replace('&', 'and')
print str(val["location"])
val["src"] = "https://www.google.com/maps/embed/v1/place?key=AIzaSyDK0K9X-rKtBwacjisV8vFPTuJBNoM8wFs&q=" + str(val["location"]) + "+of+Columbia+University"
# alt_intents = response["response"]["intents"][1:]
entity = response["response"]["entities"]
context = dict(data=alt_intents, data1=value, data2=entity,text=text,intent=page_new,score=score)
render_file = str(page) + ".html"
return render_template(render_file, **context)
elif page == "time_lecture":
value = response["value"]
for val in value:
val["time"] = val["time"].decode("utf-8")
print str(val["building"])
val["location"] = str(val["building"]).replace(' ', '+').replace('&', 'and')
print str(val["location"])
val["src"] = "https://www.google.com/maps/embed/v1/place?key=AIzaSyDK0K9X-rKtBwacjisV8vFPTuJBNoM8wFs&q=" + str(val["location"]) + "+of+Columbia+University"
# alt_intents = response["response"]["intents"][1:]
entity = response["response"]["entities"]
context = dict(data=alt_intents, data1=value, data2=entity,text=text,intent=page_new,score=score)
render_file = str(page) + ".html"
return render_template(render_file, **context)
elif page == "suggest_course":
value = response["value"]
# alt_intents = response["response"]["intents"][1:]
entity = response["response"]["entities"]
context = dict(data=alt_intents, data1=value, data2=entity,text=text,intent=page_new,score=score)
render_file = str(page) + ".html"
return render_template(render_file, **context)
else:
intent = response["response"]["intents"][0]
alt_intents = response["response"]["intents"][1:]
for i,a in enumerate(alt_intents):
alt_intents[i]["intent"] = intent_map[a["intent"]]
intent["intent"] = intent_map[intent["intent"]]
context = dict(data=alt_intents, intent=intent, text=text)
return render_template("error.html", **context)
@app.route('/feedback',methods=['GET'])
def feedback():
message = "Thank you for your feedback. We will incorporate this into our system model to account for the mistake"
context = dict(data = message)
#
# render_template looks in the templates/ folder for files.
# for example, the below file reads template/index.html
#
return render_template("feedback.html", **context)
if __name__ == "__main__":
import click
@click.command()
@click.option('--debug', is_flag=True)
@click.option('--threaded', is_flag=True)
@click.argument('HOST', default='0.0.0.0')
@click.argument('PORT', default=8080, type=int)
def run(debug, threaded, host, port):
HOST, PORT = host, port
print "running on %s:%d" % (HOST, PORT)
        app.run(host=HOST, port=int(os.getenv('VCAP_APP_PORT', PORT)), debug=debug, threaded=threaded)
run()
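# Hedged CLI sketch (the module filename is an assumed example): with the
# click options above, the server can be started as
#   python server.py --debug --threaded 0.0.0.0 8080
# where the VCAP_APP_PORT environment variable, when set, takes precedence
# over the PORT argument.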
|
|
"""Module for the WelcomeCount Cog."""
import datetime
from typing import List, Union
import discord
from redbot.core import Config, checks, commands
from redbot.core.utils.chat_formatting import box
__all__ = ["UNIQUE_ID", "WelcomeCount"]
UNIQUE_ID = 0x6F7951A4
_DEFAULT_WELCOME = (
"Welcome, {mention}, to {server}!\n\n{count} user{plural} joined today!"
)
class WelcomeCount(commands.Cog):
"""A special welcome cog which keeps a daily count of new users.
Idea came from Twentysix's version of Red on the official Red-DiscordBot
server.
"""
def __init__(self):
super().__init__()
self.conf: Config = Config.get_conf(
self, identifier=UNIQUE_ID, force_registration=True
)
self.conf.register_channel(
enabled=False,
last_message=None,
delete_last_message=True,
welcome_msg=_DEFAULT_WELCOME,
)
self.conf.register_guild(count=0, day=None, join_role=None)
@checks.admin_or_permissions(manage_guild=True)
@commands.guild_only()
@commands.group(invoke_without_command=True, aliases=["wcount"])
async def welcomecount(self, ctx: commands.Context):
"""Manage settings for WelcomeCount."""
if not ctx.invoked_subcommand:
await ctx.send_help()
channel: discord.TextChannel = ctx.channel
settings = self.conf.channel(channel)
if await settings.enabled():
msg: str = await settings.welcome_msg()
delete_last: bool = await settings.delete_last_message()
await ctx.send(
box(
"Enabled in this channel.\n"
"Deletion of previous welcome message enabled: {0}\n"
"Welcome message: {1}"
"".format(delete_last, msg)
)
)
else:
await ctx.send(box("Disabled in this channel."))
@welcomecount.command(name="toggle")
async def welcomecount_toggle(self, ctx: commands.Context):
"""Toggle welcome messages in this channel."""
channel: discord.TextChannel = ctx.channel
settings = self.conf.channel(channel)
now_enabled: bool = not await settings.enabled()
await settings.enabled.set(now_enabled)
await ctx.send(
"Welcome messages are now {0} in this channel."
"".format("enabled" if now_enabled else "disabled")
)
@welcomecount.command(name="message")
async def welcomecount_message(self, ctx: commands.Context, *, message: str):
"""Set the bot's welcome message.
This message can be formatted using these parameters:
mention - Mention the user who joined
username - The user's display name
server - The name of the server
count - The number of users who joined today.
plural - Empty if `count` is 1. 's' otherwise.
total - The total number of users in the server.
To format the welcome message with the above parameters, include them
in your message surrounded by curly braces {}.
"""
channel: discord.TextChannel = ctx.channel
settings = self.conf.channel(channel)
await settings.welcome_msg.set(message)
member: discord.Member = ctx.author
count: int = await self.conf.guild(ctx.guild).count()
params = {
"mention": member.mention,
"username": member.display_name,
"server": ctx.guild.name,
"count": count,
"plural": "" if count == 1 else "s",
"total": ctx.guild.member_count,
}
try:
to_send = message.format(**params)
except KeyError as exc:
await ctx.send(
f"The welcome message cannot be formatted, because it contains an "
f"invalid placeholder `{{{exc.args[0]}}}`. See `{ctx.clean_prefix}help "
f"welcomecount message` for a list of valid placeholders."
)
else:
await ctx.send(
"Welcome message set, here's what it'll look like:\n\n" + to_send
)
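    # Hedged example of the formatting performed above (values assumed):
    #
    #   _DEFAULT_WELCOME.format(mention="@newuser", server="Example",
    #                           count=3, plural="s")
    #   # -> 'Welcome, @newuser, to Example!\n\n3 users joined today!'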
@welcomecount.command(name="deletelast")
async def welcomecount_deletelast(self, ctx: commands.Context):
"""Toggle deleting the previous welcome message in this channel.
When enabled, the last message is deleted *only* if it was sent on
the same day as the new welcome message.
"""
channel: discord.TextChannel = ctx.channel
settings = self.conf.channel(channel)
now_deleting: bool = not await settings.delete_last_message()
await settings.delete_last_message.set(now_deleting)
await ctx.send(
"Deleting welcome messages are now {0} in this channel."
"".format("enabled" if now_deleting else "disabled")
)
@welcomecount.command(name="joinrole")
async def welcomecount_joinrole(
self, ctx: commands.Context, *, role: Union[discord.Role, str]
):
"""Set a role which a user must receive before they're welcomed.
This means that, instead of the welcome message being sent when
the user joins the server, the welcome message will be sent when
they receive a particular role.
Use `[p]welcomecount joinrole disable` to revert to the default
behaviour.
"""
if isinstance(role, discord.Role):
await self.conf.guild(ctx.guild).join_role.set(role.id)
await ctx.tick()
elif role.lower() == "disable":
await self.conf.guild(ctx.guild).join_role.clear()
await ctx.tick()
else:
await ctx.send(f'Role "{role}" not found.')
async def send_welcome_message(self, member: discord.Member) -> None:
guild: discord.Guild = member.guild
server_settings = self.conf.guild(guild)
today: datetime.date = datetime.date.today()
new_day: bool = False
if await server_settings.day() == str(today):
cur_count: int = await server_settings.count()
await server_settings.count.set(cur_count + 1)
else:
new_day = True
await server_settings.day.set(str(today))
await server_settings.count.set(1)
welcome_channels: List[discord.TextChannel] = []
# noinspection PyUnusedLocal
channel: discord.TextChannel
for channel in guild.channels:
if await self.conf.channel(channel).enabled():
welcome_channels.append(channel)
for channel in welcome_channels:
channel_settings = self.conf.channel(channel)
delete_last: bool = await channel_settings.delete_last_message()
if delete_last and not new_day:
last_message: int = await channel_settings.last_message()
try:
last_message: discord.Message = await channel.fetch_message(
last_message
)
except discord.HTTPException:
# Perhaps the message was deleted
pass
else:
await last_message.delete()
count: int = await server_settings.count()
params = {
"mention": member.mention,
"username": member.display_name,
"server": guild.name,
"count": count,
"plural": "" if count == 1 else "s",
"total": guild.member_count,
}
welcome: str = await channel_settings.welcome_msg()
msg: discord.Message = await channel.send(welcome.format(**params))
await channel_settings.last_message.set(msg.id)
# Events
@commands.Cog.listener()
async def on_member_join(self, member: discord.Member):
"""Send the welcome message and update the last message."""
if await self.conf.guild(member.guild).join_role() is None:
await self.send_welcome_message(member)
@commands.Cog.listener()
async def on_member_update(self, before: discord.Member, after: discord.Member):
join_role_id = await self.conf.guild(before.guild).join_role()
if join_role_id is None:
return
before_roles = frozenset(before.roles)
after_roles = frozenset(after.roles)
try:
added_role = next(iter(after_roles - before_roles))
except StopIteration:
# A role wasn't added
return
if added_role.id == join_role_id:
await self.send_welcome_message(after)
|
|
#
# Copyright 2015 LinkedIn Corp. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
from wherehows.common.writers import FileWriter
from wherehows.common import Constant
from wherehows.common.schemas import OozieFlowRecord
from wherehows.common.schemas import OozieJobRecord
from wherehows.common.schemas import OozieFlowOwnerRecord
from wherehows.common.schemas import OozieFlowExecRecord
from wherehows.common.schemas import OozieJobExecRecord
from wherehows.common.schemas import OozieFlowScheduleRecord
from wherehows.common.schemas import OozieFlowDagRecord
from wherehows.common.enums import SchedulerType
from com.ziclix.python.sql import zxJDBC
from org.slf4j import LoggerFactory
import os
import DbUtil
import sys
class OozieExtract:
def __init__(self, args):
self.logger = LoggerFactory.getLogger('jython script : ' + self.__class__.__name__)
self.app_id = int(args[Constant.JOB_REF_ID_KEY])
self.wh_exec_id = long(args[Constant.WH_EXEC_ID_KEY])
self.oz_con = zxJDBC.connect(args[Constant.OZ_DB_URL_KEY],
args[Constant.OZ_DB_USERNAME_KEY],
args[Constant.OZ_DB_PASSWORD_KEY],
args[Constant.OZ_DB_DRIVER_KEY])
self.oz_cursor = self.oz_con.cursor()
self.lookback_period = args[Constant.OZ_EXEC_ETL_LOOKBACK_MINS_KEY]
self.app_folder = args[Constant.WH_APP_FOLDER_KEY]
self.metadata_folder = self.app_folder + "/" + str(SchedulerType.OOZIE) + "/" + str(self.app_id)
self.oz_version = 4.0
if not os.path.exists(self.metadata_folder):
try:
os.makedirs(self.metadata_folder)
except Exception as e:
self.logger.error(e)
self.get_oozie_version()
def get_oozie_version(self):
query = "select data from OOZIE_SYS where name = 'oozie.version'"
self.oz_cursor.execute(query)
        row = self.oz_cursor.fetchone()
        self.oz_version = row[0]
        self.logger.info("Oozie version: {}", self.oz_version)
def run(self):
try:
self.collect_flow_jobs(self.metadata_folder + "/flow.csv",
self.metadata_folder + "/job.csv",
self.metadata_folder + "/dag.csv")
self.collect_flow_owners(self.metadata_folder + "/owner.csv")
self.collect_flow_schedules(self.metadata_folder + "/schedule.csv")
self.collect_flow_execs(self.metadata_folder + "/flow_exec.csv", self.lookback_period)
self.collect_job_execs(self.metadata_folder + "/job_exec.csv", self.lookback_period)
finally:
self.oz_cursor.close()
self.oz_con.close()
def collect_flow_jobs(self, flow_file, job_file, dag_file):
self.logger.info("collect flow&jobs")
flow_writer = FileWriter(flow_file)
job_writer = FileWriter(job_file)
dag_writer = FileWriter(dag_file)
query = """
SELECT a.*, b.created_time FROM
(SELECT w.app_name, w.app_path, max(w.id) as source_version, max(unix_timestamp(w.last_modified_time)) as last_modified_time
from WF_JOBS w LEFT JOIN WF_JOBS s
ON w.app_path = s.app_path AND w.created_time < s.created_time
WHERE s.created_time IS NULL GROUP BY w.app_name, w.app_path) a
JOIN
(SELECT app_path, min(unix_timestamp(created_time)) as created_time FROM WF_JOBS GROUP BY app_path) b
ON a.app_path = b.app_path
"""
self.oz_cursor.execute(query)
rows = DbUtil.dict_cursor(self.oz_cursor)
for row in rows:
flow_record = OozieFlowRecord(self.app_id,
row['app_name'],
row['app_path'],
0,
row['source_version'],
row['created_time'],
row['last_modified_time'],
self.wh_exec_id)
flow_writer.append(flow_record)
query = """
select name, type, transition from WF_ACTIONS
where wf_id = '{source_version}'
""".format(source_version=row['source_version'])
new_oz_cursor = self.oz_con.cursor()
new_oz_cursor.execute(query)
nodes = DbUtil.dict_cursor(new_oz_cursor)
for node in nodes:
job_record = OozieJobRecord(self.app_id,
row['app_path'],
row['source_version'],
node['name'],
row['app_path'] + "/" + node['name'],
node['type'],
self.wh_exec_id)
job_writer.append(job_record)
if node['transition'] != "*" and node['transition'] is not None:
dag_edge = OozieFlowDagRecord(self.app_id,
row['app_path'],
row['source_version'],
row['app_path'] + "/" + node['name'],
row['app_path'] + "/" + node['transition'],
self.wh_exec_id)
dag_writer.append(dag_edge)
new_oz_cursor.close()
dag_writer.close()
job_writer.close()
flow_writer.close()
def collect_flow_owners(self, owner_file):
self.logger.info("collect owners")
owner_writer = FileWriter(owner_file)
query = "SELECT DISTINCT app_name, app_path, user_name from WF_JOBS"
self.oz_cursor.execute(query)
rows = DbUtil.dict_cursor(self.oz_cursor)
for row in rows:
owner_record = OozieFlowOwnerRecord(self.app_id,
row['app_path'],
row['user_name'],
self.wh_exec_id)
owner_writer.append(owner_record)
owner_writer.close()
def collect_flow_schedules(self, schedule_file):
self.logger.info("collect flow schedule")
schedule_writer = FileWriter(schedule_file)
query = """
SELECT DISTINCT cj.id as ref_id, cj.frequency, cj.time_unit,
unix_timestamp(cj.start_time) as start_time, unix_timestamp(cj.end_time) as end_time,
wj.app_path
FROM COORD_JOBS cj JOIN COORD_ACTIONS ca ON ca.job_id = cj.id JOIN WF_JOBS wj ON ca.external_id = wj.id
WHERE cj.status = 'RUNNING'
"""
self.oz_cursor.execute(query)
rows = DbUtil.dict_cursor(self.oz_cursor)
for row in rows:
schedule_record = OozieFlowScheduleRecord(self.app_id,
row['app_path'],
row['time_unit'],
str(row['frequency']),
None,
row['start_time'],
row['end_time'],
row['ref_id'],
self.wh_exec_id)
schedule_writer.append(schedule_record)
schedule_writer.close()
def collect_flow_execs(self, flow_exec_file, lookback_period):
self.logger.info("collect flow execs")
flow_exec_writer = FileWriter(flow_exec_file)
query = "select id, app_name, app_path, unix_timestamp(start_time) as start_time, unix_timestamp(end_time) as end_time, run, status, user_name from WF_JOBS where end_time > now() - INTERVAL %d MINUTE" % (int(lookback_period))
self.oz_cursor.execute(query)
rows = DbUtil.dict_cursor(self.oz_cursor)
for row in rows:
flow_exec_record = OozieFlowExecRecord(self.app_id,
row['app_name'],
row['app_path'],
row['id'],
row['id'],
row['status'],
row['run'],
row['user_name'],
row['start_time'],
row['end_time'],
self.wh_exec_id)
flow_exec_writer.append(flow_exec_record)
flow_exec_writer.close()
def collect_job_execs(self, job_exec_file, lookback_period):
self.logger.info("collect job execs")
job_exec_writer = FileWriter(job_exec_file)
query = """
select a.id as job_exec_id, a.name as job_name, j.id as flow_exec_id, a.status, a.user_retry_count,
unix_timestamp(a.start_time) start_time, unix_timestamp(a.end_time) end_time,
j.app_name as jname, j.app_path, transition from WF_ACTIONS a JOIN WF_JOBS j on a.wf_id = j.id where j.end_time > now() - INTERVAL %d MINUTE
""" % (int(lookback_period))
self.oz_cursor.execute(query)
rows = DbUtil.dict_cursor(self.oz_cursor)
for row in rows:
job_exec_record = OozieJobExecRecord(self.app_id,
row['app_path'],
row['flow_exec_id'],
row['flow_exec_id'],
row['job_name'],
row['app_path'] + "/" + row['job_name'],
row['job_exec_id'],
row['status'],
row['user_retry_count'],
row['start_time'],
row['end_time'],
self.wh_exec_id)
job_exec_writer.append(job_exec_record)
job_exec_writer.close()
if __name__ == "__main__":
props = sys.argv[1]
az = OozieExtract(props)
az.run()
|
|
import commands
import json
import os
import sys
import traceback
import Job
import pUtil
from pUtil import tolog
class GetJob:
def __init__(self, pilot_initdir, node, siteInfo, jobSite):
self.__pilot_initdir = pilot_initdir
self.__pilotWorkingDir = pilot_initdir
self.__env = None
self.__node = node
self.__siteInfo = siteInfo
self.__jobSite = jobSite
self.__thisExperiment = None
self.setup()
def setup(self):
with open(os.path.join(self.__pilot_initdir, 'env.json')) as inputFile:
self.__env = json.load(inputFile)
self.__env['si'] = self.__siteInfo
self.__env['thisSite'] = self.__jobSite
self.__env['workerNode'] = self.__node
self.__thisExperiment = self.__env['experiment']
self.__env['pilot_initdir'] = self.__pilot_initdir
def getProdSourceLabel(self):
""" determine the job type """
prodSourceLabel = None
# not None value; can be user (user analysis job), ddm (panda mover job, sitename should contain DDM)
# test will return a testEvgen/testReco job, ptest will return a job sent with prodSourceLabel ptest
if self.__env['uflag']:
if self.__env['uflag'] == 'self' or self.__env['uflag'] == 'ptest':
if self.__env['uflag'] == 'ptest':
prodSourceLabel = self.__env['uflag']
elif self.__env['uflag'] == 'self':
prodSourceLabel = 'user'
else:
prodSourceLabel = self.__env['uflag']
# for PandaMover jobs the label must be ddm
if "DDM" in self.__env['thisSite'].sitename or (self.__env['uflag'] == 'ddm' and self.__env['thisSite'].sitename == 'BNL_ATLAS_test'):
prodSourceLabel = 'ddm'
elif "Install" in self.__env['thisSite'].sitename: # old, now replaced with prodSourceLabel=install
prodSourceLabel = 'software'
if pUtil.readpar('status').lower() == 'test' and self.__env['uflag'] != 'ptest' and self.__env['uflag'] != 'ddm':
prodSourceLabel = 'test'
# override for release candidate pilots
if self.__env['pilot_version_tag'] == "RC":
prodSourceLabel = "rc_test"
if self.__env['pilot_version_tag'] == "DDM":
prodSourceLabel = "ddm"
return prodSourceLabel
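        # Hedged summary of getProdSourceLabel()'s mapping (inputs are assumed
        # examples):
        #   uflag 'self' -> 'user', uflag 'ptest' -> 'ptest'
        #   sitename containing 'DDM' -> 'ddm', containing 'Install' -> 'software'
        #   schedconfig status 'test' (non-ptest/ddm pilots) -> 'test'
        #   pilot_version_tag 'RC' -> 'rc_test', 'DDM' -> 'ddm' (final override)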
def getDispatcherDictionary(self, _diskSpace, tofile, nJobs=1):
""" Construct a dictionary for passing to jobDispatcher """
pilotErrorDiag = ""
# glExec proxy key
_getProxyKey = "False"
# Eddie - commented out
# if pUtil.readpar('glexec').lower() in ['true', 'uid']:
# _getProxyKey = "True"
nodename = self.__env['workerNode'].nodename
pUtil.tolog("Node name: %s" % (nodename))
jNode = {'siteName': self.__env['thisSite'].sitename,
'cpu': self.__env['workerNode'].cpu,
'mem': self.__env['workerNode'].mem,
'diskSpace': _diskSpace,
'node': nodename,
'computingElement': self.__env['thisSite'].computingElement,
'getProxyKey': _getProxyKey,
'nJobs': nJobs,
'workingGroup': self.__env['workingGroup']}
if self.__env['countryGroup'] == "":
pUtil.tolog("No country group selected")
else:
jNode['countryGroup'] = self.__env['countryGroup']
pUtil.tolog("Using country group: %s" % (self.__env['countryGroup']))
if self.__env['workingGroup'] == "":
pUtil.tolog("No working group selected")
else:
pUtil.tolog("Using working group: %s" % (jNode['workingGroup']))
if self.__env['allowOtherCountry']:
pUtil.tolog("allowOtherCountry is set to True (will be sent to dispatcher)")
jNode['allowOtherCountry'] = self.__env['allowOtherCountry']
# should the job be requested for a special DN?
if self.__env['uflag'] == 'self':
            # get the pilot submitter DN, and only process this user's jobs
DN, pilotErrorDiag = self.getDN()
if DN == "":
return {}, "", pilotErrorDiag
else:
jNode['prodUserID'] = DN
pUtil.tolog("prodUserID: %s" % (jNode['prodUserID']))
# determine the job type
prodSourceLabel = self.getProdSourceLabel()
if prodSourceLabel:
jNode['prodSourceLabel'] = prodSourceLabel
pUtil.tolog("prodSourceLabel: %s" % (jNode['prodSourceLabel']), tofile=tofile)
# send the pilot token
# WARNING: do not print the jNode dictionary since that will expose the pilot token
if self.__env['pilotToken']:
jNode['token'] = self.__env['pilotToken']
return jNode, prodSourceLabel, pilotErrorDiag
def getDN(self):
""" Return the DN for the pilot submitter """
DN = ""
pilotErrorDiag = ""
# Try to use arcproxy first since voms-proxy-info behaves poorly under SL6
# cmd = "arcproxy -I |grep 'subject'| sed 's/.*: //'"
cmd = "arcproxy -i subject"
pUtil.tolog("Executing command: %s" % (cmd))
err, out = commands.getstatusoutput(cmd)
if "command not found" in out:
pUtil.tolog("!!WARNING!!1234!! arcproxy is not available")
pUtil.tolog("!!WARNING!!1235!! Defaulting to voms-proxy-info (can lead to memory problems with the command in case of low schedconfig.memory setting)")
# Default to voms-proxy-info
cmd = "voms-proxy-info -subject"
pUtil.tolog("Executing command: %s" % (cmd))
err, out = commands.getstatusoutput(cmd)
if err == 0:
DN = out
pUtil.tolog("Got DN = %s" % (DN))
CN = "/CN=proxy"
if not DN.endswith(CN):
pUtil.tolog("!!WARNING!!1234!! DN does not end with %s (will be added)" % (CN))
DN += CN
else:
pilotErrorDiag = "User=self set but cannot get proxy: %d, %s" % (err, out)
return DN, pilotErrorDiag
def writeDispatcherEC(self, EC):
""" write the dispatcher exit code to file """
filename = os.path.join(self.__env['pilot_initdir'], "STATUSCODE")
if os.path.exists(filename):
try:
os.remove(filename)
except Exception, e:
pUtil.tolog("Warning: Could not remove file: %s" % str(e))
else:
pUtil.tolog("Removed existing STATUSCODE file")
        pUtil.writeToFile(filename, str(EC))
def getStatusCode(self, data):
""" get and write the dispatcher status code to file """
pUtil.tolog("Parsed response: %s" % str(data))
try:
StatusCode = data['StatusCode']
except Exception, e:
pilotErrorDiag = "Can not receive any job from jobDispatcher: %s" % str(e)
pUtil.tolog("!!WARNING!!1200!! %s" % (pilotErrorDiag))
StatusCode = '45'
# Put the StatusCode in a file (used by some pilot wrappers), erase if it already exists
self.writeDispatcherEC(StatusCode)
return StatusCode
def backupDispatcherResponse(self, response, tofile):
""" Backup response (will be copied to workdir later) """
try:
fh = open(self.__env['pandaJobDataFileName'], "w")
fh.write(response)
fh.close()
except Exception, e:
pUtil.tolog("!!WARNING!!1999!! Could not store job definition: %s" % str(e), tofile=tofile)
else:
pUtil.tolog("Job definition stored (for later backup) in file %s" % (self.__env['pandaJobDataFileName']), tofile=tofile)
def backupJobData(self, newJob, data):
filename = os.path.join(self.__pilotWorkingDir, "Job_%s.json" % newJob.jobId)
content = {'workdir': newJob.workdir, 'data': data, 'experiment': self.__thisExperiment}
with open(filename, 'w') as outputFile:
json.dump(content, outputFile)
def getNewJob(self, tofile=True, nJobs=1):
try:
_maxinputsize = pUtil.getMaxInputSize(MB=True)
_disk = self.__node.disk
pUtil.tolog("Available WN disk space: %d MB" % (_disk))
_diskSpace = min(_disk, _maxinputsize)
pUtil.tolog("Sending disk space %d MB to dispatcher" % (_diskSpace))
# construct a dictionary for passing to jobDispatcher and get the prodSourceLabel
jNode, prodSourceLabel, pilotErrorDiag = self.getDispatcherDictionary(_diskSpace, tofile, nJobs)
if jNode == {}:
errorText = "!!FAILED!!1200!! %s" % (pilotErrorDiag)
pUtil.tolog(errorText, tofile=tofile)
# send to stderr
print >> sys.stderr, errorText
return None, None, pilotErrorDiag
# get a random server
url = '%s:%s/server/panda' % (self.__env['pshttpurl'], str(self.__env['psport']))
pUtil.tolog("Looking for a primary job (contacting server at %s)" % (url), tofile=tofile)
# make http connection to jobdispatcher
# format: status, parsed response (data), response
ret = pUtil.httpConnect(jNode, url, mode = "GETJOB", path = self.__pilotWorkingDir, experiment = self.__thisExperiment) # connection mode is GETJOB
# get and write the dispatcher status code to file
StatusCode = str(ret[0])
# the original response will be put in a file in this function
data = ret[1] # dictionary
response = ret[2] # text
# write the dispatcher exit code to file
self.writeDispatcherEC(StatusCode)
if ret[0]: # non-zero return
return None, None, pUtil.getDispatcherErrorDiag(ret[0])
if StatusCode != '0':
pilotErrorDiag = "No job received from jobDispatcher, StatusCode: %s" % (StatusCode)
pUtil.tolog("%s" % (pilotErrorDiag), tofile=tofile)
return None, None, pilotErrorDiag
# backup response (will be copied to workdir later)
self.backupDispatcherResponse(response, tofile)
if not data.has_key("jobs"):
jobs = [data]
else:
jobs = data['jobs']
newJobs = []
newJobsData = {}
for job in jobs:
                # test if the attempt number was sent
try:
attemptNr = int(job['attemptNr'])
except Exception,e:
pUtil.tolog("!!WARNING!!1200!! Failed to get attempt number from server: %s" % str(e), tofile=tofile)
else:
pUtil.tolog("Attempt number from server: %d" % attemptNr)
# should there be a delay before setting running state?
try:
nSent = int(job['nSent'])
except Exception,e:
nSent = 0
else:
pUtil.tolog("Received nSent: %d" % (nSent))
if job.has_key('prodSourceLabel'):
if job['prodSourceLabel'] == "":
pUtil.tolog("Setting prodSourceLabel in job def data: %s" % (prodSourceLabel))
job['prodSourceLabel'] = prodSourceLabel
else:
pUtil.tolog("prodSourceLabel already set in job def data: %s" % (job['prodSourceLabel']))
# override ptest value if install job to allow testing using dev pilot
if prodSourceLabel == "ptest" and "atlpan/install/sw-mgr" in job['transformation']:
pUtil.tolog("Dev pilot will run test install job (job.prodSourceLabel set to \'install\')")
job['prodSourceLabel'] = "install"
else:
pUtil.tolog("Adding prodSourceLabel to job def data: %s" % (prodSourceLabel))
job['prodSourceLabel'] = prodSourceLabel
# look for special commands in the job parameters (can be set by HammerCloud jobs; --overwriteQueuedata, --disableFAX)
# if present, queuedata needs to be updated (as well as jobParameters - special commands need to be removed from the string)
job['jobPars'], transferType = self.__siteInfo.updateQueuedataFromJobParameters(job['jobPars'])
if transferType != "":
# we will overwrite whatever is in job.transferType using jobPars
job['transferType'] = transferType
# update the copytoolin if transferType is set to fax/xrd
if job.has_key('transferType'):
if job['transferType'] == 'fax' or job['transferType']== 'xrd':
if pUtil.readpar('faxredirector') != "":
pUtil.tolog("Encountered transferType=%s, will use FAX site mover for stage-in" % (job['transferType']))
ec = self.__siteInfo.replaceQueuedataField("copytoolin", "fax")
ec = self.__siteInfo.replaceQueuedataField("allowfax", "True")
ec = self.__siteInfo.replaceQueuedataField("timefloor", "")
else:
pilotErrorDiag = "Cannot switch to FAX site mover for transferType=%s since faxredirector is not set" % (job['transferType'])
pUtil.tolog("!!WARNING!!1234!! %s" % (pilotErrorDiag))
return None, None, pilotErrorDiag
# convert the data into a file for child process to pick for running real job later
try:
f = open("Job_%s.py" % job['PandaID'], "w")
print >>f, "job=", job
f.close()
except Exception,e:
pilotErrorDiag = "[pilot] Exception caught: %s" % str(e)
pUtil.tolog("!!WARNING!!1200!! %s" % (pilotErrorDiag), tofile=tofile)
return None, None, pilotErrorDiag
# create the new job
newJob = Job.Job()
newJob.setJobDef(job) # fill up the fields with correct values now
newJob.mkJobWorkdir(self.__pilotWorkingDir)
self.backupJobData(newJob, job)
newJob.datadir = self.__jobSite.workdir + "/PandaJob_%s_data" % (newJob.jobId)
newJob.experiment = self.__thisExperiment
if job.has_key('logGUID'):
logGUID = job['logGUID']
if logGUID != "NULL" and logGUID != "":
newJob.tarFileGuid = logGUID
pUtil.tolog("Got logGUID from server: %s" % (logGUID), tofile=tofile)
else:
pUtil.tolog("!!WARNING!!1200!! Server returned NULL logGUID", tofile=tofile)
pUtil.tolog("Using generated logGUID: %s" % (newJob.tarFileGuid), tofile=tofile)
else:
pUtil.tolog("!!WARNING!!1200!! Server did not return logGUID", tofile=tofile)
pUtil.tolog("Using generated logGUID: %s" % (newJob.tarFileGuid), tofile=tofile)
if newJob.prodSourceLabel == "":
pUtil.tolog("Giving new job prodSourceLabel=%s" % (prodSourceLabel))
newJob.prodSourceLabel = prodSourceLabel
else:
pUtil.tolog("New job has prodSourceLabel=%s" % (newJob.prodSourceLabel))
# should we use debug mode?
if job.has_key('debug'):
if job['debug'].lower() == "true":
self.__env['update_freq_server'] = 5*30
pUtil.tolog("Debug mode requested: Updating server update frequency to %d s" % (self.__env['update_freq_server']))
newJobs.append(newJob)
newJobsData[newJob.jobId] = job
return newJobs, newJobsData, ""
except Exception:
errLog = "Failed to get new job: %s" % (traceback.format_exc())
pUtil.tolog(errLog)
return None, None, errLog
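# A minimal sketch (not part of the pilot API; names are illustrative) of how
# the Job_<PandaID>.py file written above can be read back by the child
# process. The file holds a single "job= {...}" assignment, so in Python 2:
#
#     namespace = {}
#     execfile("Job_%s.py" % pandaID, namespace)
#     job = namespace['job']  # the dispatcher's job dictionary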
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
# Licensed to the BBC under a Contributor Agreement: PO
import Axon
import feedparser
from Kamaelia.Protocol.HTTP.HTTPClient import SimpleHTTPClient
from Kamaelia.Chassis.Pipeline import Pipeline
from Kamaelia.Util.Splitter import Plug, PlugSplitter
from Kamaelia.Util.OneShot import OneShot
from Axon.Ipc import producerFinished, shutdownMicroprocess
from ForwarderComponent import Forwarder
SAVE = 'pickle'
if SAVE == 'pickle':
import pickle
FILENAME = 'feeds-control.tmp'
def reset():
pickle.dump({}, open(FILENAME, 'w'))
def started(url):
data = pickle.load(open(FILENAME))
data[url] = 'started'
pickle.dump(data, open(FILENAME, 'w'))
def stopped(url):
data = pickle.load(open(FILENAME))
data[url] = 'stopped'
pickle.dump(data, open(FILENAME, 'w'))
reset()
else:
def started(url):
pass
def stopped(url):
pass
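# The control file is simply a pickled dict mapping each feed URL to its last
# known state, e.g. (illustrative contents):
#
#     {'http://example.com/feed.rss': 'started',
#      'http://example.com/other.rss': 'stopped'}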
class Feedparser(Axon.Component.component):
"""
Feedparser(feedUrl) -> Feedparser object
It receives the content of a feed and sends the parsed
content. The parsed content is in a feedparser.FeedParserDict
object. It sets the 'href' attribute to the feedUrl.
"""
def __init__(self, feedUrl):
"""x.__init__(...) initializes x; see x.__class__.__doc__ for signature"""
super(Feedparser, self).__init__()
self.feedUrl = feedUrl
def main(self):
while True:
if self.dataReady("inbox"):
data = self.recv("inbox")
parseddata = feedparser.parse(data)
parseddata.href = self.feedUrl
if parseddata.has_key('bozo_exception'):
self.send(producerFinished(self),"signal")
stopped(self.feedUrl)
return
else:
self.send(parseddata, "outbox")
self.send(producerFinished(self),"signal")
stopped(self.feedUrl)
return
if self.dataReady("control"):
data = self.recv("control")
self.send(data,"signal")
if not isinstance(data, producerFinished):
print data
stopped(self.feedUrl)
return
if not self.anyReady():
self.pause()
yield 1
class FeedParserFactory(Axon.Component.component):
"""
FeedParserFactory() -> FeedParserFactory object
It receives different feed URLs through the "inbox" inbox
and sends each parsed feed through the "outbox" outbox.
This class can handle multiple concurrent requests: it retrieves
the content of each feed and parses it with the feedparser library.
The result is one feedparser.FeedParserDict for each feed URL
provided.
"""
Inboxes = {
"inbox" : "Strings representing different URLs of feeds",
"control" : "From component...",
"_parsed-feeds" : "Parsed feeds retrieved from FeedParserFactory children",
}
Outboxes = {
"outbox" : "feedparser.FeedParserDict object representing a parsed feed",
"signal" : "From component...",
"_signal" : "To the internal parsers",
}
def __init__(self, **argd):
"""x.__init__(...) initializes x; see x.__class__.__doc__ for signature"""
super(FeedParserFactory, self).__init__(**argd)
self.mustStop = None
self.providerFinished = False
def makeFeedParser(self, feedUrl):
"""
makeFeedParser(feedUrl) -> Pipeline
It returns a pipeline which does not expect any input except for signals and
sends the retrieved feed content through the "outbox" outbox.
"""
started(feedUrl)
return Pipeline(
OneShot(feedUrl),
SimpleHTTPClient(), # TODO: SimpleHTTPClient doesn't seem to have proxy support
)
def checkControl(self):
while self.dataReady("control"):
msg = self.recv("control")
if isinstance(msg,producerFinished):
self.providerFinished = True
elif isinstance(msg,shutdownMicroprocess):
self.mustStop = msg
return self.mustStop, self.providerFinished
def handleChildTerminations(self): #taken from Carousel.py
for child in self.childComponents():
if child._isStopped():
self.removeChild(child)
def initiateInternalSplitter(self):
self.internalSplitter = PlugSplitter()
self.link((self,'_signal'), (self.internalSplitter, 'control'))
self.addChildren(self.internalSplitter)
self.internalSplitter.activate()
def linkChildToInternalSplitter(self, child):
forwarder = Forwarder()
plug = Plug(self.internalSplitter, forwarder)
plug.activate()
plug.link((plug, 'signal'), (child, 'control'))
child.link((child, 'signal'), (plug, 'control'))
def createChild(self, feed):
child = self.makeFeedParser(feed.url)
child = Pipeline(child, Feedparser(feed.url))
self.link( (child, 'outbox'), (self, '_parsed-feeds') )
self.linkChildToInternalSplitter(child)
return child
def waitForChildren(self, signalMessage):
self.send(signalMessage,"_signal")
while len(self.childComponents()) > 0:
self.handleChildTerminations()
yield 1
def main(self):
self.initiateInternalSplitter()
yield 1
while True:
mustStop, providerFinished = self.checkControl()
if mustStop:
self.send(mustStop,"signal")
return
self.handleChildTerminations()
while self.dataReady("inbox"):
feed = self.recv("inbox")
child = self.createChild(feed)
self.addChildren(child)
child.activate()
while self.dataReady("_parsed-feeds"):
parseddata = self.recv("_parsed-feeds")
self.send(parseddata,"outbox")
if providerFinished and len(self.childComponents()) == 1:
# TODO: CHECK IF THIS IS THE PROBLEM
# It's actually only waiting for the plugsplitter
for _ in self.waitForChildren(producerFinished(self)):
yield 1
pfinished = producerFinished(self)
self.send(pfinished,"signal")
return
if not self.anyReady():
self.pause()
yield 1
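# A minimal usage sketch (not part of the original file). FeedParserFactory
# reads objects exposing a 'url' attribute from its "inbox", so a driver
# could look roughly like this ('Feed' and the ConsoleEchoer sink are
# illustrative assumptions):
#
#     from Kamaelia.Util.Console import ConsoleEchoer
#
#     class Feed(object):
#         def __init__(self, url):
#             self.url = url
#
#     Pipeline(
#         OneShot(Feed('http://example.com/feed.rss')),
#         FeedParserFactory(),
#         ConsoleEchoer(),
#     ).run()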
|
|
"""Compare marked moles across rotomaps.
Controls:
'left arrow' or 'right arrow' to change image in the left slot.
'up arrow' or 'down arrow' to change rotomap in the left slot.
'space' to swap left slot and right slot.
'p' or 'n' to change the mole being examined in both slots.
'a' to toggle crosshairs on/off.
'c' to mark the moles as changed in the newest rotomap.
'u' to mark the moles as unchanged in the newest rotomap.
'l' to zoom and rotate the left slot to roughly align with the right slot.
'z' to zoom in on the left slot.
'x' to zoom out on the left slot.
'j' to rotate left on the left slot.
'k' to rotate right on the left slot.
'q' to quit.
"""
import collections
import functools
import math
import cv2
import numpy
import mel.lib.common
import mel.lib.datetime
import mel.lib.ellipsespace
import mel.lib.fullscreenui
import mel.lib.image
import mel.lib.math
import mel.lib.moleimaging
import mel.rotomap.display
import mel.rotomap.moles
_PosInfo = collections.namedtuple(
"_PosInfo", "path pos ellipse_xpos uuid uuid_points"
)
def setup_parser(parser):
parser.add_argument(
"ROTOMAP",
type=mel.rotomap.moles.make_argparse_rotomap_directory,
nargs="+",
help=(
"A list of paths to rotomaps. The last rotomap is considered "
"'the target', and only UUIDs from that one will be compared."
),
)
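# For example (assuming this module is wired in as the 'rotomap compare'
# subcommand of the mel CLI via setup_parser/process_args), comparing two
# rotomap directories might look like:
#
#     mel rotomap compare rotomaps/2021-01-01 rotomaps/2021-06-01
#
# where the last path is treated as the target rotomap.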
def process_args(args):
target_rotomap = args.ROTOMAP[-1]
target_uuids = target_rotomap.calc_uuids()
uuid_to_rotomaps_imagepos_list = collections.defaultdict(
lambda: collections.defaultdict(list)
)
for rotomap in args.ROTOMAP:
for frame in rotomap.yield_frames():
if "ellipse" not in frame.metadata:
raise Exception(
f"{frame} has no ellipse metadata, "
'try running "rotomap calc-space"'
)
ellipse = frame.metadata["ellipse"]
elspace = mel.lib.ellipsespace.Transform(ellipse)
for uuid_, point in frame.moledata.uuid_points.items():
if uuid_ not in target_uuids:
continue
posinfo = _PosInfo(
path=frame.path,
pos=point,
ellipse_xpos=elspace.to_space(point)[0],
uuid=uuid_,
uuid_points=frame.moledata.uuid_points,
)
uuid_to_rotomaps_imagepos_list[uuid_][rotomap.path].append(
posinfo
)
# We can't compare moles that are only in one rotomap, cull these.
uuid_to_rotomaps_imagepos_list = {
key: value
for key, value in uuid_to_rotomaps_imagepos_list.items()
if len(value) > 1
}
if not uuid_to_rotomaps_imagepos_list:
raise Exception("Nothing to compare.")
# Ensure we're not using a defaultdict, otherwise we might miss a KeyError.
uuid_to_rotomaps_imagepos_list = dict(uuid_to_rotomaps_imagepos_list)
def unchanged_status_keyfunc(uuid_):
is_unchanged = is_lesion_unchanged(target_rotomap, uuid_)
if is_unchanged is None:
return 1
if not is_unchanged:
return 0
return 2
uuid_order = list(uuid_to_rotomaps_imagepos_list)
uuid_order.sort(key=unchanged_status_keyfunc)
uuid_ = uuid_order[0]
# Import pygame as late as possible, to avoid displaying its
# startup-text where it is not actually used.
import pygame
path_images_tuple = tuple(uuid_to_rotomaps_imagepos_list[uuid_].values())
with mel.lib.fullscreenui.fullscreen_context() as screen:
display = ImageCompareDisplay(screen, path_images_tuple)
on_keydown = _make_on_keydown(
display, uuid_order, target_rotomap, uuid_to_rotomaps_imagepos_list
)
for event in mel.lib.fullscreenui.yield_events_until_quit(screen):
if event.type == pygame.KEYDOWN:
on_keydown(event)
def _make_on_keydown(
display, uuid_order, target_rotomap, uuid_to_rotomaps_imagepos_list
):
# Import pygame as late as possible, to avoid displaying its
# startup-text where it is not actually used.
import pygame
index = 0
uuid_ = uuid_order[index]
is_unchanged = is_lesion_unchanged(target_rotomap, uuid_)
if is_unchanged is not None:
display.indicate_changed(not is_unchanged)
def on_keydown(event):
key = event.key
nonlocal index
if key == pygame.K_RIGHT:
display.next_image()
elif key == pygame.K_LEFT:
display.prev_image()
elif key == pygame.K_UP:
display.prev_rotomap()
elif key == pygame.K_DOWN:
display.next_rotomap()
elif key == pygame.K_n:
num_uuids = len(uuid_to_rotomaps_imagepos_list)
index += 1
index %= num_uuids
uuid_ = uuid_order[index]
path_images_tuple = tuple(
uuid_to_rotomaps_imagepos_list[uuid_].values()
)
display.reset(path_images_tuple)
is_unchanged = is_lesion_unchanged(target_rotomap, uuid_)
if is_unchanged is not None:
display.indicate_changed(not is_unchanged)
elif key == pygame.K_p:
num_uuids = len(uuid_to_rotomaps_imagepos_list)
index -= 1
index %= num_uuids
uuid_ = uuid_order[index]
path_images_tuple = tuple(
uuid_to_rotomaps_imagepos_list[uuid_].values()
)
display.reset(path_images_tuple)
is_unchanged = is_lesion_unchanged(target_rotomap, uuid_)
if is_unchanged is not None:
display.indicate_changed(not is_unchanged)
elif key == pygame.K_SPACE:
display.swap_images()
elif key == pygame.K_a:
display.toggle_crosshairs()
elif key == pygame.K_c:
is_unchanged = False
uuid_ = uuid_order[index]
mark_lesion(target_rotomap, uuid_, is_unchanged=False)
display.indicate_changed()
elif key == pygame.K_u:
is_unchanged = True
uuid_ = uuid_order[index]
mark_lesion(target_rotomap, uuid_, is_unchanged=True)
display.indicate_changed(False)
elif key == pygame.K_z:
display.adjust_zoom(1.025)
elif key == pygame.K_x:
display.adjust_zoom(1 / 1.025)
elif key == pygame.K_l:
display.auto_align()
elif key == pygame.K_j:
display.adjust_rotation(2)
elif key == pygame.K_k:
display.adjust_rotation(-2)
return on_keydown
def is_lesion_unchanged(rotomap, uuid_):
"""Mark the provided uuid changed status in the lesions datafile."""
for lesion in rotomap.lesions:
if lesion["uuid"] == uuid_:
return lesion[mel.rotomap.moles.KEY_IS_UNCHANGED]
return None
def mark_lesion(rotomap, uuid_, *, is_unchanged):
"""Mark the provided uuid changed status in the lesions datafile."""
target_lesion = None
for lesion in rotomap.lesions:
if lesion["uuid"] == uuid_:
target_lesion = lesion
if target_lesion is None:
target_lesion = {"uuid": uuid_}
rotomap.lesions.append(target_lesion)
target_lesion[mel.rotomap.moles.KEY_IS_UNCHANGED] = is_unchanged
mel.rotomap.moles.save_rotomap_dir_lesions_file(
rotomap.path, rotomap.lesions
)
class ImageCompareDisplay:
"""Display two images in a window, supply controls for comparing a list."""
def __init__(self, screen, path_images_tuple):
self._should_draw_crosshairs = True
self._display = screen
self.reset(path_images_tuple)
def reset(self, path_images_tuple):
if not path_images_tuple:
raise ValueError(
"path_images_tuple must be a tuple with at least one thing."
)
for group in path_images_tuple:
if not group:
raise ValueError("path_images_tuple not have empty groups.")
self._rotomaps = path_images_tuple
self._zooms = [1 for _ in path_images_tuple]
self._rotations = [0 for _ in path_images_tuple]
self._rotomap_cursors = [0] * len(self._rotomaps)
for i, rotomap in enumerate(self._rotomaps):
centre_index, _ = min(
enumerate(self._rotomaps[i]),
key=lambda x: x[1].ellipse_xpos * x[1].ellipse_xpos,
)
self._rotomap_cursors[i] = centre_index
self._indices = [0, -1]
self._should_indicate_changed = None
self._show()
def next_image(self):
ix = self._indices[0]
num_images = len(self._rotomaps[ix])
self._rotomap_cursors[ix] += 1
self._rotomap_cursors[ix] %= num_images
self._show()
def prev_image(self):
ix = self._indices[0]
num_images = len(self._rotomaps[ix])
self._rotomap_cursors[ix] -= 1
self._rotomap_cursors[ix] %= num_images
self._show()
def next_rotomap(self):
num_rotomaps = len(self._rotomaps)
self._indices[0] += 1
self._indices[0] %= num_rotomaps
self._show()
def prev_rotomap(self):
num_rotomaps = len(self._rotomaps)
self._indices[0] -= 1
self._indices[0] %= num_rotomaps
self._show()
def swap_images(self):
self._indices.reverse()
self._show()
def toggle_crosshairs(self):
self._should_draw_crosshairs = not self._should_draw_crosshairs
self._show()
def indicate_changed(self, should_indicate_changed=True):
self._should_indicate_changed = should_indicate_changed
self._show()
def adjust_zoom(self, zoom_multiplier):
ix = self._indices[0]
self._zooms[ix] *= zoom_multiplier
self._show()
def adjust_rotation(self, rotation_modifier):
ix = self._indices[0]
self._rotations[ix] += rotation_modifier
self._show()
def _posinfo(self, index):
ix = self._indices[index]
image_index = self._rotomap_cursors[ix]
return self._rotomaps[ix][image_index]
def auto_align(self):
left_posinfo = self._posinfo(0)
right_posinfo = self._posinfo(1)
target_uuid = left_posinfo.uuid
assert right_posinfo.uuid == target_uuid
common_uuids = set(left_posinfo.uuid_points) & set(
right_posinfo.uuid_points
)
common_uuids.remove(target_uuid)
if not common_uuids:
return
left_target_pos = [
pos
for uuid_, pos in left_posinfo.uuid_points.items()
if uuid_ == target_uuid
][0]
nearest_common_uuid = min(
common_uuids,
key=lambda u: mel.lib.math.distance_sq_2d(
left_posinfo.uuid_points[u], left_target_pos
),
)
left_dist = math.sqrt(
mel.lib.math.distance_sq_2d(
left_posinfo.uuid_points[nearest_common_uuid], left_target_pos
)
)
right_target_pos = [
pos
for uuid_, pos in right_posinfo.uuid_points.items()
if uuid_ == target_uuid
][0]
right_dist = math.sqrt(
mel.lib.math.distance_sq_2d(
right_posinfo.uuid_points[nearest_common_uuid],
right_target_pos,
)
)
self._zooms[self._indices[0]] = right_dist / left_dist
self._zooms[self._indices[1]] = 1.0
left_angle = mel.lib.math.angle(
left_posinfo.uuid_points[nearest_common_uuid] - left_target_pos
)
right_angle = mel.lib.math.angle(
right_posinfo.uuid_points[nearest_common_uuid] - right_target_pos
)
self._rotations[self._indices[0]] = right_angle - left_angle
self._rotations[self._indices[1]] = 0.0  # neutral rotation, in degrees
self._show()
def _path_pos_zoom_rotation(self, index):
image_index = self._rotomap_cursors[index]
posinfo = self._rotomaps[index][image_index]
zoom = self._zooms[index]
rotation = self._rotations[index]
return posinfo.path, posinfo.pos, zoom, rotation
def _show(self):
image_width = self._display.width // 2
image_height = self._display.height
image_size = numpy.array((image_width, image_height))
border_colour = None
if self._should_indicate_changed is not None:
if self._should_indicate_changed:
border_colour = (0, 0, 255)
else:
border_colour = (0, 255, 0)
images = [
captioned_mole_image(
*self._path_pos_zoom_rotation(i),
image_size,
self._should_draw_crosshairs,
border_colour,
)
for i in self._indices
]
montage = mel.lib.image.montage_horizontal(10, *images)
self._display.show_opencv_image(montage)
def captioned_mole_image(
path,
pos,
zoom,
rotation_degs,
size,
should_draw_crosshairs,
border_colour=None,
):
image, caption_shape = _cached_captioned_mole_image(
str(path), tuple(pos), zoom, tuple(size), rotation_degs
)
if should_draw_crosshairs:
image_crosshairs = image.copy()
xpos = image.shape[1] // 2
ypos = (image.shape[0] - caption_shape[0]) // 2
mel.rotomap.display.draw_crosshair(image_crosshairs, xpos, ypos)
image = cv2.addWeighted(image, 0.75, image_crosshairs, 0.25, 0.0)
if border_colour is not None:
cv2.rectangle(image, (0, 0), (image.shape[1], 10), border_colour, -1)
return image
@functools.lru_cache()
def _cached_captioned_mole_image(path, pos, zoom, size, rotation_degs):
image = mel.lib.image.load_image(path)
image = mel.lib.image.scale_image(image, zoom)
pos = tuple(int(v * zoom) for v in pos)
size = numpy.array(size)
max_size = 2 * max(size)
max_size = numpy.array([max_size, max_size])
max_size_2 = 2 * max_size
image = mel.lib.image.centered_at(image, pos, max_size_2)
image = mel.lib.image.rotated(image, rotation_degs)
image = mel.lib.image.centered_at(image, max_size, size)
caption = mel.lib.image.render_text_as_image(str(path))
return (
mel.lib.image.montage_vertical(10, image, caption),
caption.shape,
)
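# Note on the cache above: functools.lru_cache requires hashable arguments,
# which is why captioned_mole_image converts path to str and pos/size to
# tuples before calling the cached helper. A minimal illustration of the same
# pattern (names here are illustrative):
#
#     @functools.lru_cache()
#     def _expensive(path_str, pos_tuple):
#         ...
#
#     _expensive(str(path), tuple(pos))  # Paths and numpy arrays made hashable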
# -----------------------------------------------------------------------------
# Copyright (C) 2018-2021 Angelos Evripiotis.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------
|
|
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.core.exceptions import ImproperlyConfigured
from django.core.management.base import NoArgsCommand, CommandError, OutputWrapper
from django.core import serializers
from django.db import router, DEFAULT_DB_ALIAS
from django.utils.datastructures import SortedDict
from django.contrib.auth.models import User
from optparse import make_option
import logging
import sys
#class Command(BaseCommand):
# option_list = BaseCommand.option_list + (
# make_option('--format', default='json', dest='format',
# help='Specifies the output serialization format for fixtures.'),
# make_option('--indent', default=None, dest='indent', type='int',
# help='Specifies the indent level to use when pretty-printing output'),
# make_option('--database', action='store', dest='database',
# default=DEFAULT_DB_ALIAS, help='Nominates a specific database to dump '
# 'fixtures from. Defaults to the "default" database.'),
# make_option('-e', '--exclude', dest='exclude',action='append', default=[],
# help='An appname or appname.ModelName to exclude (use multiple --exclude to exclude multiple apps/models).'),
# make_option('-n', '--natural', action='store_true', dest='use_natural_keys', default=False,
# help='Yay CHRIS Use natural keys if they are available.'),
# make_option('-a', '--all', action='store_true', dest='use_base_manager', default=False,
# help="Use Django's base manager to dump all models stored in the database, including those that would otherwise be filtered or modified by a custom manager."),
# make_option('--pks', dest='primary_keys', help="Only dump objects with "
# "given primary keys. Accepts a comma seperated list of keys. "
# "This option will only work when you specify one model."),
# )
# help = ("Output the contents of the database as a fixture of the given "
# "format (using each model's default manager unless --all is "
# "specified).")
# args = '[appname appname.ModelName ...]'
# def handle(self, *app_labels, **options):
class Command(NoArgsCommand):
def handle_noargs(self, *app_labels, **options):
from django.db.models import get_app, get_apps, get_model
format = options.get('format')
indent = options.get('indent')
using = options.get('database')
excludes = options.get('exclude')
show_traceback = options.get('traceback')
use_natural_keys = options.get('use_natural_keys')
use_base_manager = options.get('use_base_manager')
pks = options.get('primary_keys')
user = options.get('user')
userid = user.id
stdout = OutputWrapper(options.get('stdout', sys.stdout))
if pks:
primary_keys = pks.split(',')
else:
primary_keys = []
excluded_apps = set()
excluded_models = set()
if excludes:
for exclude in excludes:
if '.' in exclude:
app_label, model_name = exclude.split('.', 1)
model_obj = get_model(app_label, model_name)
if not model_obj:
raise CommandError('Unknown model in excludes: %s' % exclude)
excluded_models.add(model_obj)
else:
try:
app_obj = get_app(exclude)
excluded_apps.add(app_obj)
except ImproperlyConfigured:
raise CommandError('Unknown app in excludes: %s' % exclude)
if len(app_labels) == 0:
if primary_keys:
raise CommandError("You can only use --pks option with one model")
app_list = SortedDict((app, None) for app in get_apps() if app not in excluded_apps)
else:
if len(app_labels) > 1 and primary_keys:
raise CommandError("You can only use --pks option with one model")
app_list = SortedDict()
for label in app_labels:
try:
app_label, model_label = label.split('.')
try:
app = get_app(app_label)
except ImproperlyConfigured:
raise CommandError("Unknown application: %s" % app_label)
if app in excluded_apps:
continue
model = get_model(app_label, model_label)
if model is None:
raise CommandError("Unknown model: %s.%s" % (app_label, model_label))
if app in app_list.keys():
if app_list[app] and model not in app_list[app]:
app_list[app].append(model)
else:
app_list[app] = [model]
except ValueError:
if primary_keys:
raise CommandError("You can only use --pks option with one model")
# This is just an app - no model qualifier
app_label = label
try:
app = get_app(app_label)
except ImproperlyConfigured:
raise CommandError("Unknown application: %s" % app_label)
if app in excluded_apps:
continue
app_list[app] = None
# Check that the serialization format exists; this is a shortcut to
# avoid collating all the objects and _then_ failing.
if format not in serializers.get_public_serializer_formats():
try:
serializers.get_serializer(format)
except serializers.SerializerDoesNotExist:
pass
raise CommandError("Unknown serialization format: %s" % format)
def get_objects():
# Collate the objects to be serialized.
for model in sort_dependencies(app_list.items()):
if model in excluded_models:
continue
if not model._meta.proxy and router.allow_syncdb(using, model):
if use_base_manager:
objects = model._base_manager
else:
objects = model._default_manager
queryset = objects.using(using).order_by(model._meta.pk.name)
if primary_keys:
queryset = queryset.filter(pk__in=primary_keys)
queryset = queryset.filter(owner_id=userid)
for obj in queryset.iterator():
yield obj
try:
stdout.ending = None
# self.stdout.ending = None
serializers.serialize(format, get_objects(), indent=indent,
use_natural_keys=use_natural_keys, stream=stdout)
# serializers.serialize(format, get_objects(), indent=indent,
# use_natural_keys=use_natural_keys, stream=self.stdout)
except Exception as e:
if show_traceback:
raise
raise CommandError("Unable to serialize database: %s" % e)
def sort_dependencies(app_list):
"""Sort a list of app,modellist pairs into a single list of models.
The single list of models is sorted so that any model with a natural key
is serialized before a normal model, and any model with a natural key
dependency has its dependencies serialized first.
"""
from django.db.models import get_model, get_models
# Process the list of models, and get the list of dependencies
model_dependencies = []
models = set()
for app, model_list in app_list:
if model_list is None:
model_list = get_models(app)
for model in model_list:
models.add(model)
# Add any explicitly defined dependencies
if hasattr(model, 'natural_key'):
deps = getattr(model.natural_key, 'dependencies', [])
if deps:
deps = [get_model(*d.split('.')) for d in deps]
else:
deps = []
# Now add a dependency for any FK or M2M relation with
# a model that defines a natural key
for field in model._meta.fields:
if hasattr(field.rel, 'to'):
rel_model = field.rel.to
if hasattr(rel_model, 'natural_key') and rel_model != model:
deps.append(rel_model)
for field in model._meta.many_to_many:
rel_model = field.rel.to
if hasattr(rel_model, 'natural_key') and rel_model != model:
deps.append(rel_model)
model_dependencies.append((model, deps))
model_dependencies.reverse()
# Now sort the models to ensure that dependencies are met. This
# is done by repeatedly iterating over the input list of models.
# If all the dependencies of a given model are in the final list,
# that model is promoted to the end of the final list. This process
# continues until the input list is empty, or we do a full iteration
# over the input models without promoting a model to the final list.
# If we do a full iteration without a promotion, that means there are
# circular dependencies in the list.
model_list = []
while model_dependencies:
skipped = []
changed = False
while model_dependencies:
model, deps = model_dependencies.pop()
# If all of the models in the dependency list are either already
# on the final model list, or not on the original serialization list,
# then we've found another model with all its dependencies satisfied.
found = True
for candidate in ((d not in models or d in model_list) for d in deps):
if not candidate:
found = False
if found:
model_list.append(model)
changed = True
else:
skipped.append((model, deps))
if not changed:
raise CommandError("Can't resolve dependencies for %s in serialized app list." %
', '.join('%s.%s' % (model._meta.app_label, model._meta.object_name)
for model, deps in sorted(skipped, key=lambda obj: obj[0].__name__))
)
model_dependencies = skipped
return model_list
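# A small worked example of the sorting contract above (illustrative model
# names, not from this codebase): if Book has a natural-key dependency on
# Author, then for app_list = [(app, [Book, Author])] the function returns
# [Author, Book], so Author fixtures are always serialized first. Circular
# natural-key dependencies raise CommandError instead of looping forever.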
|
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class servicegroupmember_stats(base_resource) :
""" Statistics for service group entity resource.
"""
def __init__(self) :
self._servicegroupname = ""
self._ip = ""
self._servername = ""
self._port = 0
self._clearstats = ""
self._avgsvrttfb = 0
self._primaryipaddress = ""
self._primaryport = 0
self._servicetype = ""
self._state = ""
self._totalrequests = 0
self._requestsrate = 0
self._totalresponses = 0
self._responsesrate = 0
self._totalrequestbytes = 0
self._requestbytesrate = 0
self._totalresponsebytes = 0
self._responsebytesrate = 0
self._curclntconnections = 0
self._surgecount = 0
self._cursrvrconnections = 0
self._svrestablishedconn = 0
self._curreusepool = 0
self._maxclients = 0
@property
def servicegroupname(self) :
"""Displays statistics for the specified service group.Name of the service group. Must begin with an ASCII alphanumeric or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at sign (@), equal sign (=), and hyphen (-) characters.
CLI Users: If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my servicegroup" or 'my servicegroup').<br/>Minimum length = 1.
"""
try :
return self._servicegroupname
except Exception as e:
raise e
@servicegroupname.setter
def servicegroupname(self, servicegroupname) :
"""Displays statistics for the specified service group.Name of the service group. Must begin with an ASCII alphanumeric or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at sign (@), equal sign (=), and hyphen (-) characters.
CLI Users: If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my servicegroup" or 'my servicegroup').
"""
try :
self._servicegroupname = servicegroupname
except Exception as e:
raise e
@property
def ip(self) :
"""IP address of the service group. Mutually exclusive with the server name parameter.
"""
try :
return self._ip
except Exception as e:
raise e
@ip.setter
def ip(self, ip) :
"""IP address of the service group. Mutually exclusive with the server name parameter.
"""
try :
self._ip = ip
except Exception as e:
raise e
@property
def servername(self) :
"""Name of the server. Mutually exclusive with the IP address parameter.<br/>Minimum length = 1.
"""
try :
return self._servername
except Exception as e:
raise e
@servername.setter
def servername(self, servername) :
"""Name of the server. Mutually exclusive with the IP address parameter.
"""
try :
self._servername = servername
except Exception as e:
raise e
@property
def port(self) :
"""Port number of the service group member.<br/>Range 1 - 65535.
"""
try :
return self._port
except Exception as e:
raise e
@port.setter
def port(self, port) :
"""Port number of the service group member.
"""
try :
self._port = port
except Exception as e:
raise e
@property
def clearstats(self) :
"""Clear the statsistics / counters.<br/>Possible values = basic, full.
"""
try :
return self._clearstats
except Exception as e:
raise e
@clearstats.setter
def clearstats(self, clearstats) :
"""Clear the statsistics / counters
"""
try :
self._clearstats = clearstats
except Exception as e:
raise e
@property
def svrestablishedconn(self) :
"""Number of server connections in ESTABLISHED state.
"""
try :
return self._svrestablishedconn
except Exception as e:
raise e
@property
def curclntconnections(self) :
"""Number of current client connections.
"""
try :
return self._curclntconnections
except Exception as e:
raise e
@property
def servicetype(self) :
"""The service type of this service.Possible values are ADNS, DNS, MYSQL, RTSP, SSL_DIAMETER, ADNS_TCP, DNS_TCP, NNTP, SIP_UDP, SSL_TCP, ANY, FTP, RADIUS, SNMP, TCP, DHCPRA, HTTP, RDP, SSL, TFTP, DIAMETER, MSSQL, RPCSVR, SSL_BRIDGE, UDP.
"""
try :
return self._servicetype
except Exception as e:
raise e
@property
def totalrequests(self) :
"""Total number of requests received on this service or virtual server. (This applies to HTTP/SSL services and servers.).
"""
try :
return self._totalrequests
except Exception as e:
raise e
@property
def surgecount(self) :
"""Number of requests in the surge queue.
"""
try :
return self._surgecount
except Exception as e:
raise e
@property
def responsebytesrate(self) :
"""Rate (/s) counter for totalresponsebytes.
"""
try :
return self._responsebytesrate
except Exception as e:
raise e
@property
def totalresponses(self) :
"""Number of responses received on this service or virtual server. (This applies to HTTP/SSL services and servers.).
"""
try :
return self._totalresponses
except Exception as e:
raise e
@property
def requestbytesrate(self) :
"""Rate (/s) counter for totalrequestbytes.
"""
try :
return self._requestbytesrate
except Exception as e:
raise e
@property
def cursrvrconnections(self) :
"""Number of current connections to the actual servers behind the virtual server.
"""
try :
return self._cursrvrconnections
except Exception as e:
raise e
@property
def primaryipaddress(self) :
"""The IP address on which the service is running.
"""
try :
return self._primaryipaddress
except Exception as e:
raise e
@property
def responsesrate(self) :
"""Rate (/s) counter for totalresponses.
"""
try :
return self._responsesrate
except Exception as e:
raise e
@property
def maxclients(self) :
"""Maximum open connections allowed on this service.
"""
try :
return self._maxclients
except Exception as e:
raise e
@property
def avgsvrttfb(self) :
"""Average TTFB between the NetScaler appliance and the server.TTFB is the time interval between sending the request packet to a service and receiving the first response from the service.
"""
try :
return self._avgsvrttfb
except Exception as e:
raise e
@property
def totalrequestbytes(self) :
"""Total number of request bytes received on this service or virtual server.
"""
try :
return self._totalrequestbytes
except Exception as e:
raise e
@property
def curreusepool(self) :
"""Number of requests in the idle queue/reuse pool.
"""
try :
return self._curreusepool
except Exception as e:
raise e
@property
def state(self) :
"""Current state of the server. Possible values are UP, DOWN, UNKNOWN, OFS(Out of Service), TROFS(Transition Out of Service), TROFS_DOWN(Down When going Out of Service).
"""
try :
return self._state
except Exception as e:
raise e
@property
def totalresponsebytes(self) :
"""Number of response bytes received by this service or virtual server.
"""
try :
return self._totalresponsebytes
except Exception as e:
raise e
@property
def primaryport(self) :
"""The port on which the service is running.
"""
try :
return self._primaryport
except Exception as e:
raise e
@property
def requestsrate(self) :
"""Rate (/s) counter for totalrequests.
"""
try :
return self._requestsrate
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(servicegroupmember_response, response, self.__class__.__name__.replace('_stats',''))
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.servicegroupmember
except Exception as e :
raise e
def _get_object_name(self) :
""" Returns the value of object identifier argument
"""
try :
if (self.servicegroupname) :
return str(self.servicegroupname)
return None
except Exception as e :
raise e
@classmethod
def get(cls, service, name="", option_="") :
""" Use this API to fetch the statistics of all servicegroupmember_stats resources that are configured on netscaler.
"""
try :
obj = servicegroupmember_stats()
option_ = options()
option_.args = nitro_util.object_to_string_withoutquotes(name)
response = obj.stat_resource(service, option_)
return response
except Exception as e:
raise e
class Clearstats:
basic = "basic"
full = "full"
class servicegroupmember_response(base_response) :
def __init__(self, length=1) :
self.servicegroupmember = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.servicegroupmember = [servicegroupmember_stats() for _ in range(length)]
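# A minimal usage sketch (hedged: follows the usual nitro client pattern from
# this library; host and credentials are placeholders):
#
#     from nssrc.com.citrix.netscaler.nitro.service.nitro_service import nitro_service
#
#     client = nitro_service("10.0.0.1", "http")
#     client.set_credential("nsroot", "nsroot")
#     client.login()
#     stats = servicegroupmember_stats.get(client, name="my_servicegroup")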
|
|
#!/usr/bin/python
import Tkinter as tk
import ttk
import br24_driver
from PIL import Image, ImageTk
import time
import math
import threading
#class br24_ctrl_window(mp.Process):
class br24_ctrl_window(threading.Thread):
def __init__(self, master, br, refresh_period=500):
#mp.Process.__init__(self)
self.refresh_period = refresh_period
threading.Thread.__init__(self)
self.master = master
master.wm_title("BR24 radar options")
self.br = br
self.frame = tk.Frame(self.master)
self.local_interference_opts = ['off', 'low','medium', 'high']
self.interference_reject_opts = ['off', 'low','medium', 'high']
self.target_boost_opts = ['off', 'low', 'high']
self.radar_range_opts = ['50 m','75 m','100 m','250 m','500 m','750 m','1 km','1.5 km','2 km','3 km','4 km','6 km','8 km','12 km','16 km','24 km']
self.fp_opts = ['Gain', 'Rain Clutter Filter', 'Sea Clutter Filter']
self.fp_vals = ['auto_gain', 'manual_gain', 'rain_clutter_manual', 'sea_clutter_auto', 'sea_clutter_manual']
self.fp_opts = ['Auto']
self.fp_opts.extend(range(1, 0x50))
self.button1 = tk.Button(self.frame, text = 'Start Radar', width = 25, command = self.start_driver)
self.button1.pack()
self.button2 = tk.Button(self.frame, text = 'Increase Scanning Speed', width = 25, command = self.increase_scan_speed)
self.button2.pack()
self.button3 = tk.Button(self.frame, text = 'Reset Scanning Speed', width = 25, command = self.reset_scan_speed)
self.button3.pack()
self.radar_range_label = tk.Label(self.frame, text = "Radar Range:")
self.radar_range_label.pack()
self.radar_range_cbox = ttk.Combobox(self.frame, values=self.radar_range_opts)
self.radar_range_cbox.pack()
self.interference_reject_label = tk.Label(self.frame, text = "Interference Rejection:")
self.interference_reject_label.pack()
self.interference_reject_cbox = ttk.Combobox(self.frame, values=self.interference_reject_opts)
self.interference_reject_cbox.pack()
self.target_boost_label = tk.Label(self.frame, text = "Target Boost:")
self.target_boost_label.pack()
self.target_boost_cbox = ttk.Combobox(self.frame, values=self.target_boost_opts)
self.target_boost_cbox.pack()
self.local_interference_label = tk.Label(self.frame, text = "Local Interference Filter:")
self.local_interference_label.pack()
self.local_interference_cbox = ttk.Combobox(self.frame, values=self.local_interference_opts)
self.local_interference_cbox.pack()
self.fp_frame = tk.Frame(self.frame)
self.gain_label = tk.Label(self.fp_frame, text = "Gain:")
self.gain_label.pack()
self.gain_cbox = ttk.Combobox(self.fp_frame, values=self.fp_opts)
self.gain_cbox.pack()
self.rainc_label = tk.Label(self.fp_frame, text = "Rain Clutter")
self.rainc_label.pack()
self.rainc_cbox = ttk.Combobox(self.fp_frame, values=self.fp_opts[1:])
self.rainc_cbox.pack()
self.seac_label = tk.Label(self.fp_frame, text = "Sea Clutter:")
self.seac_label.pack()
self.seac_cbox = ttk.Combobox(self.fp_frame, values=self.fp_opts)
self.seac_cbox.pack()
self.fp_frame.pack(pady=5)
self.frame.pack()
self.newWindow = tk.Toplevel(self.master)
self.image_window = br24_image_window(self.newWindow,self.refresh_period)
self.radar_range_cbox.bind('<<ComboboxSelected>>', self.radar_range_cmd)
self.interference_reject_cbox.bind('<<ComboboxSelected>>', self.interference_reject_cmd)
self.target_boost_cbox.bind('<<ComboboxSelected>>', self.target_boost_cmd)
self.local_interference_cbox.bind('<<ComboboxSelected>>', self.local_interference_cmd)
self.master.protocol("WM_TAKE_FOCUS", self.on_focus)
self.daemon = True
#self.alive = mp.Event()
self.alive = threading.Event()
self.start()
def on_focus(self):
self.image_window.master.lift()
self.master.lift()
def set_driver(self,br):
self.br = br
def start_driver(self):
if not self.br.is_alive():
self.br.start()
self.br.start_radar()
self.button1.config(text = 'Stop Radar', width = 25, command = self.stop_driver)
def stop_driver(self):
self.br.stop_radar()
self.button1.config(text = 'Start Radar', width = 25, command = self.start_driver)
def increase_scan_speed(self):
print "increasing scanning speed by %s"%(1)
self.br.increase_scan_speed(1)
def reset_scan_speed(self):
print "resetting scanning speed to normal"
self.br.reset_scan_speed()
def set_filter_preprocessing(self, event):
val = event.widget.get()
print "setting filter preprocessing to %s" % (val)
# TODO: wire up the appropriate br24_driver call for the gain/rain/sea
# clutter preprocessing value selected in the combobox.
def radar_range_cmd(self, event):
val = self.radar_range_cbox.get()
if val != '':
print "setting radar range to %s"%(val)
self.br.set_radar_range(self.radar_range_opts.index(val))
def interference_reject_cmd(self, event):
val = self.interference_reject_cbox.get()
if val != '':
print "setting interference rejection %s"%(val)
self.br.set_interference_rejection(self.interference_reject_opts.index(val))
def local_interference_cmd(self, event):
val = self.local_interference_cbox.get()
if val != '':
print "setting local interference filter %s"%(val)
self.br.set_local_interference_filter(self.local_interference_opts.index(val))
def target_boost_cmd(self, event):
val =self.target_boost_cbox.get()
if val != '':
print "setting target boost %s"%(val)
self.br.set_target_boost(self.target_boost_opts.index(val))
def run(self):
self.alive.set()
last_angle = -1
start_time = time.time()
count = 0
while self.alive.is_set():
while self.br.scanline_ready():
sc = self.br.get_scanline()
self.image_window.draw_scanline(sc)
if last_angle > sc['angle']:
#self.image_window.update_radar_image()
curr_time = time.time()-start_time
print "finished full scan: %s %s"%(curr_time,last_angle)
print "processed %d scan lines"%(count)
print "socket queue size: %d"%(self.br.data_q.qsize())
print "scanline queue size: %d"%(self.br.scan_data_decoder.scanlines.qsize())
count = 0
start_time = time.time()
last_angle = sc['angle']
count+=1
time.sleep(0.0001)
class br24_image_window:
def __init__(self, master, refresh_period = 500):
self.refresh_period = refresh_period
# initialize the master window
self.master = master
master.wm_title("BR24 radar image")
self.master.geometry('512x512')
self.master.aspect(1,1,1,1)
# create a frame for putting in the content
self.frame = tk.Frame(self.master)
# create the radar image object
self.radar_image = Image.new("RGB", (512,512), "black")
self.radar_imagetk = ImageTk.PhotoImage(self.radar_image)
self.pixels = self.radar_image.load()
# create a canvas for drawing the radar image and indicators
self.radar_canvas = tk.Canvas(self.frame)
self.radar_image_id = self.radar_canvas.create_image((0,0),image = self.radar_imagetk, anchor="nw")
# add a scanline indicator
self.current_angle=0
self.scanline_indicator = self.radar_canvas.create_line(256,256,256,0,fill="#028802",stipple="",arrow=tk.LAST)
# add the reference circles to the canvas
n_circles = 4
r_step = 256/n_circles
self.reference_circles = [self.radar_canvas.create_oval(0,0,1,1, fill=None, outline="gray25", dash=(4,4)) for x in xrange(n_circles)]
# annotate the circles with distances
self.reference_labels = [self.radar_canvas.create_text(0,0, text="10", anchor="ne", fill="gray35", font="Helvetica 9") for x in xrange(n_circles)]
# place canvas in frame, frame in window
self.radar_canvas.pack(fill = "both", expand = 1)
self.frame.pack(fill = "both", expand = 1)
# initialize the event that will redraw the radar image periodically
self.master.after(self.refresh_period, self.update_radar_image)
#configure resize event
self.radar_canvas.bind('<Configure>', self.resize)
# initialize internal variables
self.height = self.radar_canvas.winfo_height()
self.width = self.radar_canvas.winfo_width()
self.angle_increment = 2.0*math.pi/4096.0
self.center_x = self.width/2.0
self.center_y = self.height/2.0
self.radius = 0.5*min(self.height,self.width)
self.scale = self.radius/512.0
self.scale_mts = 12
self.mutex = threading.Lock()
def draw_reference_circles(self):
r_step = self.radius/len(self.reference_circles)
i=1
for circle_id in self.reference_circles:
self.radar_canvas.coords(circle_id, self.center_x - i*r_step, self.center_y - i*r_step, self.center_x + i*r_step, self.center_y + i*r_step)
i+=1
def draw_reference_labels(self):
r_step = self.radius/len(self.reference_labels)
mts_step = 5.0*math.ceil(self.scale_mts*4.0/5.0)/len(self.reference_labels)
i=1
for label_id in self.reference_labels:
self.radar_canvas.coords(label_id, self.center_x + i*r_step, self.center_y)
self.radar_canvas.itemconfig(label_id, text="%.2f"%(mts_step*i))
i+=1
def draw_scanline_indicator(self):
cos_ang = math.cos(self.current_angle)
sin_ang = math.sin(self.current_angle)
r = 2*self.radius
x = int(self.center_x + r*sin_ang)
y = self.height - int(self.center_y + r*cos_ang) - 1
self.radar_canvas.coords(self.scanline_indicator,self.center_x,self.center_y,x,y)
def draw_scanline(self,sc):
self.current_angle= sc['angle']*self.angle_increment
cos_ang = math.cos(self.current_angle)
sin_ang = math.sin(self.current_angle)
r_max = len(sc['data'])
with self.mutex:
for r in xrange(r_max):
intensity = ord(sc['data'][r])
x = int(self.center_x + r*self.scale*sin_ang)
y = self.height - int(self.center_y + r*self.scale*cos_ang) - 1
#y = int(center + r*scale*cos_ang)
try:
self.pixels[x,y] = (0,intensity,20)
except:
print "index out of range x=%d y=%d (w=%d,h=%d)"%(x,y,self.width,self.height)
def draw_scanline_ros(self, msg):
sc = {}
sc['data'] = msg.scanline_data
sc['angle'] = msg.angle
sc['scale'] = msg.scan_radius
self.draw_scanline(sc)
if self.scale_mts != msg.scan_radius:
self.scale_mts = msg.scan_radius
# update the reference labels
self.draw_reference_labels()
def update_radar_image(self):
self.radar_imagetk = ImageTk.PhotoImage(self.radar_image)
self.radar_canvas.itemconfigure(self.radar_image_id, image = self.radar_imagetk)
self.draw_scanline_indicator()
self.master.after(self.refresh_period, self.update_radar_image)
def resize(self,event):
with self.mutex:
if event.width == self.width and event.height == self.height:
return
# update internal variables
self.width = event.width
self.height = event.height
self.center_x = self.width/2.0
self.center_y = self.height/2.0
self.radius = 0.5*min(self.height,self.width)
self.scale = self.radius/512.0
self.radar_image = Image.new("RGB", (self.width,self.height), "black")
self.pixels = self.radar_image.load()
self.draw_reference_circles()
self.draw_reference_labels()
if __name__ == '__main__':
br = br24_driver.br24()
root = tk.Tk()
ctrl_window = br24_ctrl_window(root,br, refresh_period=200)
root.mainloop()
|
|
""" Biological attributes
:Author: Jonathan Karr <karr@mssm.edu>
:Date: 2017-05-10
:Copyright: 2017, Karr Lab
:License: MIT
"""
from . import core
import Bio
import Bio.Alphabet
import Bio.motifs.matrix
import Bio.Seq
import Bio.SeqFeature
import copy
import json
import six
class FeatureLocationAttribute(core.LiteralAttribute):
""" Bio.SeqFeature.FeatureLocation attribute
Attributes:
default (:obj:`Bio.SeqFeature.FeatureLocation`): default value
"""
def __init__(self, default=None, none_value=None, verbose_name='', description='',
primary=False, unique=False):
"""
Args:
default (:obj:`Bio.SeqFeature.FeatureLocation`, optional): default value
none_value (:obj:`object`, optional): none value
verbose_name (:obj:`str`, optional): verbose name
description (:obj:`str`, optional): description
primary (:obj:`bool`, optional): indicate if attribute is primary attribute
unique (:obj:`bool`, optional): indicate if attribute value must be unique
"""
if default is not None and not isinstance(default, Bio.SeqFeature.FeatureLocation):
raise ValueError('`default` must be a `Bio.SeqFeature.FeatureLocation` or `None`')
super(FeatureLocationAttribute, self).__init__(default=default, none_value=none_value,
verbose_name=verbose_name,
description=description,
primary=primary, unique=unique)
def deserialize(self, value):
""" Deserialize value
Args:
value (:obj:`str`): semantically equivalent representation
Returns:
:obj:`tuple` of `Bio.SeqFeature.FeatureLocation`, `core.InvalidAttribute` or `None`: tuple of cleaned value and cleaning error
"""
if value is None or value == '':
value = None
error = None
elif isinstance(value, six.string_types):
start, end, strand = map(int, value.split(','))
value = Bio.SeqFeature.FeatureLocation(start, end, strand)
error = None
elif isinstance(value, (list, tuple)):
start, end, strand = value
value = Bio.SeqFeature.FeatureLocation(start, end, strand)
error = None
elif isinstance(value, Bio.SeqFeature.FeatureLocation):
error = None
else:
value = None
error = core.InvalidAttribute(self, [
('FeatureLocationAttribute must be None, an empty string, '
'a comma-separated string representation of a tuple, a tuple, a list, '
'or a Bio.SeqFeature.FeatureLocation')
])
return (value, error)
def validate(self, obj, value):
""" Determine if `value` is a valid value
Args:
obj (:obj:`Model`): class being validated
value (:obj:`Bio.SeqFeature.FeatureLocation`): value of attribute to validate
Returns:
:obj:`core.InvalidAttribute` or None: None if attribute is valid, otherwise return a list of errors as an instance of `core.InvalidAttribute`
"""
errors = []
if value is not None and not isinstance(value, Bio.SeqFeature.FeatureLocation):
errors.append('Value must be an instance of `Bio.SeqFeature.FeatureLocation`')
if self.primary and value is None:
errors.append('{} value for primary attribute cannot be empty'.format(
self.__class__.__name__))
if errors:
return core.InvalidAttribute(self, errors)
return None
def validate_unique(self, objects, values):
""" Determine if the attribute values are unique
Args:
objects (:obj:`list` of :obj:`Model`): list of `Model` objects
values (:obj:`list` of :obj:`Bio.SeqFeature.FeatureLocation`): list of values
Returns:
:obj:`core.InvalidAttribute` or None: None if values are unique, otherwise return a list of errors as an instance of `core.InvalidAttribute`
"""
str_values = []
for v in values:
str_values.append(self.serialize(v))
return super(FeatureLocationAttribute, self).validate_unique(objects, str_values)
def serialize(self, value):
""" Serialize string
Args:
value (:obj:`Bio.SeqFeature.FeatureLocation`): Python representation
Returns:
:obj:`str`: simple Python representation
"""
if value is None:
return ''
else:
return '{},{},{}'.format(value.start, value.end, value.strand) # :todo: check if this is sufficient
def to_builtin(self, value):
""" Encode a value of the attribute using a simple Python representation (dict, list, str, float, bool, None)
that is compatible with JSON and YAML
Args:
value (:obj:`Bio.SeqFeature.FeatureLocation`): value of the attribute
Returns:
:obj:`dict`: simple Python representation of a value of the attribute
"""
if value is None:
return None
else:
return {'start': value.start, 'end': value.end, 'strand': value.strand}
def from_builtin(self, json):
""" Decode a simple Python representation (dict, list, str, float, bool, None) of a value of the attribute
that is compatible with JSON and YAML
Args:
json (:obj:`dict`): simple Python representation of a value of the attribute
Returns:
:obj:`Bio.SeqFeature.FeatureLocation`: decoded value of the attribute
"""
if json is None:
return None
else:
return Bio.SeqFeature.FeatureLocation(json['start'], json['end'], json['strand'])
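# Round-trip sketch for FeatureLocationAttribute (values illustrative):
#
#     attr = FeatureLocationAttribute()
#     loc, err = attr.deserialize('10,200,1')   # err is None on success
#     attr.serialize(loc)                       # -> '10,200,1'
#     attr.to_builtin(loc)                      # -> {'start': 10, 'end': 200, 'strand': 1}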
class BioSeqAttribute(core.LiteralAttribute):
""" Bio.Seq.Seq attribute
Attributes:
alphabet (:obj:`Bio.Alphabet.Alphabet`): alphabet
min_length (:obj:`int`): minimum length
max_length (:obj:`int`): maximum length
default (:obj:`Bio.Seq.Seq`): default value
"""
def __init__(self, min_length=0, max_length=float('inf'), default=None, none_value=None, verbose_name='', description='',
primary=False, unique=False):
"""
Args:
min_length (:obj:`int`, optional): minimum length
max_length (:obj:`int`, optional): maximum length
default (:obj:`Bio.Seq.Seq`, optional): default value
none_value (:obj:`object`, optional): none value
verbose_name (:obj:`str`, optional): verbose name
description (:obj:`str`, optional): description
primary (:obj:`bool`, optional): indicate if attribute is primary attribute
unique (:obj:`bool`, optional): indicate if attribute value must be unique
"""
if default is not None and not isinstance(default, Bio.Seq.Seq):
raise ValueError('`default` must be a `Bio.Seq.Seq` or `None`')
if not isinstance(min_length, (six.integer_types, float)) or min_length < 0:
raise ValueError('`min_length` must be a non-negative integer')
if not isinstance(max_length, (six.integer_types, float)) or max_length < min_length:
raise ValueError('`max_length` must be an integer greater than or equal to `min_length`')
super(BioSeqAttribute, self).__init__(default=default, none_value=none_value,
verbose_name=verbose_name,
description=description,
primary=primary, unique=unique)
self.alphabet = None
self.min_length = min_length
self.max_length = max_length
def deserialize(self, value):
""" Deserialize value
Args:
value (:obj:`str`): semantically equivalent representation
Returns:
:obj:`tuple` of `Bio.Seq.Seq`, `core.InvalidAttribute` or `None`: tuple of cleaned value and cleaning error
"""
if value:
if self.alphabet:
value = Bio.Seq.Seq(value, self.alphabet)
else:
tmp = json.loads(value)
alphabet = getattr(Bio.Alphabet, tmp['alphabet']['type'])()
alphabet.size = tmp['alphabet']['size']
alphabet.letters = tmp['alphabet']['letters']
value = Bio.Seq.Seq(tmp['seq'], alphabet)
else:
value = None
return (value, None)
def validate(self, obj, value):
""" Determine if `value` is a valid value
Args:
obj (:obj:`Model`): class being validated
value (:obj:`Bio.Seq.Seq`): value of attribute to validate
Returns:
:obj:`core.InvalidAttribute` or None: None if attribute is valid, otherwise return a list of errors as an instance of `core.InvalidAttribute`
"""
errors = []
if value is not None:
if not isinstance(value, Bio.Seq.Seq):
errors.append('Value must be an instance of `Bio.Seq.Seq`')
elif self.alphabet and (
value.alphabet.__class__ != self.alphabet.__class__ or
value.alphabet.letters != self.alphabet.letters or
value.alphabet.size != self.alphabet.size):
errors.append('The alphabet of value must be an instance of `{}`'.format(self.alphabet.__class__.__name__))
if self.min_length and (not value or len(value) < self.min_length):
errors.append('Value must be at least {:d} characters'.format(self.min_length))
if self.max_length and value and len(value) > self.max_length:
errors.append('Value must be less than {:d} characters'.format(self.max_length))
if self.primary and (not value or len(value) == 0):
errors.append('{} value for primary attribute cannot be empty'.format(
self.__class__.__name__))
if errors:
return core.InvalidAttribute(self, errors)
return None
def validate_unique(self, objects, values):
""" Determine if the attribute values are unique
Args:
objects (:obj:`list` of :obj:`Model`): list of `Model` objects
values (:obj:`list` of :obj:`Bio.Seq.Seq`): list of values
Returns:
:obj:`core.InvalidAttribute` or None: None if values are unique, otherwise return a list of errors as an instance of `core.InvalidAttribute`
"""
str_values = []
for v in values:
str_values.append(self.serialize(v))
return super(BioSeqAttribute, self).validate_unique(objects, str_values)
def serialize(self, value):
""" Serialize string
Args:
value (:obj:`Bio.Seq.Seq`): Python representation
Returns:
:obj:`str`: simple Python representation
"""
if value is not None:
if self.alphabet:
return str(value)
else:
return json.dumps({
'seq': str(value),
'alphabet': {
'type': value.alphabet.__class__.__name__,
'letters': value.alphabet.letters,
'size': value.alphabet.size,
},
})
return ''
def to_builtin(self, value):
""" Encode a value of the attribute using a simple Python representation (dict, list, str, float, bool, None)
that is compatible with JSON and YAML
Args:
value (:obj:`Bio.Seq.Seq`): value of the attribute
Returns:
:obj:`dict`: simple Python representation of a value of the attribute
"""
if value is None:
return None
else:
return {
'seq': str(value),
'alphabet': {
'type': value.alphabet.__class__.__name__,
'letters': value.alphabet.letters,
'size': value.alphabet.size,
},
}
def from_builtin(self, json):
""" Decode a simple Python representation (dict, list, str, float, bool, None) of a value of the attribute
that is compatible with JSON and YAML
Args:
json (:obj:`dict`): simple Python representation of a value of the attribute
Returns:
:obj:`Bio.Seq.Seq`: decoded value of the attribute
"""
if json is None:
return None
else:
alphabet = getattr(Bio.Alphabet, json['alphabet']['type'])()
alphabet.size = json['alphabet']['size']
alphabet.letters = json['alphabet']['letters']
return Bio.Seq.Seq(json['seq'], alphabet)
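# A minimal round-trip sketch (hypothetical values; relies on the `Bio` and
# `json` imports used elsewhere in this module). Without a fixed alphabet the
# alphabet is serialized alongside the sequence:
#
#   attr = BioSeqAttribute()
#   seq = Bio.Seq.Seq('ACGT', Bio.Alphabet.DNAAlphabet())
#   s = attr.serialize(seq)             # JSON string with 'seq' and 'alphabet' keys
#   seq2, error = attr.deserialize(s)   # error is None on success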
class BioDnaSeqAttribute(BioSeqAttribute):
""" Bio.Seq.Seq attribute with Bio.Alphabet.DNAAlphabet """
def __init__(self, min_length=0, max_length=float('inf'), default=None, none_value=None, verbose_name='', description='',
primary=False, unique=False):
"""
Args:
min_length (:obj:`int`, optional): minimum length
max_length (:obj:`int`, optional): maximum length
default (:obj:`Bio.Seq.Seq`, optional): default value
none_value (:obj:`object`, optional): none value
verbose_name (:obj:`str`, optional): verbose name
description (:obj:`str`, optional): description
primary (:obj:`bool`, optional): indicate if attribute is primary attribute
unique (:obj:`bool`, optional): indicate if attribute value must be unique
"""
super(BioDnaSeqAttribute, self).__init__(min_length=min_length, max_length=max_length, default=default,
none_value=none_value, verbose_name=verbose_name,
description=description,
primary=primary, unique=unique)
self.alphabet = Bio.Alphabet.DNAAlphabet()
class BioProteinSeqAttribute(BioSeqAttribute):
""" Bio.Seq.Seq attribute with Bio.Alphabet.ProteinAlphabet """
def __init__(self, min_length=0, max_length=float('inf'), default=None, none_value=None, verbose_name='', description='',
primary=False, unique=False):
"""
Args:
min_length (:obj:`int`, optional): minimum length
max_length (:obj:`int`, optional): maximum length
default (:obj:`Bio.Seq.Seq`, optional): default value
none_value (:obj:`object`, optional): none value
verbose_name (:obj:`str`, optional): verbose name
description (:obj:`str`, optional): description
primary (:obj:`bool`, optional): indicate if attribute is primary attribute
unique (:obj:`bool`, optional): indicate if attribute value must be unique
"""
super(BioProteinSeqAttribute, self).__init__(min_length=min_length, max_length=max_length, default=default,
none_value=none_value, verbose_name=verbose_name,
description=description,
primary=primary, unique=unique)
self.alphabet = Bio.Alphabet.ProteinAlphabet()
class BioRnaSeqAttribute(BioSeqAttribute):
""" Bio.Seq.Seq attribute with Bio.Alphabet.RNAAlphabet """
def __init__(self, min_length=0, max_length=float('inf'), default=None, none_value=None, verbose_name='', description='',
primary=False, unique=False):
"""
Args:
min_length (:obj:`int`, optional): minimum length
max_length (:obj:`int`, optional): maximum length
default (:obj:`Bio.Seq.Seq`, optional): default value
none_value (:obj:`object`, optional): none value
verbose_name (:obj:`str`, optional): verbose name
description (:obj:`str`, optional): description
primary (:obj:`bool`, optional): indicate if attribute is primary attribute
unique (:obj:`bool`, optional): indicate if attribute value must be unique
"""
super(BioRnaSeqAttribute, self).__init__(min_length=min_length, max_length=max_length, default=default,
none_value=none_value, verbose_name=verbose_name,
description=description,
primary=primary, unique=unique)
self.alphabet = Bio.Alphabet.RNAAlphabet()
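# The DNA/RNA/protein subclasses differ only in the alphabet they fix, so
# values serialize to the bare sequence string. A sketch (hypothetical values):
#
#   attr = BioDnaSeqAttribute(min_length=4)
#   seq, error = attr.deserialize('ACGT')   # Bio.Seq.Seq with DNAAlphabet
#   attr.serialize(seq)                     # 'ACGT'
#   attr.validate(None, seq)                # None: value is valid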
class FrequencyPositionMatrixAttribute(core.LiteralAttribute):
""" Bio.motif.matrix.FrequencyPositionMatrix attribute """
def __init__(self, verbose_name='', description=''):
super(FrequencyPositionMatrixAttribute, self).__init__(
default=None, verbose_name=verbose_name,
description=description)
def validate(self, obj, value):
""" Determine if `value` is a valid value
Args:
obj (:obj:`Model`): class being validated
value (:obj:`Bio.motifs.matrix.FrequencyPositionMatrix`): value of attribute to validate
Returns:
:obj:`core.InvalidAttribute` or None: None if attribute is valid, otherwise return a list of errors as an instance of `core.InvalidAttribute`
"""
if value is not None and not isinstance(value, Bio.motifs.matrix.FrequencyPositionMatrix):
return core.InvalidAttribute(self, ['Value must be an instance of `Bio.motifs.matrix.FrequencyPositionMatrix`'])
return None
def serialize(self, value):
""" Serialize value to a string
Args:
value (:obj:`Bio.motifs.matrix.FrequencyPositionMatrix`): Python representation
Returns:
:obj:`str`: string representation
"""
if not value:
return ''
dict_value = {
'_alphabet': {
'type': value.alphabet.__class__.__name__,
'letters': value.alphabet.letters,
'size': value.alphabet.size,
},
}
for letter, counts in value.items():
dict_value[letter] = counts
return json.dumps(dict_value)
def deserialize(self, value):
""" Deserialize value
Args:
value (:obj:`str`): string representation
Returns:
:obj:`tuple` of `Bio.motifs.matrix.FrequencyPositionMatrix`, `core.InvalidAttribute` or `None`:
tuple of cleaned value and cleaning error
"""
if value:
try:
dict_value = json.loads(value)
alphabet = getattr(Bio.Alphabet, dict_value['_alphabet']['type'])()
alphabet.size = dict_value['_alphabet']['size']
alphabet.letters = dict_value['_alphabet']['letters']
dict_value.pop('_alphabet')
return (Bio.motifs.matrix.FrequencyPositionMatrix(alphabet, dict_value), None)
except Exception as error:
return (None, core.InvalidAttribute(self, [str(error)]))
else:
return (None, None)
def to_builtin(self, value):
""" Encode a value of the attribute using a simple Python representation (dict, list, str, float, bool, None)
that is compatible with JSON and YAML
Args:
value (:obj:`Bio.motifs.matrix.FrequencyPositionMatrix`): value of the attribute
Returns:
:obj:`dict`: simple Python representation of a value of the attribute
"""
if value is None:
return None
else:
json = {
'_alphabet': {
'type': value.alphabet.__class__.__name__,
'letters': value.alphabet.letters,
'size': value.alphabet.size,
},
}
for letter, counts in value.items():
json[letter] = counts
return json
def from_builtin(self, json):
""" Decode a simple Python representation (dict, list, str, float, bool, None) of a value of the attribute
that is compatible with JSON and YAML
Args:
json (:obj:`dict`): simple Python representation of a value of the attribute
Returns:
:obj:`Bio.motifs.matrix.FrequencyPositionMatrix`: decoded value of the attribute
"""
if json is None:
return None
else:
json = copy.copy(json)
alphabet = getattr(Bio.Alphabet, json['_alphabet']['type'])()
alphabet.size = json['_alphabet']['size']
alphabet.letters = json['_alphabet']['letters']
json.pop('_alphabet')
return Bio.motifs.matrix.FrequencyPositionMatrix(alphabet, json)
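# Round-trip sketch (hypothetical counts; `letters` must be set because the
# serialized alphabet records it):
#
#   alphabet = Bio.Alphabet.DNAAlphabet()
#   alphabet.letters = 'ACGT'
#   pfm = Bio.motifs.matrix.FrequencyPositionMatrix(
#       alphabet, {'A': [5, 1], 'C': [1, 5], 'G': [2, 2], 'T': [2, 2]})
#   attr = FrequencyPositionMatrixAttribute()
#   pfm2, error = attr.deserialize(attr.serialize(pfm))   # error is None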
|
|
# -*- coding: utf-8 -*-
""" Sahana Eden GUI Layouts (HTML Renderers)
@copyright: 2012-15 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
@todo: - complete layout implementations
- render "selected" (flag in item)
"""
__all__ = ("S3MainMenuDefaultLayout",
"MM",
"S3OptionsMenuDefaultLayout",
"M",
"S3MenuSeparatorDefaultLayout",
"SEP",
"S3BreadcrumbsLayout",
"S3AddResourceLink",
"homepage",
)
from gluon import *
from s3 import *
from s3theme import NAV, SECTION
# =============================================================================
class S3MainMenuDefaultLayout(S3NavigationItem):
""" Application Main Menu Layout """
# Use the layout method of this class in templates/<theme>/layouts.py
# if it is available at runtime (otherwise fallback to this layout):
OVERRIDE = "S3MainMenuLayout"
@staticmethod
def layout(item):
""" Layout Method (Item Renderer) """
# Manage flags: hide any disabled/unauthorized items
if not item.authorized and not item.opts.always_display:
item.enabled = False
item.visible = False
elif item.enabled is None or item.enabled:
item.enabled = True
item.visible = True
if item.enabled and item.visible:
items = item.render_components()
if item.parent is not None:
if item.attr._class:
classes = item.attr._class.split(" ")
else:
classes = []
if item.parent.parent is None:
# Item at the top-level?
toplevel = True
if item.opts.right:
classes.append("menu-right")
else:
toplevel = False
if item.components:
classes.append("has-dropdown not-click")
_class = " ".join(classes)
# Menu item with Dropdown
if item.get_first(enabled=True):
_href = item.url()
return LI(A(item.label,
_href=_href,
_id=item.attr._id
),
UL(items,
_class="dropdown"
),
_class=_class,
)
else:
# Menu item without Drop-Down
if toplevel:
item_url = item.url()
if item_url == URL(c="default", f="index"):
classes.append("menu-home")
if item.selected:
classes.append("active")
_class = " ".join(classes)
return LI(A(item.label,
_href=item_url,
_id=item.attr._id,
_target=item.attr._target,
),
_class=_class,
)
else:
# Submenu item
if isinstance(item.label, dict):
if "id" in item.label:
return S3MainMenuDefaultLayout.checkbox_item(item)
elif "name" in item.label:
label = item.label["name"]
else:
return None
else:
label = item.label
link = A(label,
_href=item.url(),
_id=item.attr._id,
_target=item.attr._target,
)
_class = " ".join(classes)
return LI(link, _class=_class)
else:
# Main menu
if item.opts.title_area:
title_area = item.opts.title_area
else:
title_area = A(" ",
_href=URL(c="default", f="index"),
_class="S3menulogo",
)
right = []
left = []
for item in items:
if "menu-right" in item["_class"]:
item.remove_class("menu-right")
right.append(item)
else:
left.append(item)
right.reverse()
if current.response.s3.rtl:
right, left = left, right
return NAV(
UL(LI(title_area,
_class="name"
),
LI(A(SPAN(current.T("Menu"))),
_class="toggle-topbar menu-icon",
),
_class="title-area",
),
SECTION(UL(right,
_class="right"),
UL(left,
_class="left"),
_class="top-bar-section"),
_class = "top-bar",
data = {"topbar": " "},
)
else:
return None
# ---------------------------------------------------------------------
@staticmethod
def checkbox_item(item):
""" Render special active items """
name = item.label
link = item.url()
_id = name["id"]
if "name" in name:
_name = name["name"]
else:
_name = ""
if "value" in name:
_value = name["value"]
else:
_value = False
if "request_type" in name:
_request_type = name["request_type"]
else:
_request_type = "ajax"
if link:
if _request_type == "ajax":
_onchange='''var val=$('#%s:checked').length;$.getS3('%s'+'?val='+val,null,false,null,false,false)''' % \
(_id, link)
else:
# Just load the page. Use this if the changed menu
# item should alter the contents of the page, and
# it's simpler just to load it.
_onchange="location.href='%s'" % link
else:
_onchange=None
return LI(A(INPUT(_type="checkbox",
_id=_id,
_onchange=_onchange,
value=_value,
),
"%s" % _name,
_nowrap="nowrap",
),
_class="menu-toggle",
)
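# For an ajax checkbox item, the rendered onchange handler looks like this
# (hypothetical id and link values):
#
#   var val=$('#menu_toggle:checked').length;
#   $.getS3('/eden/default/toggle'+'?val='+val,null,false,null,false,false)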
# =============================================================================
class S3OptionsMenuDefaultLayout(S3NavigationItem):
""" Controller Options Menu Layout """
# Use the layout method of this class in templates/<theme>/layouts.py
# if it is available at runtime (otherwise fallback to this layout):
OVERRIDE = "S3OptionsMenuLayout"
@staticmethod
def layout(item):
""" Layout Method (Item Renderer) """
# Manage flags: hide any disabled/unauthorized items
if not item.authorized:
enabled = False
visible = False
elif item.enabled is None or item.enabled:
enabled = True
visible = True
else:
# Explicitly disabled: set the flags anyway to avoid a NameError below
enabled = False
visible = False
if enabled and visible:
if item.parent is not None:
if item.enabled and item.authorized:
attr = dict(_id = item.attr._id)
if item.attr._onclick:
attr["_onclick"] = item.attr._onclick
else:
attr["_href"] = item.url()
if item.components:
# Submenu
_class = ""
if item.parent.parent is None and item.selected:
_class = "active"
section = [LI(A(item.label,
**attr
),
_class="heading %s" % _class,
),
]
items = item.render_components()
if items:
section.append(UL(items))
return section
else:
# Submenu item
if item.parent.parent is None:
_class = "heading"
else:
_class = ""
return LI(A(item.label,
**attr
),
_class=_class,
)
else:
# Main menu
items = item.render_components()
return DIV(NAV(UL(items, _id="main-sub-menu", _class="side-nav")), _class="sidebar")
else:
return None
# =============================================================================
class S3MenuSeparatorDefaultLayout(S3NavigationItem):
""" Simple menu separator """
# Use the layout method of this class in templates/<theme>/layouts.py
# if it is available at runtime (otherwise fallback to this layout):
OVERRIDE = "S3MenuSeparatorLayout"
@staticmethod
def layout(item):
""" Layout Method (Item Renderer) """
if item.parent is not None:
return LI(_class="divider hide-for-small")
else:
return None
# =============================================================================
# Import menu layouts from template (if present)
#
MM = S3MainMenuDefaultLayout
M = S3OptionsMenuDefaultLayout
SEP = S3MenuSeparatorDefaultLayout
# =============================================================================
class S3BreadcrumbsLayout(S3NavigationItem):
""" Breadcrumbs layout """
@staticmethod
def layout(item):
if item.parent is None:
items = item.render_components()
return DIV(UL(items), _class='breadcrumbs')
else:
if item.is_last():
_class = "highlight"
else:
_class = "ancestor"
return LI(A(item.label, _href=item.url(), _class=_class))
# =============================================================================
class S3HomepageMenuLayout(S3NavigationItem):
"""
Layout for homepage menu
@todo: better design, robust/responsive CSS, utilize Foundation!
"""
@staticmethod
def layout(item):
""" Layout Method (Item Renderer) """
# Manage flags: hide any disabled/unauthorized items
if not item.authorized and not item.opts.always_display:
item.enabled = False
item.visible = False
elif item.enabled is None or item.enabled:
item.enabled = True
item.visible = True
if item.enabled and item.visible:
items = item.render_components()
if item.parent is None:
# Top level (menu box)
arrow = "/%s/static/img/arrow_blue_right.png" % current.request.application
components = []
append = components.append
number_of_links = 0
for submenu in items:
append(submenu)
if item.opts.arrows:
append(DIV(IMG(_src=arrow), _class="div_arrow"))
number_of_links += len(submenu.elements("a"))
if not number_of_links:
# Hide the entire menu if it doesn't contain any links
return None
elif item.label:
components.insert(0, H3(item.label))
if item.opts.arrows:
# Remove the last arrow
components = components[:-1]
menu = DIV(TAG[""](components),
_id = item.attr._id,
_class = item.attr._class,
)
menu.add_class("menu_box")
return menu
else:
if item.components:
# Branch node (submenu, menu div)
_class = item.attr._class
if not _class:
_class = "menu_div"
return DIV(H3(item.label),
TAG[""](items),
_id = item.attr._id,
_class=_class,
)
else:
# Leaf node (menu item)
if item.opts.icon:
# Icon-type item
return A(IMG(_src=item.opts.icon),
_href=item.url(),
_title=item.label,
)
else:
# Button-type item
return A(DIV(item.label,
_class="menu-btn-r",
),
_class="menu-btn-l",
_href=item.url(),
)
else:
return None
# =============================================================================
class S3AddResourceLink(S3NavigationItem):
"""
Links in form fields comments to show a form for adding
a new foreign key record.
"""
def __init__(self,
label=None,
c=None,
f=None,
t=None,
vars=None,
info=None,
title=None,
tooltip=None,
):
"""
Constructor
@param c: the target controller
@param f: the target function
@param t: the target table (defaults to c_f)
@param vars: the request vars (format="popup" will be added automatically)
@param label: the link label (falls back to label_create)
@param info: hover-title for the label
@param title: the tooltip title
@param tooltip: the tooltip text
"""
if label is None:
label = title
if info is None:
info = title
if c is None:
# Fall back to current controller
c = current.request.controller
if label is None:
# Fall back to label_create
if t is None:
t = "%s_%s" % (c, f)
label = S3CRUD.crud_string(t, "label_create")
super(S3AddResourceLink, self).__init__(label,
c=c, f=f, t=t,
m="create",
vars=vars,
info=info,
title=title,
tooltip=tooltip,
mandatory=True)
# -------------------------------------------------------------------------
@staticmethod
def layout(item):
""" Layout for popup link """
if not item.authorized:
return None
if current.deployment_settings.get_ui_use_button_icons():
from s3.s3widgets import ICON
label = (ICON("add"), item.label)
else:
label = item.label
popup_link = A(label,
_href=item.url(format="popup"),
_class="s3_add_resource_link",
_id="%s_add" % item.function,
_target="top",
_title=item.opts.info,
)
tooltip = item.opts.tooltip
if tooltip is not None:
ttip = DIV(_class="tooltip",
_title="%s|%s" % (item.opts.title, tooltip))
else:
ttip = ""
return TAG[""](popup_link, ttip)
# -------------------------------------------------------------------------
@staticmethod
def inline(item):
""" Render this link for an inline component """
if not item.authorized:
return None
popup_link = A(item.label,
_href=item.url(format="popup"),
_class="s3_add_resource_link action-lnk",
_id="%s_%s_add" % (item.vars["caller"], item.function),
_target="top",
_title=item.opts.info,
)
return DIV(popup_link, _class="s3_inline_add_resource_link")
# =============================================================================
def homepage(module=None, *match, **attr):
"""
Shortcut for module homepage menu items using the MM layout;
retrieves the module's nice name for the label.
@param module: the module's prefix (controller)
@param match: additional prefixes
@param attr: attributes for the navigation item
"""
settings = current.deployment_settings
all_modules = settings.modules
layout = S3MainMenuDefaultLayout
c = [module] + list(match)
if "name" in attr:
name = attr["name"]
attr.pop("name")
else:
if module is None:
module = "default"
if module in all_modules:
m = all_modules[module]
name = m.name_nice
else:
name = module
if "f" in attr:
f = attr["f"]
del attr["f"]
else:
f = "index"
return layout(name, c=c, f=f, **attr)
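# Typical usage in a template's menus.py (hypothetical module prefixes):
#
#   homepage("gis")                       # label falls back to the nice name
#   homepage("org", "hrm", name="Staff")  # match several controller prefixes
#   homepage("default", f="about")        # override the target function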
# END =========================================================================
|
|
# Copyright (c) 2012 VMware, Inc.
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility functions for Image transfer and manipulation.
"""
import os
import tarfile
import tempfile
from lxml import etree
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import strutils
from oslo_utils import units
from oslo_vmware import rw_handles
import six
from nova import exception
from nova.i18n import _, _LE, _LI
from nova import image
from nova.virt.vmwareapi import constants
from nova.virt.vmwareapi import io_util
# NOTE(mdbooth): We use use_linked_clone below, but don't have to import it
# because nova.virt.vmwareapi.driver is imported first. In fact, it is not
# possible to import it here, as nova.virt.vmwareapi.driver calls
# CONF.register_opts() after the import chain which imports this module. This
# is not a problem as long as the import order doesn't change.
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
IMAGE_API = image.API()
QUEUE_BUFFER_SIZE = 10
LINKED_CLONE_PROPERTY = 'vmware_linked_clone'
class VMwareImage(object):
def __init__(self, image_id,
file_size=0,
os_type=constants.DEFAULT_OS_TYPE,
adapter_type=constants.DEFAULT_ADAPTER_TYPE,
disk_type=constants.DEFAULT_DISK_TYPE,
container_format=constants.CONTAINER_FORMAT_BARE,
file_type=constants.DEFAULT_DISK_FORMAT,
linked_clone=None,
vif_model=constants.DEFAULT_VIF_MODEL):
"""VMwareImage holds values for use in building VMs.
image_id (str): uuid of the image
file_size (int): size of file in bytes
os_type (str): name of guest os (use vSphere names only)
adapter_type (str): name of the adapter's type
disk_type (str): type of disk in thin, thick, etc
container_format (str): container format (bare or ova)
file_type (str): vmdk or iso
linked_clone(bool): use linked clone, or don't
"""
self.image_id = image_id
self.file_size = file_size
self.os_type = os_type
self.adapter_type = adapter_type
self.container_format = container_format
self.disk_type = disk_type
self.file_type = file_type
# NOTE(vui): This should be removed when we restore the
# descriptor-based validation.
if (self.file_type is not None and
self.file_type not in constants.DISK_FORMATS_ALL):
raise exception.InvalidDiskFormat(disk_format=self.file_type)
if linked_clone is not None:
self.linked_clone = linked_clone
else:
self.linked_clone = CONF.vmware.use_linked_clone
self.vif_model = vif_model
@property
def file_size_in_kb(self):
return self.file_size / units.Ki
@property
def is_sparse(self):
return self.disk_type == constants.DISK_TYPE_SPARSE
@property
def is_iso(self):
return self.file_type == constants.DISK_FORMAT_ISO
@property
def is_ova(self):
return self.container_format == constants.CONTAINER_FORMAT_OVA
@classmethod
def from_image(cls, image_id, image_meta=None):
"""Returns VMwareImage, the subset of properties the driver uses.
:param image_id: image id of image
:param image_meta: image metadata we are working with
:return: vmware image object
:rtype: nova.virt.vmwareapi.images.VMwareImage
"""
if image_meta is None:
image_meta = {}
properties = image_meta.get("properties", {})
# calculate linked_clone flag, allow image properties to override the
# global property set in the configurations.
image_linked_clone = properties.get(LINKED_CLONE_PROPERTY,
CONF.vmware.use_linked_clone)
# catch any string values that need to be interpreted as boolean values
linked_clone = strutils.bool_from_string(image_linked_clone)
props = {
'image_id': image_id,
'linked_clone': linked_clone,
'container_format': image_meta.get('container_format')
}
if 'size' in image_meta:
props['file_size'] = image_meta['size']
if 'disk_format' in image_meta:
props['file_type'] = image_meta['disk_format']
props_map = {
'vmware_ostype': 'os_type',
'vmware_adaptertype': 'adapter_type',
'vmware_disktype': 'disk_type',
'hw_vif_model': 'vif_model'
}
for k, v in six.iteritems(props_map):
if k in properties:
props[v] = properties[k]
return cls(**props)
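# Sketch of the Glance metadata this consumes (hypothetical values):
#
#   image_meta = {'size': 512 * units.Mi,
#                 'disk_format': 'vmdk',
#                 'container_format': 'bare',
#                 'properties': {'vmware_ostype': 'otherGuest',
#                                'vmware_disktype': 'thin',
#                                'vmware_linked_clone': 'false'}}
#   image = VMwareImage.from_image(image_id, image_meta)
#   assert not image.is_iso and not image.linked_clone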
def start_transfer(context, read_file_handle, data_size,
write_file_handle=None, image_id=None, image_meta=None):
"""Start the data transfer from the reader to the writer.
Reader writes to the pipe and the writer reads from the pipe. This means
that the total transfer time boils down to the slower of the read/write
and not the addition of the two times.
"""
if not image_meta:
image_meta = {}
# The pipe that acts as an intermediate store of data for reader to write
# to and writer to grab from.
thread_safe_pipe = io_util.ThreadSafePipe(QUEUE_BUFFER_SIZE, data_size)
# The read thread. In case of glance it is the instance of the
# GlanceFileRead class. The glance client read returns an iterator
# and this class wraps that iterator to provide datachunks in calls
# to read.
read_thread = io_util.IOThread(read_file_handle, thread_safe_pipe)
# In case of Glance - VMware transfer, we just need a handle to the
# HTTP Connection that is to send transfer data to the VMware datastore.
if write_file_handle:
write_thread = io_util.IOThread(thread_safe_pipe, write_file_handle)
# In case of VMware - Glance transfer, we relinquish VMware HTTP file read
# handle to Glance Client instance, but to be sure of the transfer we need
# to be sure of the status of the image on glance changing to active.
# The GlanceWriteThread handles the same for us.
elif image_id:
write_thread = io_util.GlanceWriteThread(context, thread_safe_pipe,
image_id, image_meta)
# Start the read and write threads.
read_event = read_thread.start()
write_event = write_thread.start()
try:
# Wait on the read and write events to signal their end
read_event.wait()
write_event.wait()
except Exception as exc:
# If either the read or the write raises an exception, stop both
# threads so that we don't needlessly keep the other one waiting.
read_thread.stop()
write_thread.stop()
# Log and raise the exception.
LOG.exception(_LE('Transfer data failed'))
raise exception.NovaException(exc)
finally:
# No matter what, try closing the read and write handles, if it so
# applies.
read_file_handle.close()
if write_file_handle:
write_file_handle.close()
def upload_iso_to_datastore(iso_path, instance, **kwargs):
LOG.debug("Uploading iso %s to datastore", iso_path,
instance=instance)
with open(iso_path, 'rb') as iso_file:
write_file_handle = rw_handles.FileWriteHandle(
kwargs.get("host"),
kwargs.get("port"),
kwargs.get("data_center_name"),
kwargs.get("datastore_name"),
kwargs.get("cookies"),
kwargs.get("file_path"),
os.fstat(iso_file.fileno()).st_size)
LOG.debug("Uploading iso of size : %s ",
os.fstat(iso_file.fileno()).st_size)
block_size = 0x10000
data = iso_file.read(block_size)
while len(data) > 0:
write_file_handle.write(data)
data = iso_file.read(block_size)
write_file_handle.close()
LOG.debug("Uploaded iso %s to datastore", iso_path,
instance=instance)
def fetch_image(context, instance, host, port, dc_name, ds_name, file_path,
cookies=None):
"""Download image from the glance image server."""
image_ref = instance.image_ref
LOG.debug("Downloading image file data %(image_ref)s to the "
"data store %(data_store_name)s",
{'image_ref': image_ref,
'data_store_name': ds_name},
instance=instance)
metadata = IMAGE_API.get(context, image_ref)
file_size = int(metadata['size'])
read_iter = IMAGE_API.download(context, image_ref)
read_file_handle = rw_handles.ImageReadHandle(read_iter)
write_file_handle = rw_handles.FileWriteHandle(
host, port, dc_name, ds_name, cookies, file_path, file_size)
start_transfer(context, read_file_handle, file_size,
write_file_handle=write_file_handle)
LOG.debug("Downloaded image file data %(image_ref)s to "
"%(upload_name)s on the data store "
"%(data_store_name)s",
{'image_ref': image_ref,
'upload_name': 'n/a' if file_path is None else file_path,
'data_store_name': 'n/a' if ds_name is None else ds_name},
instance=instance)
def _build_shadow_vm_config_spec(session, name, size_kb, disk_type, ds_name):
"""Return spec for creating a shadow VM for image disk.
The VM is never meant to be powered on. When used in importing
a disk it governs the directory name created for the VM
and the disk type of the disk image to convert to.
:param name: Name of the backing
:param size_kb: Size in KB of the backing
:param disk_type: VMDK type for the disk
:param ds_name: Datastore name where the disk is to be provisioned
:return: Spec for creation
"""
cf = session.vim.client.factory
controller_device = cf.create('ns0:VirtualLsiLogicController')
controller_device.key = -100
controller_device.busNumber = 0
controller_device.sharedBus = 'noSharing'
controller_spec = cf.create('ns0:VirtualDeviceConfigSpec')
controller_spec.operation = 'add'
controller_spec.device = controller_device
disk_device = cf.create('ns0:VirtualDisk')
# for very small disks allocate at least 1KB
disk_device.capacityInKB = max(1, int(size_kb))
disk_device.key = -101
disk_device.unitNumber = 0
disk_device.controllerKey = -100
disk_device_bkng = cf.create('ns0:VirtualDiskFlatVer2BackingInfo')
if disk_type == constants.DISK_TYPE_EAGER_ZEROED_THICK:
disk_device_bkng.eagerlyScrub = True
elif disk_type == constants.DISK_TYPE_THIN:
disk_device_bkng.thinProvisioned = True
disk_device_bkng.fileName = '[%s]' % ds_name
disk_device_bkng.diskMode = 'persistent'
disk_device.backing = disk_device_bkng
disk_spec = cf.create('ns0:VirtualDeviceConfigSpec')
disk_spec.operation = 'add'
disk_spec.fileOperation = 'create'
disk_spec.device = disk_device
vm_file_info = cf.create('ns0:VirtualMachineFileInfo')
vm_file_info.vmPathName = '[%s]' % ds_name
create_spec = cf.create('ns0:VirtualMachineConfigSpec')
create_spec.name = name
create_spec.guestId = 'otherGuest'
create_spec.numCPUs = 1
create_spec.memoryMB = 128
create_spec.deviceChange = [controller_spec, disk_spec]
create_spec.files = vm_file_info
return create_spec
def _build_import_spec_for_import_vapp(session, vm_name, datastore_name):
vm_create_spec = _build_shadow_vm_config_spec(
session, vm_name, 0, constants.DISK_TYPE_THIN, datastore_name)
client_factory = session.vim.client.factory
vm_import_spec = client_factory.create('ns0:VirtualMachineImportSpec')
vm_import_spec.configSpec = vm_create_spec
return vm_import_spec
def fetch_image_stream_optimized(context, instance, session, vm_name,
ds_name, vm_folder_ref, res_pool_ref):
"""Fetch image from Glance to ESX datastore."""
image_ref = instance.image_ref
LOG.debug("Downloading image file data %(image_ref)s to the ESX "
"as VM named '%(vm_name)s'",
{'image_ref': image_ref, 'vm_name': vm_name},
instance=instance)
metadata = IMAGE_API.get(context, image_ref)
file_size = int(metadata['size'])
vm_import_spec = _build_import_spec_for_import_vapp(
session, vm_name, ds_name)
read_iter = IMAGE_API.download(context, image_ref)
read_handle = rw_handles.ImageReadHandle(read_iter)
write_handle = rw_handles.VmdkWriteHandle(session,
session._host,
session._port,
res_pool_ref,
vm_folder_ref,
vm_import_spec,
file_size)
start_transfer(context,
read_handle,
file_size,
write_file_handle=write_handle)
imported_vm_ref = write_handle.get_imported_vm()
LOG.info(_LI("Downloaded image file data %(image_ref)s"),
{'image_ref': instance.image_ref}, instance=instance)
session._call_method(session.vim, "UnregisterVM", imported_vm_ref)
LOG.info(_LI("The imported VM was unregistered"), instance=instance)
def get_vmdk_name_from_ovf(xmlstr):
"""Parse the OVA descriptor to extract the vmdk name."""
ovf = etree.fromstring(xmlstr)
nsovf = "{%s}" % ovf.nsmap["ovf"]
disk = ovf.find("./%sDiskSection/%sDisk" % (nsovf, nsovf))
file_id = disk.get("%sfileRef" % nsovf)
file = ovf.find('./%sReferences/%sFile[@%sid="%s"]' % (nsovf, nsovf,
nsovf, file_id))
vmdk_name = file.get("%shref" % nsovf)
return vmdk_name
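# The OVF fragment this parses looks like the following (hypothetical,
# heavily trimmed; the default namespace is the same as the "ovf" one):
#
#   <Envelope xmlns="http://schemas.dmtf.org/ovf/envelope/1"
#             xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1">
#     <References>
#       <File ovf:id="file1" ovf:href="disk1.vmdk"/>
#     </References>
#     <DiskSection>
#       <Disk ovf:fileRef="file1"/>
#     </DiskSection>
#   </Envelope>
#
# for which get_vmdk_name_from_ovf() returns 'disk1.vmdk'.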
def fetch_image_ova(context, instance, session, vm_name, ds_name,
vm_folder_ref, res_pool_ref):
"""Download the OVA image from the glance image server to the
Nova compute node.
"""
image_ref = instance.image_ref
LOG.debug("Downloading OVA image file %(image_ref)s to the ESX "
"as VM named '%(vm_name)s'",
{'image_ref': image_ref, 'vm_name': vm_name},
instance=instance)
metadata = IMAGE_API.get(context, image_ref)
file_size = int(metadata['size'])
vm_import_spec = _build_import_spec_for_import_vapp(
session, vm_name, ds_name)
read_iter = IMAGE_API.download(context, image_ref)
ova_fd, ova_path = tempfile.mkstemp()
try:
# NOTE(arnaud): Look to eliminate first writing OVA to file system
with os.fdopen(ova_fd, 'wb') as fp:
for chunk in read_iter:
fp.write(chunk)
with tarfile.open(ova_path, mode="r") as tar:
vmdk_name = None
for tar_info in tar:
if tar_info and tar_info.name.endswith(".ovf"):
extracted = tar.extractfile(tar_info.name)
xmlstr = extracted.read()
vmdk_name = get_vmdk_name_from_ovf(xmlstr)
elif vmdk_name and tar_info.name.startswith(vmdk_name):
# Actual file name is <vmdk_name>.XXXXXXX
extracted = tar.extractfile(tar_info.name)
write_handle = rw_handles.VmdkWriteHandle(
session,
session._host,
session._port,
res_pool_ref,
vm_folder_ref,
vm_import_spec,
file_size)
start_transfer(context,
extracted,
file_size,
write_file_handle=write_handle)
extracted.close()
LOG.info(_LI("Downloaded OVA image file %(image_ref)s"),
{'image_ref': instance.image_ref}, instance=instance)
imported_vm_ref = write_handle.get_imported_vm()
session._call_method(session.vim, "UnregisterVM",
imported_vm_ref)
LOG.info(_LI("The imported VM was unregistered"),
instance=instance)
return
raise exception.ImageUnacceptable(
reason=_("Extracting vmdk from OVA failed."),
image_id=image_ref)
finally:
os.unlink(ova_path)
def upload_image_stream_optimized(context, image_id, instance, session,
vm, vmdk_size):
"""Upload the snapshotted vm disk file to Glance image server."""
LOG.debug("Uploading image %s", image_id, instance=instance)
metadata = IMAGE_API.get(context, image_id)
read_handle = rw_handles.VmdkReadHandle(session,
session._host,
session._port,
vm,
None,
vmdk_size)
# Set the image properties. It is important to set the 'size' to 0.
# Otherwise, the image service client will use the VM's disk capacity
# which will not be the image size after upload, since it is converted
# to a stream-optimized sparse disk.
image_metadata = {'disk_format': 'vmdk',
'is_public': metadata['is_public'],
'name': metadata['name'],
'status': 'active',
'container_format': 'bare',
'size': 0,
'properties': {'vmware_image_version': 1,
'vmware_disktype': 'streamOptimized',
'owner_id': instance.project_id}}
# Passing 0 as the file size since data size to be transferred cannot be
# predetermined.
start_transfer(context,
read_handle,
0,
image_id=image_id,
image_meta=image_metadata)
LOG.debug("Uploaded image %s to the Glance image server", image_id,
instance=instance)
|
|
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from collections import defaultdict
from dataclasses import dataclass
from typing import Tuple
from pants.backend.python.lint.pylint.subsystem import (
Pylint,
PylintFieldSet,
PylintFirstPartyPlugins,
)
from pants.backend.python.subsystems.setup import PythonSetup
from pants.backend.python.target_types import (
InterpreterConstraintsField,
PythonResolveField,
PythonSourceField,
)
from pants.backend.python.util_rules import pex_from_targets
from pants.backend.python.util_rules.interpreter_constraints import InterpreterConstraints
from pants.backend.python.util_rules.pex import (
Pex,
PexRequest,
VenvPex,
VenvPexProcess,
VenvPexRequest,
)
from pants.backend.python.util_rules.pex_from_targets import RequirementsPexRequest
from pants.backend.python.util_rules.python_sources import (
PythonSourceFiles,
PythonSourceFilesRequest,
)
from pants.core.goals.lint import REPORT_DIR, LintResult, LintResults, LintTargetsRequest
from pants.core.util_rules.config_files import ConfigFiles, ConfigFilesRequest
from pants.core.util_rules.source_files import SourceFiles, SourceFilesRequest
from pants.engine.collection import Collection
from pants.engine.fs import CreateDigest, Digest, Directory, MergeDigests, RemovePrefix
from pants.engine.process import FallibleProcessResult
from pants.engine.rules import Get, MultiGet, collect_rules, rule
from pants.engine.target import Target, TransitiveTargets, TransitiveTargetsRequest
from pants.engine.unions import UnionRule
from pants.util.logging import LogLevel
from pants.util.ordered_set import FrozenOrderedSet, OrderedSet
from pants.util.strutil import pluralize
@dataclass(frozen=True)
class PylintPartition:
root_targets: FrozenOrderedSet[Target]
closure: FrozenOrderedSet[Target]
interpreter_constraints: InterpreterConstraints
class PylintPartitions(Collection[PylintPartition]):
pass
class PylintRequest(LintTargetsRequest):
field_set_type = PylintFieldSet
name = Pylint.options_scope
def generate_argv(source_files: SourceFiles, pylint: Pylint) -> Tuple[str, ...]:
args = []
if pylint.config is not None:
args.append(f"--rcfile={pylint.config}")
args.append("--jobs={pants_concurrency}")
args.extend(pylint.args)
args.extend(source_files.files)
return tuple(args)
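# With a discovered config file and two sources, this yields, for example
# (hypothetical paths):
#
#   ("--rcfile=pylintrc", "--jobs={pants_concurrency}", "src/a.py", "src/b.py")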
@rule(level=LogLevel.DEBUG)
async def pylint_lint_partition(
partition: PylintPartition, pylint: Pylint, first_party_plugins: PylintFirstPartyPlugins
) -> LintResult:
requirements_pex_get = Get(
Pex,
RequirementsPexRequest(
(t.address for t in partition.root_targets),
# NB: These constraints must be identical to the other PEXes. Otherwise, we risk using
# a different version for the requirements than the other two PEXes, which can result
# in a PEX runtime error about missing dependencies.
hardcoded_interpreter_constraints=partition.interpreter_constraints,
internal_only=True,
),
)
pylint_pex_get = Get(
Pex,
PexRequest,
pylint.to_pex_request(
interpreter_constraints=partition.interpreter_constraints,
extra_requirements=first_party_plugins.requirement_strings,
),
)
prepare_python_sources_get = Get(PythonSourceFiles, PythonSourceFilesRequest(partition.closure))
field_set_sources_get = Get(
SourceFiles, SourceFilesRequest(t[PythonSourceField] for t in partition.root_targets)
)
# Ensure that the empty report dir exists.
report_directory_digest_get = Get(Digest, CreateDigest([Directory(REPORT_DIR)]))
(
pylint_pex,
requirements_pex,
prepared_python_sources,
field_set_sources,
report_directory,
) = await MultiGet(
pylint_pex_get,
requirements_pex_get,
prepare_python_sources_get,
field_set_sources_get,
report_directory_digest_get,
)
pylint_runner_pex, config_files = await MultiGet(
Get(
VenvPex,
VenvPexRequest(
PexRequest(
output_filename="pylint_runner.pex",
interpreter_constraints=partition.interpreter_constraints,
main=pylint.main,
internal_only=True,
pex_path=[pylint_pex, requirements_pex],
),
# TODO(John Sirois): Remove this (change to the default of symlinks) when we can
# upgrade to a version of Pylint with https://github.com/PyCQA/pylint/issues/1470
# resolved.
site_packages_copies=True,
),
),
Get(
ConfigFiles, ConfigFilesRequest, pylint.config_request(field_set_sources.snapshot.dirs)
),
)
pythonpath = list(prepared_python_sources.source_roots)
if first_party_plugins:
pythonpath.append(first_party_plugins.PREFIX)
input_digest = await Get(
Digest,
MergeDigests(
(
config_files.snapshot.digest,
first_party_plugins.sources_digest,
prepared_python_sources.source_files.snapshot.digest,
report_directory,
)
),
)
result = await Get(
FallibleProcessResult,
VenvPexProcess(
pylint_runner_pex,
argv=generate_argv(field_set_sources, pylint),
input_digest=input_digest,
output_directories=(REPORT_DIR,),
extra_env={"PEX_EXTRA_SYS_PATH": ":".join(pythonpath)},
concurrency_available=len(partition.root_targets),
description=f"Run Pylint on {pluralize(len(partition.root_targets), 'file')}.",
level=LogLevel.DEBUG,
),
)
report = await Get(Digest, RemovePrefix(result.output_digest, REPORT_DIR))
return LintResult.from_fallible_process_result(
result,
partition_description=str(sorted(str(c) for c in partition.interpreter_constraints)),
report=report,
)
# TODO(#10863): Improve the performance of this, especially by not needing to calculate transitive
# targets per field set. Doing that would require changing how we calculate interpreter
# constraints to be more like how we determine resolves, i.e. only inspecting the root target
# (and later validating the closure is compatible).
@rule(desc="Determine if necessary to partition MyPy input", level=LogLevel.DEBUG)
async def pylint_determine_partitions(
request: PylintRequest, python_setup: PythonSetup, first_party_plugins: PylintFirstPartyPlugins
) -> PylintPartitions:
# We batch targets by their interpreter constraints + resolve to ensure, for example, that all
# Python 2 targets run together and all Python 3 targets run together.
#
# Note that Pylint uses the AST of the interpreter that runs it. So, we include any plugin
# targets in this interpreter constraints calculation. However, we don't have to consider the
# resolve of the plugin targets, per https://github.com/pantsbuild/pants/issues/14320.
transitive_targets_per_field_set = await MultiGet(
Get(TransitiveTargets, TransitiveTargetsRequest([field_set.address]))
for field_set in request.field_sets
)
resolve_and_interpreter_constraints_to_transitive_targets = defaultdict(set)
for transitive_targets in transitive_targets_per_field_set:
resolve = transitive_targets.roots[0][PythonResolveField].normalized_value(python_setup)
interpreter_constraints = InterpreterConstraints.create_from_compatibility_fields(
(
*(
tgt[InterpreterConstraintsField]
for tgt in transitive_targets.closure
if tgt.has_field(InterpreterConstraintsField)
),
*first_party_plugins.interpreter_constraints_fields,
),
python_setup,
)
resolve_and_interpreter_constraints_to_transitive_targets[
(resolve, interpreter_constraints)
].add(transitive_targets)
partitions = []
for (_resolve, interpreter_constraints), all_transitive_targets in sorted(
resolve_and_interpreter_constraints_to_transitive_targets.items()
):
combined_roots: OrderedSet[Target] = OrderedSet()
combined_closure: OrderedSet[Target] = OrderedSet()
for transitive_targets in all_transitive_targets:
combined_roots.update(transitive_targets.roots)
combined_closure.update(transitive_targets.closure)
partitions.append(
# Note that we don't need to pass the resolve. pex_from_targets.py will already
# calculate it by inspecting the roots & validating that all dependees are valid.
PylintPartition(
FrozenOrderedSet(combined_roots),
FrozenOrderedSet(combined_closure),
interpreter_constraints,
)
)
return PylintPartitions(partitions)
@rule(desc="Lint using Pylint", level=LogLevel.DEBUG)
async def pylint_lint(request: PylintRequest, pylint: Pylint) -> LintResults:
if pylint.skip:
return LintResults([], linter_name=request.name)
partitions = await Get(PylintPartitions, PylintRequest, request)
partitioned_results = await MultiGet(
Get(LintResult, PylintPartition, partition) for partition in partitions
)
return LintResults(partitioned_results, linter_name=request.name)
def rules():
return [
*collect_rules(),
UnionRule(LintTargetsRequest, PylintRequest),
*pex_from_targets.rules(),
]
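# These rules are typically activated from the backend's register.py, e.g.
# (a sketch, assuming this module is importable as `pylint_rules`):
#
#   from pants.backend.python.lint.pylint import rules as pylint_rules
#
#   def rules():
#       return [*pylint_rules.rules()]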
|
|
# Licensed under an MIT open source license - see LICENSE
import abc
import warnings
from functools import wraps
from weakref import WeakKeyDictionary
import numpy as np
from astropy.units import Quantity
from astropy.table import Table
from astropy import units as u
from astropy.wcs import WCS
from . import six
from .structure import Structure
from .flux import UnitMetadataWarning
from .progressbar import AnimatedProgressBar
__all__ = ['ppp_catalog', 'ppv_catalog', 'pp_catalog']
def memoize(func):
# cache[instance][method args] -> method result
# hold weakrefs to instances,
# to stay out of the way of the garbage collector
cache = WeakKeyDictionary()
@wraps(func)
def wrapper(self, *args):
try:
return cache[self][args]
except KeyError:
cache.setdefault(self, {})[args] = func(self, *args)
return cache[self][args]
except TypeError:
warnings.warn("Cannot memoize inputs to %s" % func)
return func(self, *args)
return wrapper
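# Usage sketch: any method whose positional arguments are hashable can be
# cached per instance, e.g.
#
#   class Example(object):
#       @memoize
#       def heavy(self, n):
#           return expensive_computation(n)   # evaluated once per (self, n)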
class MissingMetadataWarning(UserWarning):
pass
def _qsplit(q):
"""Split a potential astropy Quantity into unit/quantity"""
if isinstance(1. * q, Quantity):
return q.unit, q.value
return 1, q
def _unit(q):
"""Return the units associated with a number, array, unit, or Quantity"""
if q is None:
return None
elif isinstance(1 * q, Quantity):
return (1 * q).unit
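# For example (hypothetical values):
#
#   _qsplit(3 * u.arcsec)   # -> (Unit("arcsec"), 3.0)
#   _qsplit(3.0)            # -> (1, 3.0)
#   _unit(3 * u.arcsec)     # -> Unit("arcsec")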
class ScalarStatistic(object):
# This class does all of the heavy computation
def __init__(self, values, indices):
"""
Compute pixel-level statistics from a scalar field, sampled at specific
locations.
Parameters
----------
values : 1D ndarray
data values to use
indices : tuple of 1D arrays
Location of each element of values. The i-th array in the tuple
describes the i-th positional dimension.
"""
self.values = values.astype(np.float)
self.indices = indices
@memoize
def mom0(self):
"""The sum of the values"""
return np.nansum(self.values)
@memoize
def mom1(self):
"""The intensity-weighted mean position"""
m0 = self.mom0()
return [np.nansum(i * self.values) / m0 for i in self.indices]
@memoize
def mom2(self):
"""The intensity-weighted covariance matrix"""
mom1 = self.mom1()
mom0 = self.mom0()
v = self.values / mom0
nd = len(self.indices)
zyx = tuple(i - m for i, m in zip(self.indices, mom1))
result = np.zeros((nd, nd))
for i in range(nd):
result[i, i] = np.nansum(v * zyx[i] ** 2)
for j in range(i + 1, nd):
result[i, j] = result[j, i] = np.nansum(v * zyx[i] * zyx[j])
return result
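# Worked 1-d example (hypothetical values): values=[1, 3] at indices=([0, 2],)
# give mom0() == 4, mom1() == [1.5], and mom2() == [[0.75]], since
# 0.25 * (0 - 1.5) ** 2 + 0.75 * (2 - 1.5) ** 2 == 0.75.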
@memoize
def mom2_along(self, direction):
"""
Intensity-weighted variance/covariance along 1 or more directions.
Parameters
----------
direction : array like
One or more set of direction vectors. Need not be normalized
Returns
-------
result : array
The variance (or co-variance matrix) of the data along the
specified direction(s).
"""
w = np.atleast_2d(direction).astype(np.float)
for row in w:
row /= np.linalg.norm(row)
result = np.dot(np.dot(w, self.mom2()), w.T)
if result.size == 1:
result = np.asscalar(result)
return result
@memoize
def paxes(self):
"""
The principal axes of the data (direction of greatest elongation)
Returns
-------
result : tuple
Ordered tuple of ndarrays
Notes
-----
Each array is a normalized direction vector. The arrays
are sorted in decreasing order of elongation of the data
"""
mom2 = self.mom2()
w, v = np.linalg.eig(mom2)
order = np.argsort(w)
return tuple(v[:, o] for o in order[::-1])
@memoize
def projected_paxes(self, axes):
"""
The principal axes of a projection of the data onto a subspace
Parameters
----------
axes : array-like, (nnew, nold)
The projection to take. Each row defines a unit vector in
the new coordinate system
Returns
-------
result : tuple
Tuple of arrays (nnew items)
Notes
-----
The ordered principal axes in the new space
"""
axes = tuple(axes)
mom2 = self.mom2_along(axes)
w, v = np.linalg.eig(mom2)
order = np.argsort(w)
return tuple(v[:, o] for o in order[::-1])
@memoize
def count(self):
"""
Number of elements in the dataset.
"""
return self.values.size
def surface_area(self):
raise NotImplementedError
def perimeter(self, plane=None):
raise NotImplementedError
class VectorStatistic(object):
def __init__(self, values_tuple, indices):
raise NotImplementedError
def divergence(self):
raise NotImplementedError
def curl(self):
raise NotImplementedError
class Metadata(object):
"""
A descriptor to wrap around metadata dictionaries.
Lets classes reference self.x instead of self.metadata['x'].
"""
_restrict_types = None
def __init__(self, key, description, default=None, strict=False):
"""
Parameters
----------
key : str
Metadata name.
description : str
What the quantity describes
default : scalar
Default value if metadata not provided
strict : bool
If True, raise KeyError if metadata not provided.
This overrides default
"""
if not isinstance(key, six.string_types):
raise TypeError("Key is", key, type(key))
self.key = key
self.description = description or 'no description'
self.default = default
self.strict = strict
def __get__(self, instance, type=None):
if instance is None:
return self
try:
value = instance.metadata[self.key]
except KeyError:
if self.strict:
raise KeyError("Required metadata item not found: %s" % self)
else:
if self.default is not None:
warnings.warn("{0} ({1}) missing, defaulting to {2}".format(self.key, self.description, self.default),
MissingMetadataWarning)
value = self.default
if value is not None and self._restrict_types is not None:
if isinstance(value, self._restrict_types):
return value
else:
raise TypeError("{0} should be an instance of {1}".format(self.key, ' or '.join([x.__name__ for x in self._restrict_types])))
else:
return value
def __str__(self):
return "%s (%s)" % (self.key, self.description)
class MetadataQuantity(Metadata):
_restrict_types = (u.UnitBase, u.Quantity)
class MetadataWCS(Metadata):
_restrict_types = (WCS,)
class SpatialBase(object):
__metaclass__ = abc.ABCMeta
wavelength = MetadataQuantity('wavelength', 'Wavelength')
spatial_scale = MetadataQuantity('spatial_scale', 'Pixel width/height')
beam_major = MetadataQuantity('beam_major', 'Major FWHM of beam')
beam_minor = MetadataQuantity('beam_minor', 'Minor FWHM of beam')
data_unit = MetadataQuantity('data_unit', 'Units of the pixel values', strict=True)
wcs = MetadataWCS('wcs', 'WCS object')
@abc.abstractmethod
def _sky_paxes(self):
raise NotImplementedError()
def _world_pos(self):
xyz = self.stat.mom1()[::-1]
if self.wcs is None:
return xyz[::-1] * u.pixel
else:
# TODO: set units correctly following WCS
# We use origin=0 since the indices come from Numpy indexing
return self.wcs.all_pix2world([xyz], 0).ravel()[::-1]
@abc.abstractproperty
def flux(self):
raise NotImplementedError
@abc.abstractproperty
def x_cen(self):
raise NotImplementedError()
@abc.abstractproperty
def y_cen(self):
raise NotImplementedError()
@abc.abstractproperty
def position_angle(self):
raise NotImplementedError()
@property
def major_sigma(self):
"""
Major axis of the projection onto the position-position (PP) plane,
computed from the intensity weighted second moment in direction of
greatest elongation in the PP plane.
"""
dx = self.spatial_scale or u.pixel
a, b = self._sky_paxes()
# mom2_along returns the intensity-weighted variance along the major
# axis; the square root gives the sigma (not the full axis length).
return dx * np.sqrt(self.stat.mom2_along(tuple(a)))
@property
def minor_sigma(self):
"""
Minor axis of the projection onto the position-position (PP) plane,
computed from the intensity weighted second moment perpendicular to
the major axis in the PP plane.
"""
dx = self.spatial_scale or u.pixel
a, b = self._sky_paxes()
# mom2_along returns the intensity-weighted variance perpendicular to
# the major axis; the square root gives the sigma.
return dx * np.sqrt(self.stat.mom2_along(tuple(b)))
@property
def radius(self):
"""
Geometric mean of ``major_sigma`` and ``minor_sigma``.
"""
u, a = _qsplit(self.major_sigma)
u, b = _qsplit(self.minor_sigma)
return u * np.sqrt(a * b)
@property
def area_ellipse(self):
"""
The area of the ellipse defined by the second moments, where the
semi-major and semi-minor axes used are the HWHM (half-width at
half-maximum) derived from the moments.
"""
return np.pi * self.major_sigma * self.minor_sigma * (2.3548 * 0.5) ** 2
def to_mpl_ellipse(self, **kwargs):
"""
Returns a Matplotlib ellipse representing the first and second moments
of the structure.
Any keyword arguments are passed to :class:`~matplotlib.patches.Ellipse`
"""
from matplotlib.patches import Ellipse
return Ellipse((self.x_cen.value, self.y_cen.value),
self.major_sigma.value * 2.3548,
self.minor_sigma.value * 2.3548,
angle=self.position_angle.value,
**kwargs)
class PPVStatistic(SpatialBase):
"""
Compute properties of structures in a position-position-velocity (PPV)
cube.
Parameters
----------
structure : :class:`~astrodendro.structure.Structure` instance
The structure to compute the statistics for
metadata : dict
Key-value pairs of metadata
"""
velocity_scale = MetadataQuantity('velocity_scale', 'Velocity channel width')
vaxis = Metadata('vaxis', 'Index of velocity axis (numpy convention)', default=0)
def __init__(self, stat, metadata=None):
if isinstance(stat, Structure):
self.stat = ScalarStatistic(stat.values(subtree=True),
stat.indices(subtree=True))
else:
self.stat = stat
if len(self.stat.indices) != 3:
raise ValueError("PPVStatistic can only be used on 3-d datasets")
self.metadata = metadata or {}
def _sky_paxes(self):
vaxis = self.vaxis
ax = [(1, 0, 0), (0, 1, 0), (0, 0, 1)]
ax.pop(vaxis)
a, b = self.stat.projected_paxes(tuple(ax))
a = list(a)
a.insert(0, vaxis)
b = list(b)
b.insert(0, vaxis)
return tuple(a), tuple(b)
@property
def x_cen(self):
"""
The mean position of the structure in the x direction.
"""
p = self._world_pos()
return p[2] if self.vaxis != 2 else p[1]
@property
def y_cen(self):
"""
The mean position of the structure in the y direction.
"""
p = self._world_pos()
return p[1] if self.vaxis == 0 else p[0]
@property
def v_cen(self):
"""
The mean velocity of the structure (where the velocity axis can be
specified by the ``vaxis`` metadata parameter, which defaults to 0
following the Numpy convention - the third axis in the FITS convention).
"""
p = self._world_pos()
return p[self.vaxis]
@property
def flux(self):
"""
The integrated flux of the structure, in Jy (note that this does not
include any kind of background subtraction, and is just a plain sum of
the values in the structure, converted to Jy).
"""
from .flux import compute_flux
return compute_flux(self.stat.mom0() * self.data_unit,
u.Jy,
wavelength=self.wavelength,
spatial_scale=self.spatial_scale,
velocity_scale=self.velocity_scale,
beam_major=self.beam_major,
beam_minor=self.beam_minor)
@property
def v_rms(self):
"""
Intensity-weighted second moment of velocity (where the velocity axis
can be specified by the ``vaxis`` metadata parameter, which defaults to
0 following the Numpy convention - the third axis in the FITS
convention).
"""
dv = self.velocity_scale or u.pixel
ax = [0, 0, 0]
ax[self.vaxis] = 1
return dv * np.sqrt(self.stat.mom2_along(tuple(ax)))
@property
def position_angle(self):
"""
The position angle of sky_maj, sky_min in degrees counter-clockwise
from the +x axis (note that this is the +x axis in pixel coordinates,
which is the ``-x`` axis for conventional astronomy images).
"""
a, b = self._sky_paxes()
a = list(a)
a.pop(self.vaxis)
return np.degrees(np.arctan2(a[0], a[1])) * u.degree
@property
def area_exact(self):
"""
The exact area of the structure on the sky.
"""
dx = self.spatial_scale or u.pixel
indices = zip(*tuple(self.stat.indices[i] for i in range(3) if i != self.vaxis))
return len(set(indices)) * dx ** 2
class PPStatistic(SpatialBase):
"""
Compute properties of structures in a position-position (PP) cube.
Parameters
----------
structure : :class:`~astrodendro.structure.Structure` instance
The structure to compute the statistics for
metadata : dict
Key-value pairs of metadata
"""
def __init__(self, stat, metadata=None):
if isinstance(stat, Structure):
self.stat = ScalarStatistic(stat.values(subtree=True),
stat.indices(subtree=True))
else:
self.stat = stat
if len(self.stat.indices) != 2:
raise ValueError("PPStatistic can only be used on 2-d datasets")
self.metadata = metadata or {}
def _sky_paxes(self):
return self.stat.paxes()
@property
def flux(self):
"""
The integrated flux of the structure, in Jy (note that this does not
include any kind of background subtraction, and is just a plain sum of
the values in the structure, converted to Jy).
"""
from .flux import compute_flux
return compute_flux(self.stat.mom0() * self.data_unit,
u.Jy,
wavelength=self.wavelength,
spatial_scale=self.spatial_scale,
beam_major=self.beam_major,
beam_minor=self.beam_minor)
@property
def position_angle(self):
"""
The position angle of sky_maj, sky_min in degrees counter-clockwise
from the +x axis.
"""
a, b = self._sky_paxes()
return np.degrees(np.arctan2(a[0], a[1])) * u.degree
@property
def x_cen(self):
"""
The mean position of the structure in the x direction (in pixel
coordinates, or in world coordinates if the WCS transformation is
available in the meta-data).
"""
return self._world_pos()[1]
@property
def y_cen(self):
"""
The mean position of the structure in the y direction (in pixel
coordinates, or in world coordinates if the WCS transformation is
available in the meta-data).
"""
return self._world_pos()[0]
@property
def area_exact(self):
"""
The exact area of the structure on the sky.
"""
dx = self.spatial_scale or u.pixel
return self.stat.count() * dx ** 2
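# Usage sketch (hypothetical 2-d data; `pp_catalog` from __all__ wraps this
# for whole dendrograms):
#
#   values = image[mask]                 # 1-d array of pixel values
#   indices = np.where(mask)             # tuple of (y, x) index arrays
#   stat = PPStatistic(ScalarStatistic(values, indices),
#                      metadata={'data_unit': u.Jy})
#   stat.major_sigma, stat.position_angle, stat.area_exact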
class VolumeBase(object):
__metaclass__ = abc.ABCMeta
spatial_scale = MetadataQuantity('spatial_scale', 'Pixel width/height')
data_unit = MetadataQuantity('data_unit', 'Units of the pixel values', strict=True)
@abc.abstractmethod
def _sky_axes(self):
raise NotImplementedError()
    def _world_pos(self):
        # mom1() already returns the centroid in array index order.
        return self.stat.mom1() * u.pixel
@abc.abstractproperty
def mass(self):
raise NotImplementedError
@abc.abstractproperty
def x_cen(self):
raise NotImplementedError()
@abc.abstractproperty
def y_cen(self):
raise NotImplementedError()
@abc.abstractproperty
def z_cen(self):
raise NotImplementedError()
@abc.abstractproperty
def azimuth(self):
raise NotImplementedError()
@abc.abstractproperty
def elevation(self):
raise NotImplementedError()
@property
def a_sigma(self):
"""
        Ellipsoidal semi-principal axis 'a' in the position-position-position
(PPP) volume, computed from the intensity weighted second moment
in direction of greatest elongation in the PPP volume.
"""
dx = self.spatial_scale or u.pixel
a, b, c = self._sky_axes()
        # mom2_along returns the intensity-weighted variance along the
        # axis, so its square root is a sigma, not the full axis length.
return dx * np.sqrt(self.stat.mom2_along(a))
@property
def b_sigma(self):
"""
        Ellipsoidal semi-principal axis 'b' in the position-position-position
(PPP) volume, computed from the intensity weighted second moment
in direction of second largest elongation in the PPP volume.
"""
dx = self.spatial_scale or u.pixel
a, b, c = self._sky_axes()
        # As for a_sigma: the square root of the variance along the axis,
        # i.e. a sigma rather than an axis length.
return dx * np.sqrt(self.stat.mom2_along(b))
@property
def c_sigma(self):
"""
        Ellipsoidal semi-principal axis 'c' in the position-position-position
(PPP) volume, computed from the intensity weighted second moment
in direction of smallest elongation in the PPP volume.
"""
dx = self.spatial_scale or u.pixel
a, b, c = self._sky_axes()
        # As for a_sigma: the square root of the variance along the axis,
        # i.e. a sigma rather than an axis length.
return dx * np.sqrt(self.stat.mom2_along(c))
@property
def volume_ellipsoid(self):
"""
The volume of the ellipsoid defined by the second moments, where the
principal axes used are the HWHM (half-width at
half-maximum) derived from the moments.
"""
return 4./3 * np.pi * self.a_sigma * self.b_sigma * self.c_sigma * (2.3548 * 0.5) ** 3
class PPPStatistic(VolumeBase):
def __init__(self, stat, metadata=None):
"""
Derive properties from PPP density.
Parameters
----------
        stat : ScalarStatistic or Structure instance
        metadata : dict, optional
            Key-value pairs of metadata
"""
if isinstance(stat, Structure):
self.stat = ScalarStatistic(stat.values(subtree=True),
stat.indices(subtree=True))
else:
self.stat = stat
if len(self.stat.indices) != 3:
raise ValueError("PPPStatistic can only be used on 3-d datasets")
self.metadata = metadata or {}
def _sky_axes(self):
ax = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
a, b, c = self.stat.projected_paxes(ax)
a = list(a)
b = list(b)
c = list(c)
return a, b, c
@property
def peak_density(self):
"""
Maximum density in original units.
"""
return np.nanmax(self.stat.values) * self.data_unit
@property
def mass(self):
from .mass import compute_mass
return compute_mass(self.stat.mom0() * self.data_unit,
u.Msun,
spatial_scale=self.spatial_scale)
@property
def volume_exact(self):
"""
The exact volume of the structure in PPP.
"""
dx = self.spatial_scale or u.pixel
return self.stat.count() * dx**3
@property
def radius(self):
"""
Equivalent radius of the sphere occupying the same volume
of the structure.
"""
return (3. * self.volume_exact / (4. * np.pi))**(1./3)
@property
def surface_area(self):
"""
        Area of the circle whose radius is the equivalent radius
        estimated in PPP.
"""
return np.pi * self.radius ** 2
@property
def surface_density(self):
"""
        Surface density: the mass divided by the equivalent circular area.
"""
return self.mass / self.surface_area
@property
def azimuth(self):
"""
The position angle of a_axis in degrees counter-clockwise
from the +y axis.
"""
a, b, c = self._sky_axes()
return np.degrees(np.arctan2(a[1], a[2])) * u.degree
@property
def elevation(self):
"""
The position angle of a_axis in degrees clockwise
from the +z axis.
"""
a, b, c = self._sky_axes()
return np.degrees(np.arccos(a[0]/np.linalg.norm(a))) * u.degree
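    # Geometry check for the two angles above: a principal axis
    # a = (1, 0, 0) lies along the first (z) axis, giving elevation 0 deg,
    # while a = (0, 1, 0) gives elevation 90 deg and azimuth 90 deg.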
@property
def x_cen(self):
"""
The mean position of the structure in the x direction (in pixel
coordinates).
"""
return self._world_pos()[2]
@property
def y_cen(self):
"""
The mean position of the structure in the y direction (in pixel
coordinates).
"""
return self._world_pos()[1]
@property
def z_cen(self):
"""
The mean position of the structure in the z direction (in pixel
coordinates).
"""
return self._world_pos()[0]
def _make_catalog(structures, fields, metadata, statistic, verbose=False):
"""
Make a catalog from a list of structures
"""
result = None
try:
shape_tuple = structures.data.shape
except AttributeError:
shape_tuple = None
if verbose:
print("Computing catalog for {0} structures".format(len(structures)))
progress_bar = AnimatedProgressBar(end=max(len(structures), 1), width=40, fill='=', blank=' ')
for struct in structures:
values = struct.values(subtree=True)
indices = np.copy(struct.indices(subtree=True))
if shape_tuple is not None:
for index_array, shape in zip(indices, shape_tuple):
# catch simple cases where a structure wraps around the image boundary
i2 = np.where(index_array < shape/2, index_array+shape, index_array)
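                # e.g. shape=100 and indices [0, 1, 99] map to
                # i2 = [100, 101, 99]: ptp falls from 99 to 2, so the
                # wrapped coordinates are kept.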
if i2.ptp() < index_array.ptp(): # more compact with wrapping. Use this
index_array[:] = i2
stat = ScalarStatistic(values, indices)
stat = statistic(stat, metadata)
        row = dict((lbl, getattr(stat, lbl))
                   for lbl in fields)
row.update(_idx=struct.idx)
# first row
if result is None:
sorted_row_keys = sorted(row.keys())
try:
result = Table(names=sorted_row_keys,
dtype=[int if x == '_idx' else float for x in sorted_row_keys])
except TypeError: # dtype was called dtypes in older versions of Astropy
result = Table(names=sorted_row_keys,
dtypes=[int if x == '_idx' else float for x in sorted_row_keys])
for k, v in row.items():
try: # Astropy API change
result[k].unit = _unit(v)
except AttributeError:
result[k].units = _unit(v)
# astropy.table.Table should in future support setting row items from
# quantities, but for now we need to strip off the quantities
new_row = {}
for x in row:
if row[x] is not None: # in Astropy 0.3+ we no longer need to exclude None items
if isinstance(row[x], Quantity):
new_row[x] = row[x].value
else:
new_row[x] = row[x]
result.add_row(new_row)
# Print stats
if verbose:
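            # AnimatedProgressBar.__add__ advances the bar in place.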
progress_bar + 1
progress_bar.show_progress()
result.sort('_idx')
if verbose:
progress_bar.progress = 100 # Done
progress_bar.show_progress()
print("") # newline
return result
def ppp_catalog(structures, metadata, fields=None, verbose=True):
"""
Iterate over a collection of position-position-position (PPP) structures,
extracting several quantities from each, and building a catalog.
Parameters
----------
structures : iterable of Structures
The structures to catalog (e.g., a dendrogram)
metadata : dict
The metadata used to compute the catalog
fields : list of strings, optional
The quantities to extract. If not provided,
        defaults to all PPP statistics
verbose : bool, optional
If True (the default), will generate warnings
about missing metadata
Returns
-------
table : a :class:`~astropy.table.table.Table` instance
The resulting catalog
"""
fields = fields or ['a_sigma', 'b_sigma', 'c_sigma', 'radius', 'volume_ellipsoid', 'volume_exact',
'surface_area', 'surface_density', 'azimuth', 'elevation',
'x_cen', 'y_cen', 'z_cen', 'mass', 'peak_density']
with warnings.catch_warnings():
warnings.simplefilter("once" if verbose else 'ignore', category=MissingMetadataWarning)
        return _make_catalog(structures, fields, metadata, PPPStatistic, verbose)
def ppv_catalog(structures, metadata, fields=None, verbose=True):
"""
Iterate over a collection of position-position-velocity (PPV) structures,
extracting several quantities from each, and building a catalog.
Parameters
----------
structures : iterable of Structures
The structures to catalog (e.g., a dendrogram)
metadata : dict
The metadata used to compute the catalog
fields : list of strings, optional
The quantities to extract. If not provided,
defaults to all PPV statistics
verbose : bool, optional
If True (the default), will generate warnings
about missing metadata
Returns
-------
table : a :class:`~astropy.table.table.Table` instance
The resulting catalog
"""
fields = fields or ['major_sigma', 'minor_sigma', 'radius', 'area_ellipse', 'area_exact',
'position_angle', 'v_rms', 'x_cen', 'y_cen', 'v_cen', 'flux']
with warnings.catch_warnings():
warnings.simplefilter("once" if verbose else 'ignore', category=MissingMetadataWarning)
warnings.simplefilter("once" if verbose else 'ignore', category=UnitMetadataWarning)
return _make_catalog(structures, fields, metadata, PPVStatistic, verbose)
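# Usage sketch for ppv_catalog. The Dendrogram import and the `cube` array
# are assumptions, not defined in this module; the metadata keys are the
# ones consumed by PPVStatistic above.
#
#     from astrodendro import Dendrogram
#     import astropy.units as u
#     d = Dendrogram.compute(cube, min_value=2.0)
#     metadata = {'data_unit': u.Jy,
#                 'spatial_scale': 1.0 * u.arcsec,
#                 'velocity_scale': 0.5 * u.km / u.s,
#                 'beam_major': 2.0 * u.arcsec,
#                 'beam_minor': 2.0 * u.arcsec,
#                 'wavelength': 2.6 * u.mm,
#                 'vaxis': 0}
#     cat = ppv_catalog(d, metadata)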
def pp_catalog(structures, metadata, fields=None, verbose=True):
"""
Iterate over a collection of position-position (PP) structures, extracting
several quantities from each, and building a catalog.
Parameters
----------
structures : iterable of Structures
The structures to catalog (e.g., a dendrogram)
metadata : dict
The metadata used to compute the catalog
fields : list of strings, optional
The quantities to extract. If not provided,
        defaults to all PP statistics
verbose : bool, optional
If True (the default), will generate warnings
about missing metadata
Returns
-------
table : a :class:`~astropy.table.table.Table` instance
The resulting catalog
"""
fields = fields or ['major_sigma', 'minor_sigma', 'radius', 'area_ellipse', 'area_exact',
'position_angle', 'x_cen', 'y_cen', 'flux']
with warnings.catch_warnings():
warnings.simplefilter("once" if verbose else 'ignore', category=MissingMetadataWarning)
return _make_catalog(structures, fields, metadata, PPStatistic, verbose)
|
|
from unittest import TestCase, mock
from dynamic_fixtures.fixtures.exceptions import FixtureNotFound, MultipleFixturesFound
from dynamic_fixtures.fixtures.loader import Graph
from dynamic_fixtures.fixtures.runner import LoadFixtureRunner
from tests.mixins import MockTestCaseMixin
class LoadFixtureRunnerTestCase(MockTestCaseMixin, TestCase):
def setUp(self):
try:
self.loader_mock = self.setup_mock(
"dynamic_fixtures.fixtures.runner.Loader"
)
self.graph_mock = self.setup_mock("dynamic_fixtures.fixtures.runner.Graph")
self.transaction_mock = self.setup_mock(
"dynamic_fixtures.fixtures.runner.transaction"
)
except AttributeError:
# Python 3.4.2 breaks on copying the __module__ when not available
# on the mocked item.
print(mock.__version__)
def test_init(self):
"""
        Case: The runner gets initialized
        Expected: The Loader gets instantiated and the fixtures get loaded
        from disk
"""
LoadFixtureRunner()
self.loader_mock.assert_called_once_with()
self.loader_mock.return_value.load_disk.assert_called_once_with()
def test_graph(self):
"""
        Case: The graph gets requested
        Expected: The graph gets instantiated once and filled with
        dependency data from the Loader
"""
loader = LoadFixtureRunner()
fixture_mock = mock.MagicMock()
fixture_mock.dependencies = ["b", "c"]
self.loader_mock.return_value.disk_fixtures = {
"a": fixture_mock,
"b": mock.MagicMock(),
"c": mock.MagicMock(),
}
self.assertFalse(self.graph_mock.called)
graph = loader.graph
self.assertIsInstance(graph, Graph)
self.graph_mock.assert_called_once_with()
self.graph_mock.return_value.add_node.assert_has_calls(
[mock.call("a"), mock.call("b"), mock.call("c")], any_order=True
)
self.graph_mock.return_value.add_dependency.assert_has_calls(
[mock.call("a", "b"), mock.call("a", "c")]
)
# Retrieve graph for the 2nd time.
graph = loader.graph
self.assertIsInstance(graph, Graph)
# Should not be instantiated again.
self.assertEqual(self.graph_mock.call_count, 1)
def test_get_app_nodes(self):
"""
Case: Get filtered app nodes
        Expected: Only nodes belonging to the given app are returned
"""
runner = LoadFixtureRunner()
graph = self.graph_mock()
graph.nodes = [
("app_one", "foo"),
("app_one", "bar"),
("app_two", "foo"),
]
runner._graph = graph
app_nodes = runner.get_app_nodes("app_one")
self.assertListEqual(app_nodes, [("app_one", "foo"), ("app_one", "bar")])
def test_get_fixture_nodes(self):
"""
Case: Fixture nodes get requested
        Expected: Only nodes whose name starts with the given prefix are returned
"""
runner = LoadFixtureRunner()
runner.get_app_nodes = mock.MagicMock(
return_value=[
("app_one", "0001_my_fixture"),
("app_one", "0002_my_other_fixture"),
("app_one", "0003_my_other_fixture"),
]
)
result = runner.get_fixture_node(app_label="app_one", fixture_prefix="0001")
self.assertListEqual(result, [("app_one", "0001_my_fixture")])
runner.get_app_nodes.assert_called_once_with(app_label="app_one")
def test_get_fixture_nodes_none_returned(self):
"""
        Case: Fixture nodes get requested but none match
        Expected: An error is raised
"""
runner = LoadFixtureRunner()
runner.get_app_nodes = mock.MagicMock(
return_value=[
("app_one", "0001_my_fixture"),
("app_one", "0002_my_other_fixture"),
("app_one", "0003_my_other_fixture"),
]
)
with self.assertRaises(FixtureNotFound) as e:
runner.get_fixture_node(app_label="app_one", fixture_prefix="0006")
self.assertEqual(
"Fixture with prefix '0006' not found in app " "'app_one'", str(e.exception)
)
def test_get_fixture_nodes_multiple_returned(self):
"""
        Case: Fixture nodes get requested but multiple match
        Expected: An error is raised
"""
runner = LoadFixtureRunner()
runner.get_app_nodes = mock.MagicMock(
return_value=[
("app_one", "0001_my_fixture"),
("app_one", "0001_my_other_fixture"),
("app_one", "0003_my_other_fixture"),
]
)
with self.assertRaises(MultipleFixturesFound) as e:
runner.get_fixture_node(app_label="app_one", fixture_prefix="0001")
self.assertEqual(
"The following fixtures with prefix '0001' are found"
" in app 'app_one': 0001_my_fixture, "
"0001_my_other_fixture",
str(e.exception),
)
def test_load_fixtures_return_value_two_fixtures(self):
"""
        Case: Two fixtures get loaded
Expected: 2 as return value
"""
runner = LoadFixtureRunner()
runner._graph = self.graph_mock()
runner._graph.resolve_node.return_value = [
("app_one", "0001_my_fixture"),
("app_one", "0002_my_other_fixture"),
]
runner.loader = self.loader_mock()
runner.loader.disk_fixtures = {
("app_one", "0001_my_fixture"): mock.MagicMock(),
("app_one", "0002_my_other_fixture"): mock.MagicMock(),
}
self.assertEqual(runner.load_fixtures(), 2)
def test_load_fixtures_return_value_no_fixtures(self):
"""
Case: No fixtures found
Expected: 0 as return value
"""
runner = LoadFixtureRunner()
runner._graph = self.graph_mock()
runner._graph.resolve_node.return_value = []
runner.loader = self.loader_mock()
runner.loader.disk_fixtures = {}
self.assertEqual(runner.load_fixtures(), 0)
def test_load_fixtures(self):
"""
Case: Fixtures get loaded
        Expected: For every fixture the load method gets called
"""
runner = LoadFixtureRunner()
runner._graph = self.graph_mock()
runner._graph.resolve_node.return_value = [
("app_one", "0001_my_fixture"),
("app_one", "0002_my_other_fixture"),
("app_two", "0001_my_other_fixture"),
]
runner.loader = self.loader_mock()
runner.loader.disk_fixtures = {
("app_one", "0001_my_fixture"): mock.MagicMock(),
("app_one", "0002_my_other_fixture"): mock.MagicMock(),
("app_two", "0001_my_other_fixture"): mock.MagicMock(),
}
call_back = mock.Mock(return_value=None)
runner.load_fixtures(progress_callback=call_back)
runner.graph.resolve_node.assert_called_once_with()
for fixture_mock in runner.loader.disk_fixtures.values():
fixture_mock.load.assert_called_once_with()
call_back.assert_has_calls(
[
mock.call("load_start", ("app_one", "0001_my_fixture")),
mock.call("load_success", ("app_one", "0001_my_fixture"), mock.ANY),
mock.call("load_start", ("app_one", "0002_my_other_fixture")),
mock.call(
"load_success", ("app_one", "0002_my_other_fixture"), mock.ANY
),
mock.call("load_start", ("app_two", "0001_my_other_fixture")),
mock.call(
"load_success", ("app_two", "0001_my_other_fixture"), mock.ANY
),
]
)
def test_load_fixtures_with_given_nodes(self):
"""
        Case: load_fixtures gets called with a given list of nodes
        Expected: the resolve_nodes method gets called with the list of nodes
"""
runner = LoadFixtureRunner()
runner._graph = self.graph_mock()
runner._graph.resolve_nodes.return_value = [
("app_one", "0001_my_fixture"),
("app_one", "0002_my_other_fixture"),
]
runner.loader = self.loader_mock()
runner.loader.disk_fixtures = {
("app_one", "0001_my_fixture"): mock.MagicMock(),
("app_one", "0002_my_other_fixture"): mock.MagicMock(),
("app_two", "0001_my_other_fixture"): mock.MagicMock(),
}
runner.load_fixtures(
nodes=[("app_one", "0001_my_fixture"), ("app_one", "0002_my_other_fixture")]
)
runner.graph.resolve_nodes.assert_called_once_with(
[("app_one", "0001_my_fixture"), ("app_one", "0002_my_other_fixture")]
)
runner.loader.disk_fixtures[
("app_one", "0001_my_fixture")
].load.assert_called_once_with()
runner.loader.disk_fixtures[
("app_one", "0002_my_other_fixture")
].load.assert_called_once_with()
self.assertFalse(
runner.loader.disk_fixtures[
("app_two", "0001_my_other_fixture")
].load.called
)
|
|
"""
How do we implement an API for making modules that build
a GSPN and then run the transitions within?
"""
import logging
logger=logging.getLogger(__file__)
class Place:
"""
A place has a unique key and holds tokens.
"""
def __init__(self, key):
self.key=key
self.tokens=list()
class TokenFlow:
"""
A TokenFlow represents movement of tokens among places to
and from a transition. It encapsulates
1. stoichiometry
2. policy of which token is taken (first, last, random, specific)
3. policy of how and whether new tokens are created
"""
def __init__(self, take_edge, take_cnt, give_edge, give_cnt):
self.take_edge=take_edge
self.take_cnt=take_cnt
self.give_edge=give_edge
self.give_cnt=give_cnt
    def fire(self, localstate):
        take=list()
        for i in range(self.take_cnt):
            take.append(localstate[self.take_edge].pop())
        for j in range(self.give_cnt):
            if len(take)>0:
                localstate[self.give_edge].append(take.pop())
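# Firing semantics sketch: with localstate as a list of token lists indexed
# by edge, a flow moves up to give_cnt of the taken tokens from take_edge
# to give_edge.
#
#     localstate = [["t1", "t2"], []]
#     TokenFlow(take_edge=0, take_cnt=1, give_edge=1, give_cnt=1).fire(localstate)
#     # localstate is now [["t1"], ["t2"]]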
class TransitionModifier:
"""
A TransitionModifier is part of a transition used to change
its enabling or hazard rate.
"""
def __init__(self, token_flows):
self.flows=token_flows
def places(self):
return None
def enabled(self, globalstate, localstate, offset, te, t0, rng):
"""
The offset is an integer offset into which integer-indexed
part of the local state corresponds to places required by
this TransitionModifier.
"""
return False
    def hazard_modifier(self, globalstate, localstate, t0, rng):
        return 1.0
class Transition:
"""
Transition keys aren't unique.
"""
def __init__(self, key, token_flows, stochastic_variable):
self.key=key
self.flows=token_flows
self.stochastic=stochastic_variable
    def enabled(self, globalstate, localstate, te, t0, rng):
        """
        globalstate would have the scenario.
        localstate is an integer-indexed set of edges to local marking.
        te is the enabling time, if the transition was already enabled.
        t0 is current time.
        rng is random number generator
"""
return False
def fire(self, globalstate, localstate, t0, rng):
"""
globalstate would have the scenario.
localstate is an integer-indexed set of edges to local marking.
t0 is current time.
rng is random number generator
"""
pass
class StoichiometricTransition:
"""
This transition enforces stoichiometric coefficients and no more.
But there may be more than one token type, each with different
stoichiometry, expressed in a different flow.
When enabled() is called, if the transition was already enabled,
it compares the current invariant with the previous value to
determine whether to set a new te.
"""
def __init__(self, key, token_flows, stochastic_variable):
"""
key is the non-unique id for this transition.
token_flows is a list of TokenFlows.
stochastic_variable is a stochastic variable.
"""
self.key=key
self.flows=token_flows
self.stochastic=stochastic_variable
    def enabled(self, globalstate, localstate, te, t0, rng):
"""
globalstate would have the scenario.
localstate is an integer-indexed set of edges to local marking.
te is enabling time.
t0 is current time.
rng is random number generator
returns distribution, and an invariant.
"""
for f in self.flows:
            if len(localstate[f.take_edge])<f.take_cnt:
return None
return (self.stochastic.build(te), [])
def fire(self, globalstate, localstate, t0, rng):
"""
globalstate would have the scenario.
localstate is an integer-indexed set of edges to local marking.
t0 is current time.
rng is random number generator
"""
for f in self.flows:
f.fire(localstate)
class ModifiedTransition(Transition):
"""
This is a transition that has been modified by TransitionModifier objects.
"""
def __init__(self, base_transition):
self.base_transition=base_transition
self.modifiers=list()
self.key=self.base_transition.key
self.flows=base_transition.flows
def enabled(self, globalstate, localstate, te, t0, rng):
        b_enabled=self.base_transition.enabled(globalstate, localstate,
                                               te, t0, rng)
for m in self.modifiers:
if not m.enabled(globalstate, localstate, te, t0, rng):
b_enabled=False
modifier=1.0
for m in self.modifiers:
modifier*=m.hazard_modifier(globalstate, localstate, t0, rng)
        hazard=self.base_transition.hazard(globalstate, localstate, t0,
                                           rng, modifier)
return hazard, b_enabled
def fire(self, globalstate, localstate, t0, rng):
pass
class GSPN:
"""
A GSPN is a bipartite graph between places and transitions
and a directed dependency graph for transitions.
Places have unique keys. Transitions have non-unique keys.
Transitions and places both have integer ids which are indices
into the adjacency list.
"""
def __init__(self):
# places will be a list of lists, an adjacency list
self.p=list()
# transitions will be a list of lists, an adjacency list
self.t=list()
self.p_key_to_id=dict()
#### Construction
def add_place(self, pkey):
pid=len(self.p)
self.p.append([Place(pkey), list()])
self.p_key_to_id[pkey]=pid
return pid
def add_transition(self, transition, place_keys, dep_keys):
"""
        place_keys are the places this transition reads or writes.
        dep_keys name the places whose marking this transition depends on;
        they populate the dependency graph used by dependent_transitions().
"""
tid=len(self.t)
# each entry is the transition, the places, the dependencies.
transition_entry=[transition, list(), list()]
self.t.append(transition_entry)
for pkey in place_keys:
pid=self.p_key_to_id[pkey]
self.p[pid][1].append(tid)
transition_entry[1].append(pid)
for dkey in dep_keys:
transition_entry[2].append(self.p_key_to_id[dkey])
#### Access
def transition_places(self, tid):
"""
tid is an integer transition id.
returns list of place ids, integer indices into places.
"""
return self.t[tid][1]
def transition_dependency(self, tid):
return self.t[tid][2]
def dependent_transitions(self, tid):
dep_trans=set()
for pid in self.t[tid][2]:
dep_trans.update(self.p[pid][1])
return dep_trans
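# Construction sketch using the classes above; the stochastic-variable
# object `expo` is an assumption.
#
#     net = GSPN()
#     net.add_place("susceptible")
#     net.add_place("infected")
#     flow = TokenFlow(take_edge=0, take_cnt=1, give_edge=1, give_cnt=1)
#     infect = StoichiometricTransition("infect", [flow], expo)
#     net.add_transition(infect, ["susceptible", "infected"], ["susceptible"])
#     net.dependent_transitions(0)  # transitions to re-check after firing 0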
class LRCPProcess:
"""
This process uses the GSPN for places and transitions
but doesn't restrict what enables a transition or what the transition
does when it fires.
"""
    def __init__(self):
        self.gspn=GSPN()
        self.current_time=0
        # Scenario-level state passed to transitions; filled in by callers.
        self.globalstate=None
# Builder methods pass through to GSPN.
    def add_place(self, pkey):
        self.gspn.add_place(pkey)
    def add_transition(self, transition, place_keys):
        # No dependency places in the unrestricted process.
        self.gspn.add_transition(transition, place_keys, [])
# Set initial marking.
def add_token(self, pkey, token):
self.gspn.p[self.gspn.p_key_to_id[pkey]][0].tokens.append(token)
    def transition_distribution(self, transition_entry, rng):
        transition=transition_entry[0]
        places=transition_entry[1]
        dist=transition.distribution(self.globalstate, places,
            self.current_time, rng)
        return dist
    def enabled_transitions(self, functor, rng):
        results=[0]*len(self.gspn.t)
        for idx, tentry in enumerate(self.gspn.t):
            results[idx]=functor(self.transition_distribution(tentry, rng))
        return results
class GSPNProcess:
def __init__(self):
self.gspn=GSPN()
self.current_time=0
# Builder methods pass through to GSPN.
    def add_place(self, pkey):
        self.gspn.add_place(pkey)
def add_transition(self, transition, place_keys, dep_keys):
self.gspn.add_transition(transition, place_keys, dep_keys)
# Set initial marking.
def add_token(self, pkey, token):
self.gspn.p[self.gspn.p_key_to_id[pkey]][0].tokens.append(token)
# These are for sampling.
def stoichiometry_satisfied(self, tid):
"""
Check that input stoichiometry is satisfied.
"""
satisfied=True
transition_entry=self.gspn.t[tid]
places=transition_entry[1]
for f in transition_entry[0].flows:
            if len(self.gspn.p[f.take_edge][0].tokens)<f.take_cnt:
satisfied=False
return satisfied
def transition_distribution(self, tid, te):
pass
    def enabled_transitions(self):
        enabled_t=list()
        for tid in range(len(self.gspn.t)):
            if self.stoichiometry_satisfied(tid):
                enabled_t.append(tid)
        return enabled_t
|
|
from __future__ import unicode_literals
from botocore.exceptions import ClientError, ParamValidationError
import boto3
import sure # noqa
from moto import mock_ec2, mock_kms, mock_rds2
@mock_rds2
def test_create_database():
conn = boto3.client('rds', region_name='us-west-2')
database = conn.create_db_instance(DBInstanceIdentifier='db-master-1',
AllocatedStorage=10,
Engine='postgres',
DBName='staging-postgres',
DBInstanceClass='db.m1.small',
LicenseModel='license-included',
MasterUsername='root',
MasterUserPassword='hunter2',
Port=1234,
DBSecurityGroups=["my_sg"])
database['DBInstance']['AllocatedStorage'].should.equal(10)
database['DBInstance']['DBInstanceClass'].should.equal("db.m1.small")
database['DBInstance']['LicenseModel'].should.equal("license-included")
database['DBInstance']['MasterUsername'].should.equal("root")
database['DBInstance']['DBSecurityGroups'][0][
'DBSecurityGroupName'].should.equal('my_sg')
database['DBInstance']['DBInstanceArn'].should.equal(
'arn:aws:rds:us-west-2:1234567890:db:db-master-1')
database['DBInstance']['DBInstanceStatus'].should.equal('available')
database['DBInstance']['DBName'].should.equal('staging-postgres')
database['DBInstance']['DBInstanceIdentifier'].should.equal("db-master-1")
@mock_rds2
def test_stop_database():
conn = boto3.client('rds', region_name='us-west-2')
database = conn.create_db_instance(DBInstanceIdentifier='db-master-1',
AllocatedStorage=10,
Engine='postgres',
DBName='staging-postgres',
DBInstanceClass='db.m1.small',
LicenseModel='license-included',
MasterUsername='root',
MasterUserPassword='hunter2',
Port=1234,
DBSecurityGroups=["my_sg"])
mydb = conn.describe_db_instances(DBInstanceIdentifier=database['DBInstance']['DBInstanceIdentifier'])['DBInstances'][0]
mydb['DBInstanceStatus'].should.equal('available')
    # stopping the database should shut it down
response = conn.stop_db_instance(DBInstanceIdentifier=mydb['DBInstanceIdentifier'])
response['ResponseMetadata']['HTTPStatusCode'].should.equal(200)
response['DBInstance']['DBInstanceStatus'].should.equal('shutdown')
    # a client error should be raised when stopping an already stopped database
conn.stop_db_instance.when.called_with(DBInstanceIdentifier=mydb['DBInstanceIdentifier']).should.throw(ClientError)
# test stopping a stopped database with snapshot should error and no snapshot should exist for that call
conn.stop_db_instance.when.called_with(DBInstanceIdentifier=mydb['DBInstanceIdentifier'], DBSnapshotIdentifier='rocky4570-rds-snap').should.throw(ClientError)
response = conn.describe_db_snapshots()
response['DBSnapshots'].should.equal([])
@mock_rds2
def test_start_database():
conn = boto3.client('rds', region_name='us-west-2')
database = conn.create_db_instance(DBInstanceIdentifier='db-master-1',
AllocatedStorage=10,
Engine='postgres',
DBName='staging-postgres',
DBInstanceClass='db.m1.small',
LicenseModel='license-included',
MasterUsername='root',
MasterUserPassword='hunter2',
Port=1234,
DBSecurityGroups=["my_sg"])
mydb = conn.describe_db_instances(DBInstanceIdentifier=database['DBInstance']['DBInstanceIdentifier'])['DBInstances'][0]
mydb['DBInstanceStatus'].should.equal('available')
# test starting an already started database should error
conn.start_db_instance.when.called_with(DBInstanceIdentifier=mydb['DBInstanceIdentifier']).should.throw(ClientError)
# stop and test start - should go from shutdown to available, create snapshot and check snapshot
response = conn.stop_db_instance(DBInstanceIdentifier=mydb['DBInstanceIdentifier'], DBSnapshotIdentifier='rocky4570-rds-snap')
response['ResponseMetadata']['HTTPStatusCode'].should.equal(200)
response['DBInstance']['DBInstanceStatus'].should.equal('shutdown')
response = conn.describe_db_snapshots()
response['DBSnapshots'][0]['DBSnapshotIdentifier'].should.equal('rocky4570-rds-snap')
response = conn.start_db_instance(DBInstanceIdentifier=mydb['DBInstanceIdentifier'])
response['ResponseMetadata']['HTTPStatusCode'].should.equal(200)
response['DBInstance']['DBInstanceStatus'].should.equal('available')
# starting database should not remove snapshot
response = conn.describe_db_snapshots()
response['DBSnapshots'][0]['DBSnapshotIdentifier'].should.equal('rocky4570-rds-snap')
# test stopping database, create snapshot with existing snapshot already created should throw error
conn.stop_db_instance.when.called_with(DBInstanceIdentifier=mydb['DBInstanceIdentifier'], DBSnapshotIdentifier='rocky4570-rds-snap').should.throw(ClientError)
# test stopping database not invoking snapshot should succeed.
response = conn.stop_db_instance(DBInstanceIdentifier=mydb['DBInstanceIdentifier'])
response['ResponseMetadata']['HTTPStatusCode'].should.equal(200)
response['DBInstance']['DBInstanceStatus'].should.equal('shutdown')
@mock_rds2
def test_fail_to_stop_multi_az():
conn = boto3.client('rds', region_name='us-west-2')
database = conn.create_db_instance(DBInstanceIdentifier='db-master-1',
AllocatedStorage=10,
Engine='postgres',
DBName='staging-postgres',
DBInstanceClass='db.m1.small',
LicenseModel='license-included',
MasterUsername='root',
MasterUserPassword='hunter2',
Port=1234,
DBSecurityGroups=["my_sg"],
MultiAZ=True)
mydb = conn.describe_db_instances(DBInstanceIdentifier=database['DBInstance']['DBInstanceIdentifier'])['DBInstances'][0]
mydb['DBInstanceStatus'].should.equal('available')
    # multi-az databases aren't allowed to be shut down at this time.
conn.stop_db_instance.when.called_with(DBInstanceIdentifier=mydb['DBInstanceIdentifier']).should.throw(ClientError)
    # multi-az databases aren't allowed to be started up at this time.
conn.start_db_instance.when.called_with(DBInstanceIdentifier=mydb['DBInstanceIdentifier']).should.throw(ClientError)
@mock_rds2
def test_fail_to_stop_readreplica():
conn = boto3.client('rds', region_name='us-west-2')
database = conn.create_db_instance(DBInstanceIdentifier='db-master-1',
AllocatedStorage=10,
Engine='postgres',
DBName='staging-postgres',
DBInstanceClass='db.m1.small',
LicenseModel='license-included',
MasterUsername='root',
MasterUserPassword='hunter2',
Port=1234,
DBSecurityGroups=["my_sg"])
replica = conn.create_db_instance_read_replica(DBInstanceIdentifier="db-replica-1",
SourceDBInstanceIdentifier="db-master-1",
DBInstanceClass="db.m1.small")
mydb = conn.describe_db_instances(DBInstanceIdentifier=replica['DBInstance']['DBInstanceIdentifier'])['DBInstances'][0]
mydb['DBInstanceStatus'].should.equal('available')
# read-replicas are not allowed to be stopped at this time.
conn.stop_db_instance.when.called_with(DBInstanceIdentifier=mydb['DBInstanceIdentifier']).should.throw(ClientError)
# read-replicas are not allowed to be started at this time.
conn.start_db_instance.when.called_with(DBInstanceIdentifier=mydb['DBInstanceIdentifier']).should.throw(ClientError)
@mock_rds2
def test_get_databases():
conn = boto3.client('rds', region_name='us-west-2')
instances = conn.describe_db_instances()
list(instances['DBInstances']).should.have.length_of(0)
conn.create_db_instance(DBInstanceIdentifier='db-master-1',
AllocatedStorage=10,
                            DBInstanceClass='db.m1.small',
                            Engine='postgres',
MasterUsername='root',
MasterUserPassword='hunter2',
Port=1234,
DBSecurityGroups=['my_sg'])
conn.create_db_instance(DBInstanceIdentifier='db-master-2',
AllocatedStorage=10,
                            DBInstanceClass='db.m1.small',
                            Engine='postgres',
MasterUsername='root',
MasterUserPassword='hunter2',
Port=1234,
DBSecurityGroups=['my_sg'])
instances = conn.describe_db_instances()
list(instances['DBInstances']).should.have.length_of(2)
instances = conn.describe_db_instances(DBInstanceIdentifier="db-master-1")
list(instances['DBInstances']).should.have.length_of(1)
instances['DBInstances'][0][
'DBInstanceIdentifier'].should.equal("db-master-1")
instances['DBInstances'][0]['DBInstanceArn'].should.equal(
'arn:aws:rds:us-west-2:1234567890:db:db-master-1')
@mock_rds2
def test_get_databases_paginated():
conn = boto3.client('rds', region_name="us-west-2")
for i in range(51):
conn.create_db_instance(AllocatedStorage=5,
Port=5432,
DBInstanceIdentifier='rds%d' % i,
DBInstanceClass='db.t1.micro',
Engine='postgres')
resp = conn.describe_db_instances()
resp["DBInstances"].should.have.length_of(50)
resp["Marker"].should.equal(resp["DBInstances"][-1]['DBInstanceIdentifier'])
resp2 = conn.describe_db_instances(Marker=resp["Marker"])
resp2["DBInstances"].should.have.length_of(1)
@mock_rds2
def test_describe_non_existant_database():
conn = boto3.client('rds', region_name='us-west-2')
conn.describe_db_instances.when.called_with(
DBInstanceIdentifier="not-a-db").should.throw(ClientError)
@mock_rds2
def test_modify_db_instance():
conn = boto3.client('rds', region_name='us-west-2')
database = conn.create_db_instance(DBInstanceIdentifier='db-master-1',
AllocatedStorage=10,
                                       DBInstanceClass='db.m1.small',
                                       Engine='postgres',
MasterUsername='root',
MasterUserPassword='hunter2',
Port=1234,
DBSecurityGroups=['my_sg'])
instances = conn.describe_db_instances(DBInstanceIdentifier='db-master-1')
instances['DBInstances'][0]['AllocatedStorage'].should.equal(10)
conn.modify_db_instance(DBInstanceIdentifier='db-master-1',
AllocatedStorage=20,
ApplyImmediately=True)
instances = conn.describe_db_instances(DBInstanceIdentifier='db-master-1')
instances['DBInstances'][0]['AllocatedStorage'].should.equal(20)
@mock_rds2
def test_modify_non_existant_database():
conn = boto3.client('rds', region_name='us-west-2')
conn.modify_db_instance.when.called_with(DBInstanceIdentifier='not-a-db',
AllocatedStorage=20,
ApplyImmediately=True).should.throw(ClientError)
@mock_rds2
def test_reboot_db_instance():
conn = boto3.client('rds', region_name='us-west-2')
conn.create_db_instance(DBInstanceIdentifier='db-master-1',
AllocatedStorage=10,
                            DBInstanceClass='db.m1.small',
                            Engine='postgres',
MasterUsername='root',
MasterUserPassword='hunter2',
Port=1234,
DBSecurityGroups=['my_sg'])
database = conn.reboot_db_instance(DBInstanceIdentifier='db-master-1')
database['DBInstance']['DBInstanceIdentifier'].should.equal("db-master-1")
@mock_rds2
def test_reboot_non_existant_database():
conn = boto3.client('rds', region_name='us-west-2')
conn.reboot_db_instance.when.called_with(
DBInstanceIdentifier="not-a-db").should.throw(ClientError)
@mock_rds2
def test_delete_database():
conn = boto3.client('rds', region_name='us-west-2')
instances = conn.describe_db_instances()
list(instances['DBInstances']).should.have.length_of(0)
conn.create_db_instance(DBInstanceIdentifier='db-primary-1',
AllocatedStorage=10,
Engine='postgres',
DBInstanceClass='db.m1.small',
MasterUsername='root',
MasterUserPassword='hunter2',
Port=1234,
DBSecurityGroups=['my_sg'])
instances = conn.describe_db_instances()
list(instances['DBInstances']).should.have.length_of(1)
conn.delete_db_instance(DBInstanceIdentifier="db-primary-1",
FinalDBSnapshotIdentifier='primary-1-snapshot')
instances = conn.describe_db_instances()
list(instances['DBInstances']).should.have.length_of(0)
# Saved the snapshot
snapshots = conn.describe_db_snapshots(DBInstanceIdentifier="db-primary-1").get('DBSnapshots')
snapshots[0].get('Engine').should.equal('postgres')
@mock_rds2
def test_delete_non_existant_database():
    conn = boto3.client('rds', region_name="us-west-2")
conn.delete_db_instance.when.called_with(
DBInstanceIdentifier="not-a-db").should.throw(ClientError)
@mock_rds2
def test_create_db_snapshots():
conn = boto3.client('rds', region_name='us-west-2')
conn.create_db_snapshot.when.called_with(
DBInstanceIdentifier='db-primary-1',
DBSnapshotIdentifier='snapshot-1').should.throw(ClientError)
conn.create_db_instance(DBInstanceIdentifier='db-primary-1',
AllocatedStorage=10,
Engine='postgres',
DBName='staging-postgres',
DBInstanceClass='db.m1.small',
MasterUsername='root',
MasterUserPassword='hunter2',
Port=1234,
DBSecurityGroups=["my_sg"])
snapshot = conn.create_db_snapshot(DBInstanceIdentifier='db-primary-1',
DBSnapshotIdentifier='g-1').get('DBSnapshot')
snapshot.get('Engine').should.equal('postgres')
snapshot.get('DBInstanceIdentifier').should.equal('db-primary-1')
snapshot.get('DBSnapshotIdentifier').should.equal('g-1')
@mock_rds2
def test_describe_db_snapshots():
conn = boto3.client('rds', region_name='us-west-2')
conn.create_db_instance(DBInstanceIdentifier='db-primary-1',
AllocatedStorage=10,
Engine='postgres',
DBName='staging-postgres',
DBInstanceClass='db.m1.small',
MasterUsername='root',
MasterUserPassword='hunter2',
Port=1234,
DBSecurityGroups=["my_sg"])
conn.describe_db_snapshots.when.called_with(
DBInstanceIdentifier="db-primary-1").should.throw(ClientError)
created = conn.create_db_snapshot(DBInstanceIdentifier='db-primary-1',
DBSnapshotIdentifier='snapshot-1').get('DBSnapshot')
created.get('Engine').should.equal('postgres')
by_database_id = conn.describe_db_snapshots(DBInstanceIdentifier='db-primary-1').get('DBSnapshots')
by_snapshot_id = conn.describe_db_snapshots(DBSnapshotIdentifier='snapshot-1').get('DBSnapshots')
by_snapshot_id.should.equal(by_database_id)
snapshot = by_snapshot_id[0]
snapshot.should.equal(created)
snapshot.get('Engine').should.equal('postgres')
@mock_rds2
def test_delete_db_snapshot():
conn = boto3.client('rds', region_name='us-west-2')
conn.create_db_instance(DBInstanceIdentifier='db-primary-1',
AllocatedStorage=10,
Engine='postgres',
DBName='staging-postgres',
DBInstanceClass='db.m1.small',
MasterUsername='root',
MasterUserPassword='hunter2',
Port=1234,
DBSecurityGroups=["my_sg"])
conn.create_db_snapshot(DBInstanceIdentifier='db-primary-1',
DBSnapshotIdentifier='snapshot-1')
conn.describe_db_snapshots(DBSnapshotIdentifier='snapshot-1').get('DBSnapshots')[0]
conn.delete_db_snapshot(DBSnapshotIdentifier='snapshot-1')
conn.describe_db_snapshots.when.called_with(
DBSnapshotIdentifier='snapshot-1').should.throw(ClientError)
@mock_rds2
def test_create_option_group():
conn = boto3.client('rds', region_name='us-west-2')
option_group = conn.create_option_group(OptionGroupName='test',
EngineName='mysql',
MajorEngineVersion='5.6',
OptionGroupDescription='test option group')
option_group['OptionGroup']['OptionGroupName'].should.equal('test')
option_group['OptionGroup']['EngineName'].should.equal('mysql')
option_group['OptionGroup'][
'OptionGroupDescription'].should.equal('test option group')
option_group['OptionGroup']['MajorEngineVersion'].should.equal('5.6')
@mock_rds2
def test_create_option_group_bad_engine_name():
conn = boto3.client('rds', region_name='us-west-2')
conn.create_option_group.when.called_with(OptionGroupName='test',
EngineName='invalid_engine',
MajorEngineVersion='5.6',
OptionGroupDescription='test invalid engine').should.throw(ClientError)
@mock_rds2
def test_create_option_group_bad_engine_major_version():
conn = boto3.client('rds', region_name='us-west-2')
conn.create_option_group.when.called_with(OptionGroupName='test',
EngineName='mysql',
MajorEngineVersion='6.6.6',
OptionGroupDescription='test invalid engine version').should.throw(ClientError)
@mock_rds2
def test_create_option_group_empty_description():
conn = boto3.client('rds', region_name='us-west-2')
conn.create_option_group.when.called_with(OptionGroupName='test',
EngineName='mysql',
MajorEngineVersion='5.6',
OptionGroupDescription='').should.throw(ClientError)
@mock_rds2
def test_create_option_group_duplicate():
conn = boto3.client('rds', region_name='us-west-2')
conn.create_option_group(OptionGroupName='test',
EngineName='mysql',
MajorEngineVersion='5.6',
OptionGroupDescription='test option group')
conn.create_option_group.when.called_with(OptionGroupName='test',
EngineName='mysql',
MajorEngineVersion='5.6',
OptionGroupDescription='test option group').should.throw(ClientError)
@mock_rds2
def test_describe_option_group():
conn = boto3.client('rds', region_name='us-west-2')
conn.create_option_group(OptionGroupName='test',
EngineName='mysql',
MajorEngineVersion='5.6',
OptionGroupDescription='test option group')
option_groups = conn.describe_option_groups(OptionGroupName='test')
option_groups['OptionGroupsList'][0][
'OptionGroupName'].should.equal('test')
@mock_rds2
def test_describe_non_existant_option_group():
conn = boto3.client('rds', region_name='us-west-2')
conn.describe_option_groups.when.called_with(
OptionGroupName="not-a-option-group").should.throw(ClientError)
@mock_rds2
def test_delete_option_group():
conn = boto3.client('rds', region_name='us-west-2')
conn.create_option_group(OptionGroupName='test',
EngineName='mysql',
MajorEngineVersion='5.6',
OptionGroupDescription='test option group')
option_groups = conn.describe_option_groups(OptionGroupName='test')
option_groups['OptionGroupsList'][0][
'OptionGroupName'].should.equal('test')
conn.delete_option_group(OptionGroupName='test')
conn.describe_option_groups.when.called_with(
OptionGroupName='test').should.throw(ClientError)
@mock_rds2
def test_delete_non_existant_option_group():
conn = boto3.client('rds', region_name='us-west-2')
conn.delete_option_group.when.called_with(
OptionGroupName='non-existant').should.throw(ClientError)
@mock_rds2
def test_describe_option_group_options():
conn = boto3.client('rds', region_name='us-west-2')
option_group_options = conn.describe_option_group_options(
EngineName='sqlserver-ee')
len(option_group_options['OptionGroupOptions']).should.equal(4)
option_group_options = conn.describe_option_group_options(
EngineName='sqlserver-ee', MajorEngineVersion='11.00')
len(option_group_options['OptionGroupOptions']).should.equal(2)
option_group_options = conn.describe_option_group_options(
EngineName='mysql', MajorEngineVersion='5.6')
len(option_group_options['OptionGroupOptions']).should.equal(1)
conn.describe_option_group_options.when.called_with(
EngineName='non-existent').should.throw(ClientError)
conn.describe_option_group_options.when.called_with(
EngineName='mysql', MajorEngineVersion='non-existent').should.throw(ClientError)
@mock_rds2
def test_modify_option_group():
conn = boto3.client('rds', region_name='us-west-2')
conn.create_option_group(OptionGroupName='test', EngineName='mysql',
MajorEngineVersion='5.6', OptionGroupDescription='test option group')
    # TODO: create an option and validate it before deleting.
    # It is not yet clear how to use modify_option_group to add options
    # to an option group; once that is known, this test can be completed.
result = conn.modify_option_group(OptionGroupName='test', OptionsToInclude=[
], OptionsToRemove=['MEMCACHED'], ApplyImmediately=True)
result['OptionGroup']['EngineName'].should.equal('mysql')
result['OptionGroup']['Options'].should.equal([])
result['OptionGroup']['OptionGroupName'].should.equal('test')
@mock_rds2
def test_modify_option_group_no_options():
conn = boto3.client('rds', region_name='us-west-2')
conn.create_option_group(OptionGroupName='test', EngineName='mysql',
MajorEngineVersion='5.6', OptionGroupDescription='test option group')
conn.modify_option_group.when.called_with(
OptionGroupName='test').should.throw(ClientError)
@mock_rds2
def test_modify_non_existant_option_group():
conn = boto3.client('rds', region_name='us-west-2')
conn.modify_option_group.when.called_with(OptionGroupName='non-existant', OptionsToInclude=[(
'OptionName', 'Port', 'DBSecurityGroupMemberships', 'VpcSecurityGroupMemberships', 'OptionSettings')]).should.throw(ParamValidationError)
@mock_rds2
def test_list_tags_invalid_arn():
conn = boto3.client('rds', region_name='us-west-2')
conn.list_tags_for_resource.when.called_with(
ResourceName='arn:aws:rds:bad-arn').should.throw(ClientError)
@mock_rds2
def test_list_tags_db():
conn = boto3.client('rds', region_name='us-west-2')
result = conn.list_tags_for_resource(
ResourceName='arn:aws:rds:us-west-2:1234567890:db:foo')
result['TagList'].should.equal([])
test_instance = conn.create_db_instance(
DBInstanceIdentifier='db-with-tags',
AllocatedStorage=10,
        DBInstanceClass='db.m1.small',
        Engine='postgres',
MasterUsername='root',
MasterUserPassword='hunter2',
Port=1234,
DBSecurityGroups=['my_sg'],
Tags=[
{
'Key': 'foo',
'Value': 'bar',
},
{
'Key': 'foo1',
'Value': 'bar1',
},
])
result = conn.list_tags_for_resource(
ResourceName=test_instance['DBInstance']['DBInstanceArn'])
result['TagList'].should.equal([{'Value': 'bar',
'Key': 'foo'},
{'Value': 'bar1',
'Key': 'foo1'}])
@mock_rds2
def test_add_tags_db():
conn = boto3.client('rds', region_name='us-west-2')
conn.create_db_instance(DBInstanceIdentifier='db-without-tags',
AllocatedStorage=10,
                            DBInstanceClass='db.m1.small',
                            Engine='postgres',
MasterUsername='root',
MasterUserPassword='hunter2',
Port=1234,
DBSecurityGroups=['my_sg'],
Tags=[
{
'Key': 'foo',
'Value': 'bar',
},
{
'Key': 'foo1',
'Value': 'bar1',
},
])
result = conn.list_tags_for_resource(
ResourceName='arn:aws:rds:us-west-2:1234567890:db:db-without-tags')
list(result['TagList']).should.have.length_of(2)
conn.add_tags_to_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:db:db-without-tags',
Tags=[
{
'Key': 'foo',
'Value': 'fish',
},
{
'Key': 'foo2',
'Value': 'bar2',
},
])
result = conn.list_tags_for_resource(
ResourceName='arn:aws:rds:us-west-2:1234567890:db:db-without-tags')
list(result['TagList']).should.have.length_of(3)
@mock_rds2
def test_remove_tags_db():
conn = boto3.client('rds', region_name='us-west-2')
conn.create_db_instance(DBInstanceIdentifier='db-with-tags',
AllocatedStorage=10,
                            DBInstanceClass='db.m1.small',
                            Engine='postgres',
MasterUsername='root',
MasterUserPassword='hunter2',
Port=1234,
DBSecurityGroups=['my_sg'],
Tags=[
{
'Key': 'foo',
'Value': 'bar',
},
{
'Key': 'foo1',
'Value': 'bar1',
},
])
result = conn.list_tags_for_resource(
ResourceName='arn:aws:rds:us-west-2:1234567890:db:db-with-tags')
list(result['TagList']).should.have.length_of(2)
conn.remove_tags_from_resource(
ResourceName='arn:aws:rds:us-west-2:1234567890:db:db-with-tags', TagKeys=['foo'])
result = conn.list_tags_for_resource(
ResourceName='arn:aws:rds:us-west-2:1234567890:db:db-with-tags')
len(result['TagList']).should.equal(1)
@mock_rds2
def test_add_tags_option_group():
conn = boto3.client('rds', region_name='us-west-2')
conn.create_option_group(OptionGroupName='test',
EngineName='mysql',
MajorEngineVersion='5.6',
OptionGroupDescription='test option group')
result = conn.list_tags_for_resource(
ResourceName='arn:aws:rds:us-west-2:1234567890:og:test')
list(result['TagList']).should.have.length_of(0)
conn.add_tags_to_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:og:test',
Tags=[
{
'Key': 'foo',
'Value': 'fish',
},
{
'Key': 'foo2',
'Value': 'bar2',
}])
result = conn.list_tags_for_resource(
ResourceName='arn:aws:rds:us-west-2:1234567890:og:test')
list(result['TagList']).should.have.length_of(2)
@mock_rds2
def test_remove_tags_option_group():
conn = boto3.client('rds', region_name='us-west-2')
conn.create_option_group(OptionGroupName='test',
EngineName='mysql',
MajorEngineVersion='5.6',
OptionGroupDescription='test option group')
result = conn.list_tags_for_resource(
ResourceName='arn:aws:rds:us-west-2:1234567890:og:test')
conn.add_tags_to_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:og:test',
Tags=[
{
'Key': 'foo',
'Value': 'fish',
},
{
'Key': 'foo2',
'Value': 'bar2',
}])
result = conn.list_tags_for_resource(
ResourceName='arn:aws:rds:us-west-2:1234567890:og:test')
list(result['TagList']).should.have.length_of(2)
conn.remove_tags_from_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:og:test',
TagKeys=['foo'])
result = conn.list_tags_for_resource(
ResourceName='arn:aws:rds:us-west-2:1234567890:og:test')
list(result['TagList']).should.have.length_of(1)
@mock_rds2
def test_create_database_security_group():
conn = boto3.client('rds', region_name='us-west-2')
result = conn.create_db_security_group(
DBSecurityGroupName='db_sg', DBSecurityGroupDescription='DB Security Group')
result['DBSecurityGroup']['DBSecurityGroupName'].should.equal("db_sg")
result['DBSecurityGroup'][
'DBSecurityGroupDescription'].should.equal("DB Security Group")
result['DBSecurityGroup']['IPRanges'].should.equal([])
@mock_rds2
def test_get_security_groups():
conn = boto3.client('rds', region_name='us-west-2')
result = conn.describe_db_security_groups()
result['DBSecurityGroups'].should.have.length_of(0)
conn.create_db_security_group(
DBSecurityGroupName='db_sg1', DBSecurityGroupDescription='DB Security Group')
conn.create_db_security_group(
DBSecurityGroupName='db_sg2', DBSecurityGroupDescription='DB Security Group')
result = conn.describe_db_security_groups()
result['DBSecurityGroups'].should.have.length_of(2)
result = conn.describe_db_security_groups(DBSecurityGroupName="db_sg1")
result['DBSecurityGroups'].should.have.length_of(1)
result['DBSecurityGroups'][0]['DBSecurityGroupName'].should.equal("db_sg1")
@mock_rds2
def test_get_non_existant_security_group():
conn = boto3.client('rds', region_name='us-west-2')
conn.describe_db_security_groups.when.called_with(
DBSecurityGroupName="not-a-sg").should.throw(ClientError)
@mock_rds2
def test_delete_database_security_group():
conn = boto3.client('rds', region_name='us-west-2')
conn.create_db_security_group(
DBSecurityGroupName='db_sg', DBSecurityGroupDescription='DB Security Group')
result = conn.describe_db_security_groups()
result['DBSecurityGroups'].should.have.length_of(1)
conn.delete_db_security_group(DBSecurityGroupName="db_sg")
result = conn.describe_db_security_groups()
result['DBSecurityGroups'].should.have.length_of(0)
@mock_rds2
def test_delete_non_existant_security_group():
conn = boto3.client('rds', region_name='us-west-2')
conn.delete_db_security_group.when.called_with(
DBSecurityGroupName="not-a-db").should.throw(ClientError)
@mock_rds2
def test_security_group_authorize():
conn = boto3.client('rds', region_name='us-west-2')
security_group = conn.create_db_security_group(DBSecurityGroupName='db_sg',
DBSecurityGroupDescription='DB Security Group')
security_group['DBSecurityGroup']['IPRanges'].should.equal([])
conn.authorize_db_security_group_ingress(DBSecurityGroupName='db_sg',
CIDRIP='10.3.2.45/32')
result = conn.describe_db_security_groups(DBSecurityGroupName="db_sg")
result['DBSecurityGroups'][0]['IPRanges'].should.have.length_of(1)
result['DBSecurityGroups'][0]['IPRanges'].should.equal(
[{'Status': 'authorized', 'CIDRIP': '10.3.2.45/32'}])
conn.authorize_db_security_group_ingress(DBSecurityGroupName='db_sg',
CIDRIP='10.3.2.46/32')
result = conn.describe_db_security_groups(DBSecurityGroupName="db_sg")
result['DBSecurityGroups'][0]['IPRanges'].should.have.length_of(2)
result['DBSecurityGroups'][0]['IPRanges'].should.equal([
{'Status': 'authorized', 'CIDRIP': '10.3.2.45/32'},
{'Status': 'authorized', 'CIDRIP': '10.3.2.46/32'},
])
@mock_rds2
def test_add_security_group_to_database():
conn = boto3.client('rds', region_name='us-west-2')
conn.create_db_instance(DBInstanceIdentifier='db-master-1',
AllocatedStorage=10,
                            DBInstanceClass='db.m1.small',
                            Engine='postgres',
MasterUsername='root',
MasterUserPassword='hunter2',
Port=1234)
result = conn.describe_db_instances()
result['DBInstances'][0]['DBSecurityGroups'].should.equal([])
conn.create_db_security_group(DBSecurityGroupName='db_sg',
DBSecurityGroupDescription='DB Security Group')
conn.modify_db_instance(DBInstanceIdentifier='db-master-1',
DBSecurityGroups=['db_sg'])
result = conn.describe_db_instances()
result['DBInstances'][0]['DBSecurityGroups'][0][
'DBSecurityGroupName'].should.equal('db_sg')
@mock_rds2
def test_list_tags_security_group():
conn = boto3.client('rds', region_name='us-west-2')
result = conn.describe_db_subnet_groups()
result['DBSubnetGroups'].should.have.length_of(0)
security_group = conn.create_db_security_group(DBSecurityGroupName="db_sg",
DBSecurityGroupDescription='DB Security Group',
Tags=[{'Value': 'bar',
'Key': 'foo'},
{'Value': 'bar1',
'Key': 'foo1'}])['DBSecurityGroup']['DBSecurityGroupName']
resource = 'arn:aws:rds:us-west-2:1234567890:secgrp:{0}'.format(
security_group)
result = conn.list_tags_for_resource(ResourceName=resource)
result['TagList'].should.equal([{'Value': 'bar',
'Key': 'foo'},
{'Value': 'bar1',
'Key': 'foo1'}])
@mock_rds2
def test_add_tags_security_group():
conn = boto3.client('rds', region_name='us-west-2')
result = conn.describe_db_subnet_groups()
result['DBSubnetGroups'].should.have.length_of(0)
security_group = conn.create_db_security_group(DBSecurityGroupName="db_sg",
DBSecurityGroupDescription='DB Security Group')['DBSecurityGroup']['DBSecurityGroupName']
resource = 'arn:aws:rds:us-west-2:1234567890:secgrp:{0}'.format(
security_group)
conn.add_tags_to_resource(ResourceName=resource,
Tags=[{'Value': 'bar',
'Key': 'foo'},
{'Value': 'bar1',
'Key': 'foo1'}])
result = conn.list_tags_for_resource(ResourceName=resource)
result['TagList'].should.equal([{'Value': 'bar',
'Key': 'foo'},
{'Value': 'bar1',
'Key': 'foo1'}])
@mock_rds2
def test_remove_tags_security_group():
conn = boto3.client('rds', region_name='us-west-2')
result = conn.describe_db_subnet_groups()
result['DBSubnetGroups'].should.have.length_of(0)
security_group = conn.create_db_security_group(DBSecurityGroupName="db_sg",
DBSecurityGroupDescription='DB Security Group',
Tags=[{'Value': 'bar',
'Key': 'foo'},
{'Value': 'bar1',
'Key': 'foo1'}])['DBSecurityGroup']['DBSecurityGroupName']
resource = 'arn:aws:rds:us-west-2:1234567890:secgrp:{0}'.format(
security_group)
conn.remove_tags_from_resource(ResourceName=resource, TagKeys=['foo'])
result = conn.list_tags_for_resource(ResourceName=resource)
result['TagList'].should.equal([{'Value': 'bar1', 'Key': 'foo1'}])
@mock_ec2
@mock_rds2
def test_create_database_subnet_group():
vpc_conn = boto3.client('ec2', 'us-west-2')
vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc']
subnet1 = vpc_conn.create_subnet(
VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet']
subnet2 = vpc_conn.create_subnet(
VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/26')['Subnet']
subnet_ids = [subnet1['SubnetId'], subnet2['SubnetId']]
conn = boto3.client('rds', region_name='us-west-2')
result = conn.create_db_subnet_group(DBSubnetGroupName='db_subnet',
DBSubnetGroupDescription='my db subnet',
SubnetIds=subnet_ids)
result['DBSubnetGroup']['DBSubnetGroupName'].should.equal("db_subnet")
result['DBSubnetGroup'][
'DBSubnetGroupDescription'].should.equal("my db subnet")
subnets = result['DBSubnetGroup']['Subnets']
subnet_group_ids = [subnets[0]['SubnetIdentifier'],
subnets[1]['SubnetIdentifier']]
list(subnet_group_ids).should.equal(subnet_ids)
@mock_ec2
@mock_rds2
def test_create_database_in_subnet_group():
vpc_conn = boto3.client('ec2', 'us-west-2')
vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc']
subnet = vpc_conn.create_subnet(
VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet']
conn = boto3.client('rds', region_name='us-west-2')
conn.create_db_subnet_group(DBSubnetGroupName='db_subnet1',
DBSubnetGroupDescription='my db subnet',
SubnetIds=[subnet['SubnetId']])
conn.create_db_instance(DBInstanceIdentifier='db-master-1',
AllocatedStorage=10,
Engine='postgres',
DBInstanceClass='db.m1.small',
MasterUsername='root',
MasterUserPassword='hunter2',
Port=1234,
DBSubnetGroupName='db_subnet1')
result = conn.describe_db_instances(DBInstanceIdentifier='db-master-1')
result['DBInstances'][0]['DBSubnetGroup'][
'DBSubnetGroupName'].should.equal('db_subnet1')
@mock_ec2
@mock_rds2
def test_describe_database_subnet_group():
vpc_conn = boto3.client('ec2', 'us-west-2')
vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc']
subnet = vpc_conn.create_subnet(
VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet']
conn = boto3.client('rds', region_name='us-west-2')
conn.create_db_subnet_group(DBSubnetGroupName="db_subnet1",
DBSubnetGroupDescription='my db subnet',
SubnetIds=[subnet['SubnetId']])
conn.create_db_subnet_group(DBSubnetGroupName='db_subnet2',
DBSubnetGroupDescription='my db subnet',
SubnetIds=[subnet['SubnetId']])
resp = conn.describe_db_subnet_groups()
resp['DBSubnetGroups'].should.have.length_of(2)
subnets = resp['DBSubnetGroups'][0]['Subnets']
subnets.should.have.length_of(1)
list(conn.describe_db_subnet_groups(DBSubnetGroupName="db_subnet1")
['DBSubnetGroups']).should.have.length_of(1)
conn.describe_db_subnet_groups.when.called_with(
DBSubnetGroupName="not-a-subnet").should.throw(ClientError)
@mock_ec2
@mock_rds2
def test_delete_database_subnet_group():
vpc_conn = boto3.client('ec2', 'us-west-2')
vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc']
subnet = vpc_conn.create_subnet(
VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet']
conn = boto3.client('rds', region_name='us-west-2')
result = conn.describe_db_subnet_groups()
result['DBSubnetGroups'].should.have.length_of(0)
conn.create_db_subnet_group(DBSubnetGroupName="db_subnet1",
DBSubnetGroupDescription='my db subnet',
SubnetIds=[subnet['SubnetId']])
result = conn.describe_db_subnet_groups()
result['DBSubnetGroups'].should.have.length_of(1)
conn.delete_db_subnet_group(DBSubnetGroupName="db_subnet1")
result = conn.describe_db_subnet_groups()
result['DBSubnetGroups'].should.have.length_of(0)
conn.delete_db_subnet_group.when.called_with(
DBSubnetGroupName="db_subnet1").should.throw(ClientError)
@mock_ec2
@mock_rds2
def test_list_tags_database_subnet_group():
vpc_conn = boto3.client('ec2', 'us-west-2')
vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc']
subnet = vpc_conn.create_subnet(
VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet']
conn = boto3.client('rds', region_name='us-west-2')
result = conn.describe_db_subnet_groups()
result['DBSubnetGroups'].should.have.length_of(0)
    subnet_group_name = conn.create_db_subnet_group(
        DBSubnetGroupName="db_subnet1",
        DBSubnetGroupDescription='my db subnet',
        SubnetIds=[subnet['SubnetId']],
        Tags=[{'Value': 'bar',
               'Key': 'foo'},
              {'Value': 'bar1',
               'Key': 'foo1'}])['DBSubnetGroup']['DBSubnetGroupName']
    result = conn.list_tags_for_resource(
        ResourceName='arn:aws:rds:us-west-2:1234567890:subgrp:{0}'.format(
            subnet_group_name))
result['TagList'].should.equal([{'Value': 'bar',
'Key': 'foo'},
{'Value': 'bar1',
'Key': 'foo1'}])
@mock_ec2
@mock_rds2
def test_add_tags_database_subnet_group():
vpc_conn = boto3.client('ec2', 'us-west-2')
vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc']
subnet = vpc_conn.create_subnet(
VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet']
conn = boto3.client('rds', region_name='us-west-2')
result = conn.describe_db_subnet_groups()
result['DBSubnetGroups'].should.have.length_of(0)
    subnet_group_name = conn.create_db_subnet_group(
        DBSubnetGroupName="db_subnet1",
        DBSubnetGroupDescription='my db subnet',
        SubnetIds=[subnet['SubnetId']],
        Tags=[])['DBSubnetGroup']['DBSubnetGroupName']
    resource = 'arn:aws:rds:us-west-2:1234567890:subgrp:{0}'.format(
        subnet_group_name)
conn.add_tags_to_resource(ResourceName=resource,
Tags=[{'Value': 'bar',
'Key': 'foo'},
{'Value': 'bar1',
'Key': 'foo1'}])
result = conn.list_tags_for_resource(ResourceName=resource)
result['TagList'].should.equal([{'Value': 'bar',
'Key': 'foo'},
{'Value': 'bar1',
'Key': 'foo1'}])
@mock_ec2
@mock_rds2
def test_remove_tags_database_subnet_group():
vpc_conn = boto3.client('ec2', 'us-west-2')
vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc']
subnet = vpc_conn.create_subnet(
VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet']
conn = boto3.client('rds', region_name='us-west-2')
result = conn.describe_db_subnet_groups()
result['DBSubnetGroups'].should.have.length_of(0)
    subnet_group_name = conn.create_db_subnet_group(
        DBSubnetGroupName="db_subnet1",
        DBSubnetGroupDescription='my db subnet',
        SubnetIds=[subnet['SubnetId']],
        Tags=[{'Value': 'bar',
               'Key': 'foo'},
              {'Value': 'bar1',
               'Key': 'foo1'}])['DBSubnetGroup']['DBSubnetGroupName']
    resource = 'arn:aws:rds:us-west-2:1234567890:subgrp:{0}'.format(
        subnet_group_name)
conn.remove_tags_from_resource(ResourceName=resource, TagKeys=['foo'])
result = conn.list_tags_for_resource(ResourceName=resource)
result['TagList'].should.equal([{'Value': 'bar1', 'Key': 'foo1'}])
@mock_rds2
def test_create_database_replica():
conn = boto3.client('rds', region_name='us-west-2')
database = conn.create_db_instance(DBInstanceIdentifier='db-master-1',
AllocatedStorage=10,
Engine='postgres',
DBInstanceClass='db.m1.small',
MasterUsername='root',
MasterUserPassword='hunter2',
Port=1234,
DBSecurityGroups=["my_sg"])
replica = conn.create_db_instance_read_replica(DBInstanceIdentifier="db-replica-1",
SourceDBInstanceIdentifier="db-master-1",
DBInstanceClass="db.m1.small")
replica['DBInstance'][
'ReadReplicaSourceDBInstanceIdentifier'].should.equal('db-master-1')
replica['DBInstance']['DBInstanceClass'].should.equal('db.m1.small')
replica['DBInstance']['DBInstanceIdentifier'].should.equal('db-replica-1')
master = conn.describe_db_instances(DBInstanceIdentifier="db-master-1")
master['DBInstances'][0]['ReadReplicaDBInstanceIdentifiers'].should.equal([
'db-replica-1'])
conn.delete_db_instance(
DBInstanceIdentifier="db-replica-1", SkipFinalSnapshot=True)
master = conn.describe_db_instances(DBInstanceIdentifier="db-master-1")
master['DBInstances'][0][
'ReadReplicaDBInstanceIdentifiers'].should.equal([])
@mock_rds2
@mock_kms
def test_create_database_with_encrypted_storage():
kms_conn = boto3.client('kms', region_name='us-west-2')
key = kms_conn.create_key(Policy='my RDS encryption policy',
Description='RDS encryption key',
KeyUsage='ENCRYPT_DECRYPT')
conn = boto3.client('rds', region_name='us-west-2')
database = conn.create_db_instance(DBInstanceIdentifier='db-master-1',
AllocatedStorage=10,
Engine='postgres',
DBInstanceClass='db.m1.small',
MasterUsername='root',
MasterUserPassword='hunter2',
Port=1234,
DBSecurityGroups=["my_sg"],
StorageEncrypted=True,
KmsKeyId=key['KeyMetadata']['KeyId'])
database['DBInstance']['StorageEncrypted'].should.equal(True)
database['DBInstance']['KmsKeyId'].should.equal(
key['KeyMetadata']['KeyId'])
@mock_rds2
def test_create_db_parameter_group():
conn = boto3.client('rds', region_name='us-west-2')
db_parameter_group = conn.create_db_parameter_group(DBParameterGroupName='test',
DBParameterGroupFamily='mysql5.6',
Description='test parameter group')
db_parameter_group['DBParameterGroup'][
'DBParameterGroupName'].should.equal('test')
db_parameter_group['DBParameterGroup'][
'DBParameterGroupFamily'].should.equal('mysql5.6')
db_parameter_group['DBParameterGroup'][
'Description'].should.equal('test parameter group')
@mock_rds2
def test_create_db_instance_with_parameter_group():
conn = boto3.client('rds', region_name='us-west-2')
db_parameter_group = conn.create_db_parameter_group(DBParameterGroupName='test',
DBParameterGroupFamily='mysql5.6',
Description='test parameter group')
database = conn.create_db_instance(DBInstanceIdentifier='db-master-1',
AllocatedStorage=10,
Engine='mysql',
DBInstanceClass='db.m1.small',
DBParameterGroupName='test',
MasterUsername='root',
MasterUserPassword='hunter2',
Port=1234)
len(database['DBInstance']['DBParameterGroups']).should.equal(1)
database['DBInstance']['DBParameterGroups'][0][
'DBParameterGroupName'].should.equal('test')
database['DBInstance']['DBParameterGroups'][0][
'ParameterApplyStatus'].should.equal('in-sync')
@mock_rds2
def test_create_database_with_default_port():
conn = boto3.client('rds', region_name='us-west-2')
database = conn.create_db_instance(DBInstanceIdentifier='db-master-1',
AllocatedStorage=10,
Engine='postgres',
DBInstanceClass='db.m1.small',
MasterUsername='root',
MasterUserPassword='hunter2',
DBSecurityGroups=["my_sg"])
database['DBInstance']['Endpoint']['Port'].should.equal(5432)
@mock_rds2
def test_modify_db_instance_with_parameter_group():
conn = boto3.client('rds', region_name='us-west-2')
database = conn.create_db_instance(DBInstanceIdentifier='db-master-1',
AllocatedStorage=10,
Engine='mysql',
DBInstanceClass='db.m1.small',
MasterUsername='root',
MasterUserPassword='hunter2',
Port=1234)
len(database['DBInstance']['DBParameterGroups']).should.equal(1)
database['DBInstance']['DBParameterGroups'][0][
'DBParameterGroupName'].should.equal('default.mysql5.6')
database['DBInstance']['DBParameterGroups'][0][
'ParameterApplyStatus'].should.equal('in-sync')
db_parameter_group = conn.create_db_parameter_group(DBParameterGroupName='test',
DBParameterGroupFamily='mysql5.6',
Description='test parameter group')
conn.modify_db_instance(DBInstanceIdentifier='db-master-1',
DBParameterGroupName='test',
ApplyImmediately=True)
database = conn.describe_db_instances(
DBInstanceIdentifier='db-master-1')['DBInstances'][0]
len(database['DBParameterGroups']).should.equal(1)
database['DBParameterGroups'][0][
'DBParameterGroupName'].should.equal('test')
database['DBParameterGroups'][0][
'ParameterApplyStatus'].should.equal('in-sync')
@mock_rds2
def test_create_db_parameter_group_empty_description():
conn = boto3.client('rds', region_name='us-west-2')
conn.create_db_parameter_group.when.called_with(DBParameterGroupName='test',
DBParameterGroupFamily='mysql5.6',
Description='').should.throw(ClientError)
@mock_rds2
def test_create_db_parameter_group_duplicate():
conn = boto3.client('rds', region_name='us-west-2')
conn.create_db_parameter_group(DBParameterGroupName='test',
DBParameterGroupFamily='mysql5.6',
Description='test parameter group')
conn.create_db_parameter_group.when.called_with(DBParameterGroupName='test',
DBParameterGroupFamily='mysql5.6',
Description='test parameter group').should.throw(ClientError)
@mock_rds2
def test_describe_db_parameter_group():
conn = boto3.client('rds', region_name='us-west-2')
conn.create_db_parameter_group(DBParameterGroupName='test',
DBParameterGroupFamily='mysql5.6',
Description='test parameter group')
db_parameter_groups = conn.describe_db_parameter_groups(
DBParameterGroupName='test')
db_parameter_groups['DBParameterGroups'][0][
'DBParameterGroupName'].should.equal('test')
@mock_rds2
def test_describe_nonexistent_db_parameter_group():
conn = boto3.client('rds', region_name='us-west-2')
db_parameter_groups = conn.describe_db_parameter_groups(
DBParameterGroupName='test')
len(db_parameter_groups['DBParameterGroups']).should.equal(0)
@mock_rds2
def test_delete_db_parameter_group():
conn = boto3.client('rds', region_name='us-west-2')
conn.create_db_parameter_group(DBParameterGroupName='test',
DBParameterGroupFamily='mysql5.6',
Description='test parameter group')
db_parameter_groups = conn.describe_db_parameter_groups(
DBParameterGroupName='test')
db_parameter_groups['DBParameterGroups'][0][
'DBParameterGroupName'].should.equal('test')
conn.delete_db_parameter_group(DBParameterGroupName='test')
db_parameter_groups = conn.describe_db_parameter_groups(
DBParameterGroupName='test')
len(db_parameter_groups['DBParameterGroups']).should.equal(0)
@mock_rds2
def test_modify_db_parameter_group():
conn = boto3.client('rds', region_name='us-west-2')
conn.create_db_parameter_group(DBParameterGroupName='test',
DBParameterGroupFamily='mysql5.6',
Description='test parameter group')
modify_result = conn.modify_db_parameter_group(DBParameterGroupName='test',
Parameters=[{
'ParameterName': 'foo',
'ParameterValue': 'foo_val',
'Description': 'test param',
'ApplyMethod': 'immediate'
}]
)
modify_result['DBParameterGroupName'].should.equal('test')
db_parameters = conn.describe_db_parameters(DBParameterGroupName='test')
db_parameters['Parameters'][0]['ParameterName'].should.equal('foo')
db_parameters['Parameters'][0]['ParameterValue'].should.equal('foo_val')
db_parameters['Parameters'][0]['Description'].should.equal('test param')
db_parameters['Parameters'][0]['ApplyMethod'].should.equal('immediate')
@mock_rds2
def test_delete_nonexistent_db_parameter_group():
    conn = boto3.client('rds', region_name='us-west-2')
    conn.delete_db_parameter_group.when.called_with(
        DBParameterGroupName='non-existent').should.throw(ClientError)
@mock_rds2
def test_create_parameter_group_with_tags():
conn = boto3.client('rds', region_name='us-west-2')
conn.create_db_parameter_group(DBParameterGroupName='test',
DBParameterGroupFamily='mysql5.6',
Description='test parameter group',
Tags=[{
'Key': 'foo',
'Value': 'bar',
}])
result = conn.list_tags_for_resource(
ResourceName='arn:aws:rds:us-west-2:1234567890:pg:test')
result['TagList'].should.equal([{'Value': 'bar', 'Key': 'foo'}])
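# Hedged sketch (not part of the original suite): the tag tests above all
# build RDS ARNs by hand. A small helper capturing that pattern -- the helper
# name and the fake account id are illustrative only -- could look like this:
def _rds_arn(resource_type, name, region='us-west-2', account='1234567890'):
    # ARN layout used throughout these tests:
    # arn:aws:rds:<region>:<account>:<resource-type>:<resource-name>
    return 'arn:aws:rds:{0}:{1}:{2}:{3}'.format(
        region, account, resource_type, name)
# e.g. _rds_arn('pg', 'test') == 'arn:aws:rds:us-west-2:1234567890:pg:test'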
|
|
"""Data generators for translation data-sets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
from data_generators import problem
from data_generators import text_encoder
from data_generators import text_problems
from data_generators import translate
from utils import registry
FLAGS = tf.flags.FLAGS
# End-of-sentence marker.
EOS = text_encoder.EOS_ID
_ENFR_TRAIN_SMALL_DATA = [
[
"https://s3.amazonaws.com/opennmt-trainingdata/baseline-1M-enfr.tgz",
("baseline-1M-enfr/baseline-1M_train.en",
"baseline-1M-enfr/baseline-1M_train.fr")
],
]
_ENFR_TEST_SMALL_DATA = [
[
"https://s3.amazonaws.com/opennmt-trainingdata/baseline-1M-enfr.tgz",
("baseline-1M-enfr/baseline-1M_valid.en",
"baseline-1M-enfr/baseline-1M_valid.fr")
],
]
_ENFR_TRAIN_LARGE_DATA = [
[
"http://www.statmt.org/wmt13/training-parallel-commoncrawl.tgz",
("commoncrawl.fr-en.en", "commoncrawl.fr-en.fr")
],
[
"http://www.statmt.org/wmt13/training-parallel-europarl-v7.tgz",
("training/europarl-v7.fr-en.en", "training/europarl-v7.fr-en.fr")
],
[
"http://www.statmt.org/wmt14/training-parallel-nc-v9.tgz",
("training/news-commentary-v9.fr-en.en",
"training/news-commentary-v9.fr-en.fr")
],
[
"http://www.statmt.org/wmt10/training-giga-fren.tar",
("giga-fren.release2.fixed.en.gz",
"giga-fren.release2.fixed.fr.gz")
],
[
"http://www.statmt.org/wmt13/training-parallel-un.tgz",
("un/undoc.2000.fr-en.en", "un/undoc.2000.fr-en.fr")
],
]
_ENFR_TEST_LARGE_DATA = [
[
"http://data.statmt.org/wmt17/translation-task/dev.tgz",
("dev/newstest2013.en", "dev/newstest2013.fr")
],
]
@registry.register_problem
class TranslateEnfrWmtSmall8k(translate.TranslateProblem):
"""Problem spec for WMT En-Fr translation."""
@property
def approx_vocab_size(self):
return 2**13 # 8192
@property
def use_small_dataset(self):
return True
def source_data_files(self, dataset_split):
train = dataset_split == problem.DatasetSplit.TRAIN
if self.use_small_dataset:
datasets = _ENFR_TRAIN_SMALL_DATA if train else _ENFR_TEST_SMALL_DATA
else:
datasets = _ENFR_TRAIN_LARGE_DATA if train else _ENFR_TEST_LARGE_DATA
return datasets
def vocab_data_files(self):
return (_ENFR_TRAIN_SMALL_DATA if self.use_small_dataset
else _ENFR_TRAIN_LARGE_DATA)
@registry.register_problem
class TranslateEnfrWmtSmall32k(TranslateEnfrWmtSmall8k):
@property
def approx_vocab_size(self):
return 2**15 # 32768
@registry.register_problem
class TranslateEnfrWmt8k(TranslateEnfrWmtSmall8k):
@property
def use_small_dataset(self):
return False
@registry.register_problem
class TranslateEnfrWmt32k(TranslateEnfrWmtSmall32k):
@property
def use_small_dataset(self):
return False
@registry.register_problem
class TranslateEnfrWmt32kPacked(TranslateEnfrWmt32k):
@property
def packed_length(self):
return 256
@property
def vocab_filename(self):
return TranslateEnfrWmt32k().vocab_filename
@registry.register_problem
class TranslateEnfrWmt32kWithBacktranslateFr(TranslateEnfrWmt32k):
"""En-Fr translation with added French data, back-translated."""
@property
def vocab_filename(self):
return TranslateEnfrWmt32k().vocab_filename
@property
def already_shuffled(self):
return True
@property
def skip_random_fraction_when_training(self):
return False
@property
def backtranslate_data_filenames(self):
"""List of pairs of files with matched back-translated data."""
# Files must be placed in tmp_dir, each similar size to authentic data.
return [("fr_mono_en.txt", "fr_mono_fr.txt")]
@property
def dataset_splits(self):
"""Splits of data to produce and number of output shards for each."""
return [{
"split": problem.DatasetSplit.TRAIN,
"shards": 1, # Use just 1 shard so as to not mix data.
}, {
"split": problem.DatasetSplit.EVAL,
"shards": 1,
}]
def generate_samples(self, data_dir, tmp_dir, dataset_split):
datasets = self.source_data_files(dataset_split)
tag = "train" if dataset_split == problem.DatasetSplit.TRAIN else "dev"
data_path = translate.compile_data(
tmp_dir, datasets, "%s-compiled-%s" % (self.name, tag))
# Iterator over authentic data.
it_auth = text_problems.text2text_txt_iterator(
data_path + ".lang1", data_path + ".lang2")
# For eval, use authentic data.
if dataset_split != problem.DatasetSplit.TRAIN:
for example in it_auth:
yield example
else: # For training, mix synthetic and authentic data as follows.
for (file1, file2) in self.backtranslate_data_filenames:
path1 = os.path.join(tmp_dir, file1)
path2 = os.path.join(tmp_dir, file2)
# Synthetic data first.
for example in text_problems.text2text_txt_iterator(path1, path2):
yield example
# Now authentic data.
for example in it_auth:
yield example
@registry.register_problem
class TranslateEnfrWmt32kWithBacktranslateEn(
TranslateEnfrWmt32kWithBacktranslateFr):
"""En-Fr translation with added English data, back-translated."""
@property
def backtranslate_data_filenames(self):
"""List of pairs of files with matched back-translated data."""
# Files must be placed in tmp_dir, each similar size to authentic data.
return [("en_mono_en.txt%d" % i, "en_mono_fr.txt%d" % i) for i in [0, 1, 2]]
@registry.register_problem
class TranslateEnfrWmtSmallCharacters(translate.TranslateProblem):
"""Problem spec for WMT En-Fr translation."""
@property
def vocab_type(self):
return text_problems.VocabType.CHARACTER
@property
def use_small_dataset(self):
return True
def source_data_files(self, dataset_split):
train = dataset_split == problem.DatasetSplit.TRAIN
if self.use_small_dataset:
datasets = _ENFR_TRAIN_SMALL_DATA if train else _ENFR_TEST_SMALL_DATA
else:
datasets = _ENFR_TRAIN_LARGE_DATA if train else _ENFR_TEST_LARGE_DATA
return datasets
@registry.register_problem
class TranslateEnfrWmtCharacters(TranslateEnfrWmtSmallCharacters):
@property
def use_small_dataset(self):
return False
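# Hedged sketch (not part of the original file): the registry pattern above
# makes new problem variants one-line subclasses; a hypothetical 16k-vocab
# variant of the small dataset, for instance, would look like this.
@registry.register_problem
class TranslateEnfrWmtSmall16k(TranslateEnfrWmtSmall8k):
  """Hypothetical 16k-vocab variant; illustrative only."""

  @property
  def approx_vocab_size(self):
    return 2**14  # 16384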
|
|
#!/usr/bin/env python
import argparse
import os
import sys
from functools import wraps
from inspect import Parameter
from pprint import pprint
import requests
from halocoin import custom
from halocoin import engine
from halocoin import tools
class Colors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
actions = dict()
connection_port = 7899
host = os.environ.get('HALOCOIN_API_HOST', 'localhost')
def action(func):
@wraps(func)
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
global actions
actions[func.__name__] = wrapper
return wrapper
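# Note (hedged, descriptive only): @action records the wrapped function in the
# module-level `actions` dict under its own name, so
#     @action
#     def blockcount(): ...
# makes actions['blockcount'] callable and exposes "blockcount" as a CLI verb
# via the argparse choices built in run() below.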
def make_api_request(method, files=None, **kwargs):
from requests_toolbelt import MultipartEncoder
if files is None:
files = {}
url = "http://" + str(host) + ":" + str(connection_port) + "/" + method
kwargs = {k: v for k, v in kwargs.items() if v is not None}
if len(files) > 0:
fields = {}
fields.update(kwargs)
fields.update(files)
m = MultipartEncoder(fields=fields)
response = requests.post(url, data=m, headers={'Content-Type': m.content_type})
else:
response = requests.post(url, data=kwargs)
if response.status_code != 200:
return {
'error': response.status_code,
'message': response.text
}
else:
return response.json()
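# Hedged usage sketch (illustrative, not part of the original CLI):
#     make_api_request("balance", address="abc123")
# POSTs to http://<host>:<connection_port>/balance, drops None-valued kwargs,
# and returns the decoded JSON body on HTTP 200 or an
# {'error': <status>, 'message': <text>} dict otherwise; multipart encoding is
# only used when `files` is non-empty.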
def extract_configuration(dir, config):
if dir is None:
working_dir = tools.get_default_dir()
else:
working_dir = dir
working_dir = os.path.join(working_dir, str(custom.version))
if os.path.exists(working_dir) and not os.path.isdir(working_dir):
print("Given path {} is not a directory.".format(working_dir))
exit(1)
elif not os.path.exists(working_dir):
print("Given path {} does not exist. Attempting to create...".format(working_dir))
try:
os.makedirs(working_dir)
print("Successful")
except OSError:
print("Could not create a directory!")
exit(1)
    config_path = None
    if config is not None:
        config_path = config
    elif os.path.exists(os.path.join(working_dir, 'config')):
        config_path = os.path.join(working_dir, 'config')
    if config_path is not None:
        config = custom.read_config_file(config_path)
    else:
        config = custom.generate_default_config()
        custom.write_config_file(config, os.path.join(working_dir, 'config'))
    if config is None:
        # Report the path that failed to parse, not the (now None) parse result
        raise ValueError('Couldn\'t parse config file {}'.format(config_path))
return config, working_dir
@action
def start(dir=None, config=None):
config, working_dir = extract_configuration(dir, config)
tools.init_logging(config['DEBUG'], working_dir, config['logging']['file'])
engine.main(config, working_dir)
@action
def new_wallet(wallet, pw):
from getpass import getpass
    if pw is None:
        # Prompt until the two entries match; None forces at least one round
        wallet_pw = None
        wallet_pw_2 = ''
        while wallet_pw != wallet_pw_2:
            wallet_pw = getpass('New wallet password: ')
            wallet_pw_2 = getpass('New wallet password (again): ')
else:
wallet_pw = pw
print(make_api_request("new_wallet", wallet_name=wallet, password=wallet_pw))
@action
def info_wallet(wallet=None, pw=None):
from getpass import getpass
if pw is None:
wallet_pw = getpass('Wallet password: ')
else:
wallet_pw = pw
information = make_api_request("info_wallet", wallet_name=wallet, password=wallet_pw)
if isinstance(information, dict):
print("Address: {}".format(information['address']))
print("Balance: {}".format(information['balance']))
print("Pubkey: {}".format(information['pubkey']))
print("Privkey: {}".format(information['privkey']))
else:
pprint(information)
@action
def upload_wallet(file, wallet):
files = {
"wallet_file": ('wallet_file', open(file, 'rb')),
"wallet_name": wallet
}
print(make_api_request("upload_wallet", files=files))
@action
def download_wallet(wallet):
print(make_api_request("download_wallet", wallet_name=wallet))
@action
def blocks(start, end=None):
_blocks = make_api_request("blocks", start=start, end=end)
pprint(_blocks)
@action
def blockcount():
result = make_api_request("blockcount")
print('We have {} blocks.'.format(result['length']))
if result['length'] != result['known_length']:
print('Peers are reporting {} blocks.'.format(result['known_length']))
@action
def balance(address=None):
print(make_api_request("balance", address=address))
@action
def node_id():
print(make_api_request("node_id"))
@action
def send(address, amount, pw, wallet=None, message=None):
from getpass import getpass
if pw is None:
wallet_pw = getpass('Wallet password: ')
else:
wallet_pw = pw
    print(make_api_request("send", address=address,
                           amount=amount, message=message,
                           wallet_name=wallet, password=wallet_pw))
@action
def peers():
peers = make_api_request("peers")
pprint(peers)
@action
def history(address):
history = make_api_request("history", address=address)
pprint(history)
@action
def stop():
print(make_api_request("stop"))
@action
def start_miner(pw, wallet=None):
print(make_api_request("start_miner", wallet_name=wallet, password=pw))
@action
def stop_miner():
print(make_api_request("stop_miner"))
@action
def status_miner():
print(make_api_request("status_miner"))
@action
def difficulty():
result = make_api_request("difficulty")
if isinstance(result, bytearray):
print(result.hex())
else:
print(result)
@action
def mempool():
txs = make_api_request("mempool")
pprint(txs)
def run(argv):
parser = argparse.ArgumentParser(description='CLI for halocoin.')
parser.add_argument('action', choices=sorted(actions.keys()),
help="Main action to perform by this CLI.")
parser.add_argument('--version', action='version', version='%(prog)s ' + custom.version)
parser.add_argument('--address', action="store", type=str, dest='address',
help='Give a valid blockchain address')
parser.add_argument('--message', action="store", type=str, dest='message',
help='Message to send with transaction')
parser.add_argument('--amount', action="store", type=int, dest='amount',
help='Amount of coins that are going to be used')
parser.add_argument('--start', metavar='<integer>', action="store", type=str, dest='start',
help='Starting number while requesting range of blocks')
parser.add_argument('--end', metavar='<integer>', action="store", type=str, dest='end',
help='Ending number while requesting range of blocks')
parser.add_argument('--file', metavar='/file/path', action="store", type=str, dest='file',
help='File path for wallet upload')
parser.add_argument('--wallet', metavar='my_wallet', action="store", type=str, dest='wallet',
help='Wallet name')
parser.add_argument('--config', action="store", type=str, dest='config',
help='Config file address. Use with start command.')
parser.add_argument('--pw', action="store", type=str, dest='pw',
help='NOT RECOMMENDED! If you want to pass wallet password as argument.')
parser.add_argument('--dir', action="store", type=str, dest='dir',
help='Directory for halocoin to use.')
parser.add_argument('--port', action="store", type=int, dest='port',
help='Override API port defined in config file.')
parser.add_argument('--force', action="store_true", dest='force',
help='Force something that makes trouble.')
args = parser.parse_args(argv[1:])
config, working_dir = extract_configuration(args.dir, args.config)
global connection_port
connection_port = config['port']['api']
from inspect import signature
sig = signature(actions[args.action])
kwargs = {}
for parameter in sig.parameters.keys():
if sig.parameters[parameter].default == Parameter.empty and \
(not hasattr(args, parameter) or getattr(args, parameter) is None):
sys.stderr.write("\"{}\" requires parameter {}\n".format(args.action, parameter))
sys.exit(1)
kwargs[parameter] = getattr(args, parameter)
actions[args.action](**kwargs)
return
def main():
if sys.stdin.isatty():
run(sys.argv)
else:
argv = sys.stdin.read().split(' ')
run(argv)
if __name__ == '__main__':
    main()
|
|
# Copyright 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import textwrap
import mock
import pep8
from nova.hacking import checks
from nova import test
class HackingTestCase(test.NoDBTestCase):
"""This class tests the hacking checks in nova.hacking.checks by passing
strings to the check methods like the pep8/flake8 parser would. The parser
loops over each line in the file and then passes the parameters to the
check method. The parameter names in the check method dictate what type of
object is passed to the check method. The parameter types are::
logical_line: A processed line with the following modifications:
- Multi-line statements converted to a single line.
- Stripped left and right.
- Contents of strings replaced with "xxx" of same length.
- Comments removed.
physical_line: Raw line of text from the input file.
lines: a list of the raw lines from the input file
tokens: the tokens that contribute to this logical line
line_number: line number in the input file
total_lines: number of lines in the input file
blank_lines: blank lines before this one
indent_char: indentation character in this file (" " or "\t")
indent_level: indentation (with tabs expanded to multiples of 8)
previous_indent_level: indentation on previous line
previous_logical: previous logical line
filename: Path of the file being run through pep8
When running a test on a check method the return will be False/None if
there is no violation in the sample input. If there is an error a tuple is
returned with a position in the line, and a message. So to check the result
just assertTrue if the check is expected to fail and assertFalse if it
should pass.
"""
def test_virt_driver_imports(self):
expect = (0, "N311: importing code from other virt drivers forbidden")
self.assertEqual(expect, checks.import_no_virt_driver_import_deps(
"from nova.virt.libvirt import utils as libvirt_utils",
"./nova/virt/xenapi/driver.py"))
self.assertEqual(expect, checks.import_no_virt_driver_import_deps(
"import nova.virt.libvirt.utils as libvirt_utils",
"./nova/virt/xenapi/driver.py"))
self.assertIsNone(checks.import_no_virt_driver_import_deps(
"from nova.virt.libvirt import utils as libvirt_utils",
"./nova/virt/libvirt/driver.py"))
self.assertIsNone(checks.import_no_virt_driver_import_deps(
"import nova.virt.firewall",
"./nova/virt/libvirt/firewall.py"))
def test_virt_driver_config_vars(self):
self.assertIsInstance(checks.import_no_virt_driver_config_deps(
"CONF.import_opt('volume_drivers', "
"'nova.virt.libvirt.driver', group='libvirt')",
"./nova/virt/xenapi/driver.py"), tuple)
self.assertIsNone(checks.import_no_virt_driver_config_deps(
"CONF.import_opt('volume_drivers', "
"'nova.virt.libvirt.driver', group='libvirt')",
"./nova/virt/libvirt/volume.py"))
def test_no_author_tags(self):
self.assertIsInstance(checks.no_author_tags("# author: jogo"), tuple)
self.assertIsInstance(checks.no_author_tags("# @author: jogo"), tuple)
self.assertIsInstance(checks.no_author_tags("# @Author: jogo"), tuple)
self.assertIsInstance(checks.no_author_tags("# Author: jogo"), tuple)
self.assertIsInstance(checks.no_author_tags(".. moduleauthor:: jogo"),
tuple)
self.assertIsNone(checks.no_author_tags("# authorization of this"))
self.assertEqual(2, checks.no_author_tags("# author: jogo")[0])
self.assertEqual(2, checks.no_author_tags("# Author: jogo")[0])
self.assertEqual(3, checks.no_author_tags(".. moduleauthor:: jogo")[0])
def test_assert_true_instance(self):
self.assertEqual(len(list(checks.assert_true_instance(
"self.assertTrue(isinstance(e, "
"exception.BuildAbortException))"))), 1)
self.assertEqual(
len(list(checks.assert_true_instance("self.assertTrue()"))), 0)
def test_assert_equal_type(self):
self.assertEqual(len(list(checks.assert_equal_type(
"self.assertEqual(type(als['QuicAssist']), list)"))), 1)
self.assertEqual(
len(list(checks.assert_equal_type("self.assertTrue()"))), 0)
def test_assert_equal_none(self):
self.assertEqual(len(list(checks.assert_equal_none(
"self.assertEqual(A, None)"))), 1)
self.assertEqual(len(list(checks.assert_equal_none(
"self.assertEqual(None, A)"))), 1)
self.assertEqual(
len(list(checks.assert_equal_none("self.assertIsNone()"))), 0)
def test_no_translate_debug_logs(self):
self.assertEqual(len(list(checks.no_translate_debug_logs(
"LOG.debug(_('foo'))", "nova/scheduler/foo.py"))), 1)
self.assertEqual(len(list(checks.no_translate_debug_logs(
"LOG.debug('foo')", "nova/scheduler/foo.py"))), 0)
self.assertEqual(len(list(checks.no_translate_debug_logs(
"LOG.info(_('foo'))", "nova/scheduler/foo.py"))), 0)
def test_no_setting_conf_directly_in_tests(self):
self.assertEqual(len(list(checks.no_setting_conf_directly_in_tests(
"CONF.option = 1", "nova/tests/test_foo.py"))), 1)
self.assertEqual(len(list(checks.no_setting_conf_directly_in_tests(
"CONF.group.option = 1", "nova/tests/test_foo.py"))), 1)
self.assertEqual(len(list(checks.no_setting_conf_directly_in_tests(
"CONF.option = foo = 1", "nova/tests/test_foo.py"))), 1)
# Shouldn't fail with comparisons
self.assertEqual(len(list(checks.no_setting_conf_directly_in_tests(
"CONF.option == 'foo'", "nova/tests/test_foo.py"))), 0)
self.assertEqual(len(list(checks.no_setting_conf_directly_in_tests(
"CONF.option != 1", "nova/tests/test_foo.py"))), 0)
# Shouldn't fail since not in nova/tests/
self.assertEqual(len(list(checks.no_setting_conf_directly_in_tests(
"CONF.option = 1", "nova/compute/foo.py"))), 0)
def test_log_translations(self):
logs = ['audit', 'error', 'info', 'warn', 'warning', 'critical',
'exception']
levels = ['_LI', '_LW', '_LE', '_LC']
debug = "LOG.debug('OK')"
self.assertEqual(0,
len(list(
checks.validate_log_translations(debug, debug, 'f'))))
for log in logs:
bad = 'LOG.%s("Bad")' % log
self.assertEqual(1,
len(list(
checks.validate_log_translations(bad, bad, 'f'))))
ok = "LOG.%s(_('OK'))" % log
self.assertEqual(0,
len(list(
checks.validate_log_translations(ok, ok, 'f'))))
ok = "LOG.%s('OK') # noqa" % log
self.assertEqual(0,
len(list(
checks.validate_log_translations(ok, ok, 'f'))))
ok = "LOG.%s(variable)" % log
self.assertEqual(0,
len(list(
checks.validate_log_translations(ok, ok, 'f'))))
for level in levels:
ok = "LOG.%s(%s('OK'))" % (log, level)
self.assertEqual(0,
len(list(
checks.validate_log_translations(ok, ok, 'f'))))
def test_no_mutable_default_args(self):
self.assertEqual(1, len(list(checks.no_mutable_default_args(
" def fake_suds_context(calls={}):"))))
self.assertEqual(1, len(list(checks.no_mutable_default_args(
"def get_info_from_bdm(virt_type, bdm, mapping=[])"))))
self.assertEqual(0, len(list(checks.no_mutable_default_args(
"defined = []"))))
self.assertEqual(0, len(list(checks.no_mutable_default_args(
"defined, undefined = [], {}"))))
def test_check_explicit_underscore_import(self):
self.assertEqual(len(list(checks.check_explicit_underscore_import(
"LOG.info(_('My info message'))",
"cinder/tests/other_files.py"))), 1)
self.assertEqual(len(list(checks.check_explicit_underscore_import(
"msg = _('My message')",
"cinder/tests/other_files.py"))), 1)
self.assertEqual(len(list(checks.check_explicit_underscore_import(
"from cinder.i18n import _",
"cinder/tests/other_files.py"))), 0)
self.assertEqual(len(list(checks.check_explicit_underscore_import(
"LOG.info(_('My info message'))",
"cinder/tests/other_files.py"))), 0)
self.assertEqual(len(list(checks.check_explicit_underscore_import(
"msg = _('My message')",
"cinder/tests/other_files.py"))), 0)
self.assertEqual(len(list(checks.check_explicit_underscore_import(
"from cinder.i18n import _, _LW",
"cinder/tests/other_files2.py"))), 0)
self.assertEqual(len(list(checks.check_explicit_underscore_import(
"msg = _('My message')",
"cinder/tests/other_files2.py"))), 0)
self.assertEqual(len(list(checks.check_explicit_underscore_import(
"_ = translations.ugettext",
"cinder/tests/other_files3.py"))), 0)
self.assertEqual(len(list(checks.check_explicit_underscore_import(
"msg = _('My message')",
"cinder/tests/other_files3.py"))), 0)
def test_use_jsonutils(self):
def __get_msg(fun):
msg = ("N324: jsonutils.%(fun)s must be used instead of "
"json.%(fun)s" % {'fun': fun})
return [(0, msg)]
for method in ('dump', 'dumps', 'load', 'loads'):
self.assertEqual(
__get_msg(method),
list(checks.use_jsonutils("json.%s(" % method,
"./nova/virt/xenapi/driver.py")))
self.assertEqual(0,
len(list(checks.use_jsonutils("json.%s(" % method,
"./plugins/xenserver/script.py"))))
self.assertEqual(0,
len(list(checks.use_jsonutils("jsonx.%s(" % method,
"./nova/virt/xenapi/driver.py"))))
self.assertEqual(0,
len(list(checks.use_jsonutils("json.dumb",
"./nova/virt/xenapi/driver.py"))))
# We are patching pep8 so that only the check under test is actually
# installed.
@mock.patch('pep8._checks',
{'physical_line': {}, 'logical_line': {}, 'tree': {}})
def _run_check(self, code, checker):
pep8.register_check(checker)
lines = textwrap.dedent(code).strip().splitlines(True)
checker = pep8.Checker(lines=lines)
checker.check_all()
checker.report._deferred_print.sort()
return checker.report._deferred_print
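    # Note (hedged, based on the pep8 internals exercised above): each entry
    # of report._deferred_print starts with (line_number, column, error_code),
    # which is why _assert_has_errors below compares e[:3] against tuples
    # like (5, 16, 'N325').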
def _assert_has_errors(self, code, checker, expected_errors=None):
actual_errors = [e[:3] for e in self._run_check(code, checker)]
self.assertEqual(expected_errors or [], actual_errors)
def test_str_exception(self):
checker = checks.CheckForStrExc
code = """
def f(a, b):
try:
p = str(a) + str(b)
except ValueError as e:
p = str(e)
return p
"""
errors = [(5, 16, 'N325')]
self._assert_has_errors(code, checker, expected_errors=errors)
code = """
def f(a, b):
try:
p = str(a) + str(b)
except ValueError as e:
p = unicode(e)
return p
"""
errors = []
self._assert_has_errors(code, checker, expected_errors=errors)
code = """
def f(a, b):
try:
p = str(a) + str(b)
except ValueError as e:
try:
p = unicode(a) + unicode(b)
except ValueError as ve:
p = str(e) + str(ve)
p = unicode(e)
return p
"""
errors = [(8, 20, 'N325'), (8, 29, 'N325')]
self._assert_has_errors(code, checker, expected_errors=errors)
def test_trans_add(self):
checker = checks.CheckForTransAdd
code = """
def fake_tran(msg):
return msg
_ = fake_tran
_LI = _
_LW = _
_LE = _
_LC = _
def f(a, b):
msg = _('test') + 'add me'
msg = _LI('test') + 'add me'
msg = _LW('test') + 'add me'
msg = _LE('test') + 'add me'
msg = _LC('test') + 'add me'
msg = 'add to me' + _('test')
return msg
"""
errors = [(13, 10, 'N326'), (14, 10, 'N326'), (15, 10, 'N326'),
(16, 10, 'N326'), (17, 10, 'N326'), (18, 24, 'N326')]
self._assert_has_errors(code, checker, expected_errors=errors)
code = """
def f(a, b):
msg = 'test' + 'add me'
return msg
"""
errors = []
self._assert_has_errors(code, checker, expected_errors=errors)
|
|
# Copyright 2013 Rackspace
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import logging
import re
import webob.dec
import webob.exc
LOG = logging.getLogger('aversion')
SLASH_RE = re.compile('/+')
def quoted_split(string, sep, quotes='"'):
"""
Split a string on the given separation character, but respecting
double-quoted sections of the string. Returns an iterator.
:param string: The string to split.
:param sep: The character separating sections of the string.
:param quotes: A string specifying all legal quote characters.
:returns: An iterator which will iterate over each element of the
string separated by the designated separator.
"""
# Initialize the algorithm
start = None
escape = False
quote = False
# Walk through the string
for i, c in enumerate(string):
# Save the start index
if start is None:
start = i
# Handle escape sequences
if escape:
escape = False
# Handle quoted strings
elif quote:
if c == '\\':
escape = True
elif c == quote:
quote = False
# Handle the separator
elif c == sep:
yield string[start:i]
start = None
# Handle quotes
elif c in quotes:
quote = c
# Yield the last part
if start is not None:
yield string[start:]
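# Hedged example of the splitter above (worked by hand, not from the original
# source):
#     list(quoted_split('a;b="x;y";c', ';'))  =>  ['a', 'b="x;y"', 'c']
# The ';' inside the double-quoted section is not treated as a separator.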
def unquote(quoted):
"""
Unquotes a value, as drawn from a header.
Note: This does not use the real unquoting algorithm, but what
browsers are actually using for quoting. Internet Explorer (and
probably some other browsers) fails to apply the proper quoting
algorithm. Thus, the algorithm used is simply to remove the
quotes.
:param quoted: The quoted string.
:returns: The string with the quoting removed.
"""
if quoted[:1] == '"' and quoted[-1:] == '"':
return quoted[1:-1]
return quoted
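# Hedged examples (worked by hand): unquote('"abc"') == 'abc', while
# unquote('abc') and unquote('"abc') are returned unchanged.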
def parse_ctype(ctype):
"""
Parse a content type.
:param ctype: The content type, with corresponding parameters.
:returns: A tuple of the content type and a dictionary containing
the content type parameters. The content type will
additionally be available in the dictionary as the '_'
key.
"""
result_ctype = None
result = {}
for part in quoted_split(ctype, ';'):
# Extract the content type first
if result_ctype is None:
result_ctype = part
result['_'] = part
continue
# OK, we have a 'key' or 'key=value' to handle; figure it
# out...
equal = part.find('=')
if equal > 0 and part.find('"', 0, equal) < 0:
result[part[:equal]] = unquote(part[equal + 1:])
else:
# If equal > 0 but it's preceded by a ", it's seriously
# messed up, but go ahead and be liberal...
result[part] = True
# If we failed to parse a content type out, return an empty
# content type
if result_ctype is None:
result_ctype = ''
return result_ctype, result
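# Hedged example (worked by hand, not from the original source):
#     parse_ctype('application/json;q=0.5;charset="utf-8"')
# returns ('application/json',
#          {'_': 'application/json', 'q': '0.5', 'charset': 'utf-8'});
# the content type itself is duplicated under the '_' key.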
def _match_mask(mask, ctype):
"""
Determine if a content type mask matches a given content type.
:param mask: The content type mask, taken from the Accept
header.
:param ctype: The content type to match to the mask.
"""
# Handle the simple cases first
if '*' not in mask:
return ctype == mask
elif mask == '*/*':
return True
elif not mask.endswith('/*'):
return False
mask_major = mask[:-2]
ctype_major = ctype.split('/', 1)[0]
return ctype_major == mask_major
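# Hedged examples (worked by hand): _match_mask('*/*', 'text/html') and
# _match_mask('text/*', 'text/plain') are True;
# _match_mask('text/*', 'application/json') is False.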
def best_match(requested, allowed):
    """
    Determine the best content type to use for the request.
    :param requested: The value of the Accept header: a comma-separated
                      list of content types with optional parameters.
    :param allowed: A list of the available content types.
    :returns: A tuple of the best match content type and the
              parameters for that content type.
    """
requested = [parse_ctype(ctype) for ctype in quoted_split(requested, ',')]
    best_q = -1
    best_ctype = ''
    best_params = {}
    best_mask = '*/*'
    # Walk the list of content types
    for ctype in allowed:
        # Compare to the accept list
        for ctype_mask, params in requested:
            try:
                q = float(params.get('q', 1.0))
            except ValueError:
                # Bad quality value
                continue
            if q < best_q:
                # Not any better
                continue
            elif best_q == q:
                # Tie-break on specificity: keep the current winner unless
                # the new mask has strictly fewer wildcards
                if best_mask.count('*') <= ctype_mask.count('*'):
                    continue
            # OK, see if we have a match
            if _match_mask(ctype_mask, ctype):
                best_q = q
                best_ctype = ctype
                best_params = params
                best_mask = ctype_mask
    # Return the best match
    return best_ctype, best_params
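# Hedged example (worked by hand, not from the original source):
#     best_match('text/html;q=0.5,application/json',
#                ['application/json', 'text/html'])
# returns ('application/json', {'_': 'application/json'}): the JSON entry wins
# with its default quality of 1.0 over text/html's explicit q=0.5. Note that
# quoted_split does not strip whitespace, so a ", " separator (with a space)
# would leave ' application/json' unmatched by the exact string comparison in
# _match_mask.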
class TypeRule(object):
"""
Represents a basic rule for content type interpretation.
"""
def __init__(self, ctype, version, params):
"""
Initialize a TypeRule object.
:param ctype: The resultant content type. If None, the
existing content type will be used; otherwise,
the content type will be formed by formatting
the string, using the parameter dictionary.
:param version: The resultant version. If None, no version
will be returned; otherwise, the version will
be formed by formatting the string, using the
parameter dictionary.
:param params: Extra parameters. These are unused by
AVersion, but are included in the configuration
made available through the 'aversion.config'
WSGI environment variable.
"""
self.ctype = ctype
self.version = version
self.params = params
def __call__(self, params):
"""
Evaluate a TypeRule.
:param params: A dictionary of content type parameters. This
dictionary must contain the key '_', which must
be the content type being passed in.
:returns: A tuple of the final content type and version.
"""
# Determine the desired content type
try:
ctype = (self.ctype % params) if self.ctype else params['_']
except KeyError:
# Treat it as undefined rather than defaulted
ctype = None
# Determine the desired version
try:
version = (self.version % params) if self.version else None
except KeyError:
version = None
return ctype, version
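# Hedged example (worked by hand, not from the original source): the ctype and
# version strings are %-format templates over the content type parameters:
#     rule = TypeRule(ctype='application/%(subtype)s', version='v%(v)s',
#                     params={})
#     rule({'_': 'application/vnd.acme+json', 'subtype': 'json', 'v': '2'})
# returns ('application/json', 'v2'); if 'v' were missing from the parameter
# dictionary, the KeyError is swallowed and the version comes back as None.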
class Result(object):
"""
Helper class to maintain results for the version and content type
selection algorithm.
"""
def __init__(self):
"""
Initialize a Result.
"""
self.version = None
self.ctype = None
self.orig_ctype = None
def __nonzero__(self):
"""
Return True only when the Result object is completely
populated.
"""
return self.version is not None and self.ctype is not None
def set_version(self, version):
"""
Set the selected version. Will not override the value of the
version if that has already been determined.
:param version: The version string to set.
"""
if self.version is None:
self.version = version
def set_ctype(self, ctype, orig_ctype=None):
"""
Set the selected content type. Will not override the value of
the content type if that has already been determined.
:param ctype: The content type string to set.
:param orig_ctype: The original content type, as found in the
configuration.
"""
if self.ctype is None:
self.ctype = ctype
self.orig_ctype = orig_ctype
def _set_key(log_prefix, result_dict, key, value, desc="parameter"):
"""
Helper to set a key value in a dictionary. This function issues a
warning if the key has already been set, and issues a warning and
returns without setting the value if the value is not surrounded
by parentheses. This is used to eliminate duplicated code from
the rule parsers below.
:param log_prefix: A prefix to use in log messages. This should
be the configuration key.
:param result_dict: A dictionary of results, into which the key
and value should be inserted.
:param key: The dictionary key to insert.
:param value: The value to insert into the dictionary.
:param desc: A description of what the dictionary is. This is
used in log messages help the user understand what
the log message is referring to. By default, this
description is "parameter", indicating that entries
in the dictionary are parameters of something;
however, _parse_type_rule() also uses "token type" to
help identify its more complex tokens.
"""
if key in result_dict:
LOG.warn("%s: Duplicate value for %s %r" %
(log_prefix, desc, key))
# Allow the overwrite
# Demand the value be quoted
if len(value) <= 2 or value[0] not in ('"', "'") or value[0] != value[-1]:
LOG.warn("%s: Invalid value %r for %s %r" %
(log_prefix, value, desc, key))
return
# Save the value
result_dict[key] = value[1:-1]
def _parse_version_rule(loader, version, verspec):
"""
Parse a version rule. The first token is the name of the
application implementing that API version. The remaining tokens
are key="quoted value" pairs that specify parameters; these
parameters are ignored by AVersion, but may be used by the
application.
:param loader: An object with a get_app() method, which will be
used to load the actual applications.
:param version: The version name.
:param verspec: The version text, described above.
:returns: A dictionary of three keys: "app" is the application;
"name" is the version identification string; and
"params" is a dictionary of parameters.
"""
result = dict(name=version, params={})
for token in quoted_split(verspec, ' ', quotes='"\''):
if not token:
continue
# Convert the application
if 'app' not in result:
result['app'] = loader.get_app(token)
continue
# What remains is key="quoted value" pairs...
key, _eq, value = token.partition('=')
# Set the parameter key
_set_key('version.%s' % version, result['params'], key, value)
# Make sure we have an application
if 'app' not in result:
raise ImportError("Cannot load application for version %r" % version)
return result
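# Hedged example (illustrative; `loader` is whatever PasteDeploy-style loader
# is in use): a configuration line such as
#     version.v1 = myapp name="Version One"
# reaches this function as _parse_version_rule(loader, 'v1',
# 'myapp name="Version One"') and yields
#     {'name': 'v1', 'app': loader.get_app('myapp'),
#      'params': {'name': 'Version One'}}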
def _parse_alias_rule(alias, alias_spec):
"""
Parse an alias rule. The first token is the canonical name of the
version. The remaining tokens are key="quoted value" pairs that
specify parameters; these parameters are ignored by AVersion, but
may be used by the application.
:param alias: The alias name.
:param alias_spec: The alias text, described above.
:returns: A dictionary of three keys: "alias" is the alias name;
"version" is the canonical version identification
string; and "params" is a dictionary of parameters.
"""
result = dict(alias=alias, params={})
for token in quoted_split(alias_spec, ' ', quotes='"\''):
if not token:
continue
# Suck out the canonical version name
if 'version' not in result:
result['version'] = token
continue
# What remains is key="quoted value" pairs...
key, _eq, value = token.partition('=')
# Set the parameter key
_set_key('alias.%s' % alias, result['params'], key, value)
# Make sure we have a canonical version
if 'version' not in result:
raise KeyError("Cannot determine canonical version for alias %r" %
alias)
return result
def _parse_type_rule(ctype, typespec):
"""
Parse a content type rule. Unlike the other rules, content type
rules are more complex, since both selected content type and API
version must be expressed by one rule. The rule is split on
whitespace, then the components beginning with "type:" and
"version:" are selected; in both cases, the text following the ":"
character will be treated as a format string, which will be
formatted using a content parameter dictionary. Components
beginning with "param:" specify key="quoted value" pairs that
specify parameters; these parameters are ignored by AVersion, but
may be used by the application.
:param ctype: The content type the rule is for.
:param typespec: The rule text, described above.
:returns: An instance of TypeRule.
"""
params = {'param': {}}
for token in quoted_split(typespec, ' ', quotes='"\''):
if not token:
continue
tok_type, _sep, tok_val = token.partition(':')
# Validate the token type
if not tok_val:
LOG.warn("%s: Invalid type token %r" % (ctype, token))
continue
elif tok_type not in ('type', 'version', 'param'):
LOG.warn("%s: Unrecognized token type %r" % (ctype, tok_type))
continue
# Intercept 'param' clauses
if tok_type == 'param':
key, _eq, value = tok_val.partition('=')
# Set the parameter key
_set_key('type.%s' % ctype, params['param'], key, value)
continue
# Set the token value
_set_key('type.%s' % ctype, params, tok_type, tok_val,
desc="token type")
return TypeRule(ctype=params.get('type'),
version=params.get('version'),
params=params['param'])
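# Hedged example (worked by hand; note that _set_key() demands quoted values):
#     _parse_type_rule('application/vnd.acme+json',
#                      'type:"application/json" version:"v%(v)s" param:x="1"')
# yields TypeRule(ctype='application/json', version='v%(v)s',
# params={'x': '1'}).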
def _uri_normalize(uri):
"""
Normalize a URI. Multiple slashes are collapsed into a single
'/', a leading '/' is added, and trailing slashes are removed.
:param uri: The URI to normalize.
:returns: The normalized URI.
"""
return '/' + SLASH_RE.sub('/', uri).strip('/')
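# Hedged examples (worked by hand):
#     _uri_normalize('//v1.0///tenant/')  =>  '/v1.0/tenant'
#     _uri_normalize('/')                 =>  '/'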
class AVersion(object):
"""
A composite application for PasteDeploy-based WSGI stacks which
selects the version of an API and the requested content type based
on criteria including URI prefix and suffix and content type
parameters.
"""
def __init__(self, loader, global_conf, **local_conf):
"""
Initialize an AVersion object.
:param loader: An object with a get_app() method, which will
be used to load the actual applications.
:param global_conf: The global configuration. Ignored.
:param local_conf: The configuration for this application.
See the README.rst for a full discussion of
the defined keys and the meaning of their
values.
"""
# Process the configuration
self.overwrite_headers = True
self.version_app = None
self.versions = {}
self.aliases = {}
uris = {}
self.types = {}
self.formats = {}
for key, value in local_conf.items():
if key == 'version':
# The version application--what we call if no version
# is specified
self.version_app = loader.get_app(value)
elif key == 'overwrite_headers':
# Alter whether or not we overwrite the headers
value = value.lower()
if value in ('true', 't', 'on', 'yes', 'enable'):
self.overwrite_headers = True
elif value in ('false', 'f', 'off', 'no', 'disable'):
self.overwrite_headers = False
else:
try:
self.overwrite_headers = bool(int(value))
except ValueError:
LOG.warn("Unrecognized value %r for configuration "
"key 'overwrite_headers'" % value)
elif key.startswith('version.'):
# The application for a given version
self.versions[key[8:]] = _parse_version_rule(loader, key[8:],
value)
elif key.startswith('alias.'):
# An alias for a given version
self.aliases[key[6:]] = _parse_alias_rule(key[6:], value)
elif key.startswith('uri.'):
# A mapping between URI prefixes and versions; note
# that the URI is normalized
uris[_uri_normalize(key[4:])] = value
elif key.startswith('type.'):
# A mapping between a passed-in content type and the
# desired version and final content type
self.types[key[5:]] = _parse_type_rule(key[5:], value)
elif key[0] == '.':
# A mapping between a file extension and the desired
# content type
self.formats[key] = value
# We want to search URIs in the correct order
self.uris = sorted(uris.items(), key=lambda x: len(x[0]),
reverse=True)
# The versioning application may find it useful to have some
# introspection on the AVersion configuration, so build up a
# couple of data structures we can add to requests. We start
# with adding URI prefixes to the version descriptors...
for prefix, version in uris.items():
if version not in self.versions:
continue
# Add the prefixes to the version descriptor
self.versions[version].setdefault('prefixes', [])
self.versions[version]['prefixes'].append(prefix)
# Next, set up a list of type information
types = dict((ctype, dict(name=ctype, params=rule.params))
for ctype, rule in self.types.items())
# Add in information about the formats
for suffix, ctype in self.formats.items():
types.setdefault(ctype, dict(name=ctype, params={}))
types[ctype].setdefault('suffixes', [])
types[ctype]['suffixes'].append(suffix)
# Now, build the config dictionary tree we will pass to
# requests
self.config = dict(
versions=self.versions,
aliases=self.aliases,
types=types,
)
@webob.dec.wsgify
def __call__(self, request):
"""
Process a WSGI request, selecting the appropriate application
to pass the request to. In addition, if the desired content
type can be determined, the Accept header will be altered to
match.
:param request: The Request object provided by WebOb.
"""
# Process the request; broken out for easy override and
# testing
result = self._process(request)
# Add the config to the environment; we use a deep copy to
# avoid accidental overwrite of the data
request.environ['aversion.config'] = copy.deepcopy(self.config)
# Set the Accept header
if result.ctype:
request.environ['aversion.response_type'] = result.ctype
request.environ['aversion.orig_response_type'] = result.orig_ctype
request.environ['aversion.accept'] = request.headers.get('accept')
if self.overwrite_headers:
request.headers['accept'] = '%s;q=1.0' % result.ctype
# Determine the requested version; allows mapping through
# aliases to a canonical value
if result.version in self.aliases:
version = self.aliases[result.version]['version']
else:
version = result.version
# Select the correct application
try:
app = self.versions[version]['app']
request.environ['aversion.version'] = version
except KeyError:
app = self.version_app
request.environ['aversion.version'] = None
if app:
return request.get_response(app)
else:
return webob.exc.HTTPInternalServerError(
explanation='Cannot determine application to serve request')
def _process(self, request, result=None):
"""
Process the rules for the request.
:param request: The Request object provided by WebOb.
:param result: The Result object to store the results in. If
None, one will be allocated.
:returns: A Result object, containing the selected version and
content type.
"""
# Allocate a result and process all the rules
result = result if result is not None else Result()
self._proc_uri(request, result)
self._proc_ctype_header(request, result)
self._proc_accept_header(request, result)
return result
def _proc_uri(self, request, result):
"""
Process the URI rules for the request. Both the desired API
version and desired content type can be determined from those
rules.
:param request: The Request object provided by WebOb.
:param result: The Result object to store the results in.
"""
if result:
# Result has already been fully determined
return
# First, determine the version based on the URI prefix
for prefix, version in self.uris:
if (request.path_info == prefix or
request.path_info.startswith(prefix + '/')):
result.set_version(version)
# Update the request particulars
request.script_name += prefix
request.path_info = request.path_info[len(prefix):]
if not request.path_info:
request.path_info = '/'
break
# Next, determine the content type based on the URI suffix
for format, ctype in self.formats.items():
if request.path_info.endswith(format):
result.set_ctype(ctype)
# Update the request particulars
request.path_info = request.path_info[:-len(format)]
break
def _proc_ctype_header(self, request, result):
"""
Process the Content-Type header rules for the request. Only
the desired API version can be determined from those rules.
:param request: The Request object provided by WebOb.
:param result: The Result object to store the results in.
"""
if result:
# Result has already been fully determined
return
try:
ctype = request.headers['content-type']
except KeyError:
# No content-type header to examine
return
# Parse the content type
ctype, params = parse_ctype(ctype)
# Is it a recognized content type?
if ctype not in self.types:
return
# Get the mapped ctype and version
mapped_ctype, mapped_version = self.types[ctype](params)
# Update the content type header and set the version
if mapped_ctype:
request.environ['aversion.request_type'] = mapped_ctype
request.environ['aversion.orig_request_type'] = ctype
request.environ['aversion.content_type'] = \
request.headers['content-type']
if self.overwrite_headers:
request.headers['content-type'] = mapped_ctype
if mapped_version:
result.set_version(mapped_version)
def _proc_accept_header(self, request, result):
"""
Process the Accept header rules for the request. Both the
desired API version and content type can be determined from
those rules.
:param request: The Request object provided by WebOb.
:param result: The Result object to store the results in.
"""
if result:
# Result has already been fully determined
return
try:
accept = request.headers['accept']
except KeyError:
# No Accept header to examine
return
# Obtain the best-match content type and its parameters
ctype, params = best_match(accept, self.types.keys())
# Is it a recognized content type?
if ctype not in self.types:
return
# Get the mapped ctype and version
mapped_ctype, mapped_version = self.types[ctype](params)
# Set the content type and version
if mapped_ctype:
result.set_ctype(mapped_ctype, ctype)
if mapped_version:
result.set_version(mapped_version)
|
|
"""SCons.Tool.icl
Tool-specific initialization for the Intel C/C++ compiler.
Supports Linux and Windows compilers, v7 and up.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/intelc.py 4043 2009/02/23 09:06:45 scons"
import math, sys, os.path, glob, string, re
is_windows = sys.platform == 'win32'
is_win64 = is_windows and (os.environ.get('PROCESSOR_ARCHITECTURE') == 'AMD64' or
os.environ.get('PROCESSOR_ARCHITEW6432') == 'AMD64')
is_linux = sys.platform == 'linux2'
is_mac = sys.platform == 'darwin'
if is_windows:
import SCons.Tool.msvc
elif is_linux:
import SCons.Tool.gcc
elif is_mac:
import SCons.Tool.gcc
import SCons.Errors
import SCons.Util
import SCons.Warnings
# Exceptions for this tool
class IntelCError(SCons.Errors.InternalError):
pass
class MissingRegistryError(IntelCError): # missing registry entry
pass
class MissingDirError(IntelCError): # dir not found
pass
class NoRegistryModuleError(IntelCError): # can't read registry at all
pass
def uniquify(s):
"""Return a sequence containing only one copy of each unique element from input sequence s.
Does not preserve order.
Input sequence must be hashable (i.e. must be usable as a dictionary key)."""
u = {}
for x in s:
u[x] = 1
return u.keys()
def linux_ver_normalize(vstr):
"""Normalize a Linux compiler version number.
Intel changed from "80" to "9.0" in 2005, so we assume if the number
is greater than 60 it's an old-style number and otherwise new-style.
Always returns an old-style float like 80 or 90 for compatibility with Windows.
Shades of Y2K!"""
# Check for version number like 9.1.026: return 91.026
m = re.match(r'([0-9]+)\.([0-9]+)\.([0-9]+)', vstr)
if m:
vmaj,vmin,build = m.groups()
return float(vmaj) * 10 + float(vmin) + float(build) / 1000.
else:
f = float(vstr)
if is_windows:
return f
else:
if f < 60: return f * 10.0
else: return f
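# Worked examples (illustrative): linux_ver_normalize('9.1.026') -> 91.026;
# linux_ver_normalize('9.0') -> 9.0 on Windows but 90.0 on Linux/Mac;
# linux_ver_normalize('80') -> 80.0 on either platform.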
def check_abi(abi):
"""Check for valid ABI (application binary interface) name,
and map into canonical one"""
if not abi:
return None
abi = abi.lower()
# valid_abis maps input name to canonical name
if is_windows:
valid_abis = {'ia32' : 'ia32',
'x86' : 'ia32',
'ia64' : 'ia64',
'em64t' : 'em64t',
'amd64' : 'em64t'}
if is_linux:
valid_abis = {'ia32' : 'ia32',
'x86' : 'ia32',
'x86_64' : 'x86_64',
'em64t' : 'x86_64',
'amd64' : 'x86_64'}
if is_mac:
valid_abis = {'ia32' : 'ia32',
'x86' : 'ia32',
'x86_64' : 'x86_64',
'em64t' : 'x86_64'}
try:
abi = valid_abis[abi]
except KeyError:
raise SCons.Errors.UserError, \
"Intel compiler: Invalid ABI %s, valid values are %s"% \
(abi, valid_abis.keys())
return abi
def vercmp(a, b):
"""Compare strings as floats,
but Intel changed Linux naming convention at 9.0"""
return cmp(linux_ver_normalize(b), linux_ver_normalize(a))
def get_version_from_list(v, vlist):
"""See if we can match v (string) in vlist (list of strings)
Linux has to match in a fuzzy way."""
if is_windows:
# Simple case, just find it in the list
if v in vlist: return v
else: return None
else:
# Fuzzy match: normalize version number first, but still return
# original non-normalized form.
fuzz = 0.001
for vi in vlist:
if math.fabs(linux_ver_normalize(vi) - linux_ver_normalize(v)) < fuzz:
return vi
# Not found
return None
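# Example (illustrative): on Linux, get_version_from_list('90', ['9.0', '8.0'])
# returns '9.0', since both '90' and '9.0' normalize to 90.0 (well within the
# 0.001 fuzz); on Windows only an exact match such as '9.0' would be found.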
def get_intel_registry_value(valuename, version=None, abi=None):
"""
Return a value from the Intel compiler registry tree. (Windows only)
"""
# Open the key:
if is_win64:
K = 'Software\\Wow6432Node\\Intel\\Compilers\\C++\\' + version + '\\'+abi.upper()
else:
K = 'Software\\Intel\\Compilers\\C++\\' + version + '\\'+abi.upper()
try:
k = SCons.Util.RegOpenKeyEx(SCons.Util.HKEY_LOCAL_MACHINE, K)
except SCons.Util.RegError:
raise MissingRegistryError, \
"%s was not found in the registry, for Intel compiler version %s, abi='%s'"%(K, version,abi)
# Get the value:
try:
v = SCons.Util.RegQueryValueEx(k, valuename)[0]
return v # or v.encode('iso-8859-1', 'replace') to remove unicode?
except SCons.Util.RegError:
raise MissingRegistryError, \
"%s\\%s was not found in the registry."%(K, valuename)
def get_all_compiler_versions():
"""Returns a sorted list of strings, like "70" or "80" or "9.0"
with most recent compiler version first.
"""
versions=[]
if is_windows:
if is_win64:
keyname = 'Software\\Wow6432Node\\Intel\\Compilers\\C++'
else:
keyname = 'Software\\Intel\\Compilers\\C++'
try:
k = SCons.Util.RegOpenKeyEx(SCons.Util.HKEY_LOCAL_MACHINE,
keyname)
except WindowsError:
return []
i = 0
versions = []
try:
while i < 100:
subkey = SCons.Util.RegEnumKey(k, i) # raises EnvironmentError
# Check that this refers to an existing dir.
# This is not 100% perfect but should catch common
# installation issues like when the compiler was installed
# and then the install directory deleted or moved (rather
# than uninstalling properly), so the registry values
# are still there.
ok = False
for try_abi in ('IA32', 'IA32e', 'IA64', 'EM64T'):
try:
d = get_intel_registry_value('ProductDir', subkey, try_abi)
except MissingRegistryError:
continue # not found in reg, keep going
if os.path.exists(d): ok = True
if ok:
versions.append(subkey)
else:
try:
# Registry points to nonexistent dir. Ignore this
# version.
value = get_intel_registry_value('ProductDir', subkey, 'IA32')
except MissingRegistryError, e:
# Registry key is left dangling (potentially
# after uninstalling).
print \
"scons: *** Ignoring the registry key for the Intel compiler version %s.\n" \
"scons: *** It seems that the compiler was uninstalled and that the registry\n" \
"scons: *** was not cleaned up properly.\n" % subkey
else:
print "scons: *** Ignoring "+str(value)
i = i + 1
except EnvironmentError:
# no more subkeys
pass
elif is_linux:
for d in glob.glob('/opt/intel_cc_*'):
# Typical dir here is /opt/intel_cc_80.
m = re.search(r'cc_(.*)$', d)
if m:
versions.append(m.group(1))
for d in glob.glob('/opt/intel/cc*/*'):
# Typical dir here is /opt/intel/cc/9.0 for IA32,
# /opt/intel/cce/9.0 for EMT64 (AMD64)
m = re.search(r'([0-9.]+)$', d)
if m:
versions.append(m.group(1))
elif is_mac:
for d in glob.glob('/opt/intel/cc*/*'):
# Typical dir here is /opt/intel/cc/9.0 for IA32,
# /opt/intel/cce/9.0 for EMT64 (AMD64)
m = re.search(r'([0-9.]+)$', d)
if m:
versions.append(m.group(1))
versions = uniquify(versions) # remove dups
versions.sort(vercmp)
return versions
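# Example (illustrative): on a Linux host with /opt/intel/cc/9.0 and
# /opt/intel_cc_80 installed, this returns ['9.0', '80'] -- vercmp() orders
# by the normalized value (90.0 before 80.0), most recent first.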
def get_intel_compiler_top(version, abi):
"""
Return the main path to the top-level dir of the Intel compiler,
using the given version.
The compiler will be in <top>/bin/icl.exe (icc on linux),
the include dir is <top>/include, etc.
"""
if is_windows:
if not SCons.Util.can_read_reg:
raise NoRegistryModuleError, "No Windows registry module was found"
top = get_intel_registry_value('ProductDir', version, abi)
if not os.path.exists(os.path.join(top, "Bin", "icl.exe")):
raise MissingDirError, \
"Can't find Intel compiler in %s"%(top)
elif is_mac or is_linux:
# first dir is new (>=9.0) style, second is old (8.0) style.
dirs=('/opt/intel/cc/%s', '/opt/intel_cc_%s')
if abi == 'x86_64':
dirs=('/opt/intel/cce/%s',) # 'e' stands for 'em64t', aka x86_64 aka amd64
top=None
for d in dirs:
if os.path.exists(os.path.join(d%version, "bin", "icc")):
top = d%version
break
if not top:
raise MissingDirError, \
"Can't find version %s Intel compiler in %s (abi='%s')"%(version,top, abi)
return top
def generate(env, version=None, abi=None, topdir=None, verbose=0):
"""Add Builders and construction variables for Intel C/C++ compiler
to an Environment.
args:
version: (string) compiler version to use, like "80"
abi: (string) target ABI: 'ia32', 'ia64' or 'em64t' on Windows; 'ia32' or 'x86_64' on Linux/Mac (synonyms are mapped by check_abi)
topdir: (string) compiler top dir, like
"c:\Program Files\Intel\Compiler70"
If topdir is used, version and abi are ignored.
verbose: (int) if >0, prints compiler version used.
"""
if not (is_mac or is_linux or is_windows):
# can't handle this platform
return
if is_windows:
SCons.Tool.msvc.generate(env)
elif is_linux:
SCons.Tool.gcc.generate(env)
elif is_mac:
SCons.Tool.gcc.generate(env)
# if version is unspecified, use latest
vlist = get_all_compiler_versions()
if not version:
if vlist:
version = vlist[0]
else:
# User may have specified '90' but we need to get actual dirname '9.0'.
# get_version_from_list does that mapping.
v = get_version_from_list(version, vlist)
if not v:
raise SCons.Errors.UserError, \
"Invalid Intel compiler version %s: "%version + \
"installed versions are %s"%(', '.join(vlist))
version = v
# if abi is unspecified, use ia32
# alternatives are ia64 for Itanium, or amd64 or em64t or x86_64 (all synonyms here)
abi = check_abi(abi)
if abi is None:
if is_mac or is_linux:
# Check if we are on 64-bit linux, default to 64 then.
uname_m = os.uname()[4]
if uname_m == 'x86_64':
abi = 'x86_64'
else:
abi = 'ia32'
else:
if is_win64:
abi = 'em64t'
else:
abi = 'ia32'
if version and not topdir:
try:
topdir = get_intel_compiler_top(version, abi)
except (SCons.Util.RegError, IntelCError):
topdir = None
if not topdir:
# Normally this is an error, but it might not be if the compiler is
# on $PATH and the user is importing their env.
class ICLTopDirWarning(SCons.Warnings.Warning):
pass
if (is_mac or is_linux) and not env.Detect('icc') or \
is_windows and not env.Detect('icl'):
SCons.Warnings.enableWarningClass(ICLTopDirWarning)
SCons.Warnings.warn(ICLTopDirWarning,
"Failed to find Intel compiler for version='%s', abi='%s'"%
(str(version), str(abi)))
else:
# should be cleaned up to say what this other version is
# since in this case we have some other Intel compiler installed
SCons.Warnings.enableWarningClass(ICLTopDirWarning)
SCons.Warnings.warn(ICLTopDirWarning,
"Can't find Intel compiler top dir for version='%s', abi='%s'"%
(str(version), str(abi)))
if topdir:
if verbose:
print "Intel C compiler: using version %s (%g), abi %s, in '%s'"%\
(repr(version), linux_ver_normalize(version),abi,topdir)
if is_linux:
# Show the actual compiler version by running the compiler.
os.system('%s/bin/icc --version'%topdir)
if is_mac:
# Show the actual compiler version by running the compiler.
os.system('%s/bin/icc --version'%topdir)
env['INTEL_C_COMPILER_TOP'] = topdir
if is_linux:
paths={'INCLUDE' : 'include',
'LIB' : 'lib',
'PATH' : 'bin',
'LD_LIBRARY_PATH' : 'lib'}
for p in paths.keys():
env.PrependENVPath(p, os.path.join(topdir, paths[p]))
if is_mac:
paths={'INCLUDE' : 'include',
'LIB' : 'lib',
'PATH' : 'bin',
'LD_LIBRARY_PATH' : 'lib'}
for p in paths.keys():
env.PrependENVPath(p, os.path.join(topdir, paths[p]))
if is_windows:
# env key reg valname default subdir of top
paths=(('INCLUDE', 'IncludeDir', 'Include'),
('LIB' , 'LibDir', 'Lib'),
('PATH' , 'BinDir', 'Bin'))
# We are supposed to ignore version if topdir is set, so set
# it to the empty string if it's not already set.
if version is None:
version = ''
# Each path has a registry entry, use that or default to subdir
for p in paths:
try:
path=get_intel_registry_value(p[1], version, abi)
# These paths may have $(ICInstallDir)
# which needs to be substituted with the topdir.
path=path.replace('$(ICInstallDir)', topdir + os.sep)
except IntelCError:
# Couldn't get it from registry: use default subdir of topdir
env.PrependENVPath(p[0], os.path.join(topdir, p[2]))
else:
env.PrependENVPath(p[0], string.split(path, os.pathsep))
# print "ICL %s: %s, final=%s"%(p[0], path, str(env['ENV'][p[0]]))
if is_windows:
env['CC'] = 'icl'
env['CXX'] = 'icl'
env['LINK'] = 'xilink'
else:
env['CC'] = 'icc'
env['CXX'] = 'icpc'
# Don't reset LINK here;
# use smart_link which should already be here from link.py.
#env['LINK'] = '$CC'
env['AR'] = 'xiar'
env['LD'] = 'xild' # not used by default
# This is not the exact (detailed) compiler version,
# just the major version as determined above or specified
# by the user. It is a float like 80 or 90, in normalized form for Linux
# (i.e. even for Linux 9.0 compiler, still returns 90 rather than 9.0)
if version:
env['INTEL_C_COMPILER_VERSION']=linux_ver_normalize(version)
if is_windows:
# Look for license file dir
# in system environment, registry, and default location.
envlicdir = os.environ.get("INTEL_LICENSE_FILE", '')
K = r'SOFTWARE\Intel\Licenses'
try:
k = SCons.Util.RegOpenKeyEx(SCons.Util.HKEY_LOCAL_MACHINE, K)
reglicdir = SCons.Util.RegQueryValueEx(k, "w_cpp")[0]
except (AttributeError, SCons.Util.RegError):
reglicdir = ""
defaultlicdir = r'C:\Program Files\Common Files\Intel\Licenses'
licdir = None
for ld in [envlicdir, reglicdir]:
# If the string contains an '@', then assume it's a network
# license (port@system) and good by definition.
if ld and (string.find(ld, '@') != -1 or os.path.exists(ld)):
licdir = ld
break
if not licdir:
licdir = defaultlicdir
if not os.path.exists(licdir):
class ICLLicenseDirWarning(SCons.Warnings.Warning):
pass
SCons.Warnings.enableWarningClass(ICLLicenseDirWarning)
SCons.Warnings.warn(ICLLicenseDirWarning,
"Intel license dir was not found."
" Tried using the INTEL_LICENSE_FILE environment variable (%s), the registry (%s) and the default path (%s)."
" Using the default path as a last resort."
% (envlicdir, reglicdir, defaultlicdir))
env['ENV']['INTEL_LICENSE_FILE'] = licdir
def exists(env):
if not (is_mac or is_linux or is_windows):
# can't handle this platform
return 0
try:
versions = get_all_compiler_versions()
except (SCons.Util.RegError, IntelCError):
versions = None
detected = versions is not None and len(versions) > 0
if not detected:
# try env.Detect, maybe that will work
if is_windows:
return env.Detect('icl')
elif is_linux:
return env.Detect('icc')
elif is_mac:
return env.Detect('icc')
return detected
# end of file
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
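# Illustrative use from an SConstruct -- a sketch assuming an Intel compiler
# is installed and discoverable, and that this tool is registered as 'intelc':
#
#   env = Environment(tools=['default', 'intelc'])
#   env.Program('hello', 'hello.c')   # CC/CXX become icl (Windows) or icc/icpc
#
# A specific version/ABI can be requested by applying the tool explicitly:
#
#   env.Tool('intelc', version='9.0', abi='ia32')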
|
|
"""Byrd-Omojokun Trust-Region SQP method."""
from __future__ import division, print_function, absolute_import
import scipy.sparse as spc
from .projections import projections
from .qp_subproblem import modified_dogleg, projected_cg, box_intersections
import numpy as np
from numpy.linalg import norm
__all__ = ['equality_constrained_sqp']
def default_scaling(x):
n, = np.shape(x)
return spc.eye(n)
def equality_constrained_sqp(fun_and_constr, grad_and_jac, lagr_hess,
x0, fun0, grad0, constr0,
jac0, stop_criteria, state,
trust_lb=None,
trust_ub=None,
initial_penalty=1.0,
initial_trust_radius=1.0,
scaling=default_scaling,
return_all=False,
factorization_method=None):
"""Solve nonlinear equality-constrained problem using trust-region SQP.
Solve optimization problem:
minimize fun(x)
subject to: constr(x) = 0
using Byrd-Omojokun Trust-Region SQP method described in [1]_. Several
implementation details are based on [2]_ and [3]_, p. 549.
References
----------
.. [1] Lalee, Marucha, Jorge Nocedal, and Todd Plantenga. "On the
implementation of an algorithm for large-scale equality
constrained optimization." SIAM Journal on
Optimization 8.3 (1998): 682-706.
.. [2] Byrd, Richard H., Mary E. Hribar, and Jorge Nocedal.
"An interior point algorithm for large-scale nonlinear
programming." SIAM Journal on Optimization 9.4 (1999): 877-900.
.. [3] Nocedal, Jorge, and Stephen J. Wright. "Numerical optimization"
Second Edition (2006).
"""
PENALTY_FACTOR = 0.3 # Rho from formula (3.51), reference [2]_, p.891.
LARGE_REDUCTION_RATIO = 0.9
INTERMEDIARY_REDUCTION_RATIO = 0.3
SUFFICIENT_REDUCTION_RATIO = 1e-8 # Eta from reference [2]_, p.892.
TRUST_ENLARGEMENT_FACTOR_L = 7.0
TRUST_ENLARGEMENT_FACTOR_S = 2.0
MAX_TRUST_REDUCTION = 0.5
MIN_TRUST_REDUCTION = 0.1
SOC_THRESHOLD = 0.1
TR_FACTOR = 0.8 # Zeta from formula (3.21), reference [2]_, p.885.
BOX_FACTOR = 0.5
n, = np.shape(x0) # Number of parameters
# Set default lower and upper bounds.
if trust_lb is None:
trust_lb = np.full(n, -np.inf)
if trust_ub is None:
trust_ub = np.full(n, np.inf)
# Initial values
x = np.copy(x0)
trust_radius = initial_trust_radius
penalty = initial_penalty
# Compute Values
f = fun0
c = grad0
b = constr0
A = jac0
S = scaling(x)
# Get projections
Z, LS, Y = projections(A, factorization_method)
# Compute least-squares Lagrange multipliers
v = -LS.dot(c)
# Update state parameters
state.optimality = norm(c + A.T.dot(v), np.inf)
state.constr_violation = norm(b, np.inf) if len(b) > 0 else 0
state.niter += 1
state.x = x
state.v = v
state.fun = f
state.grad = c
state.constr = b
state.jac = A
state.trust_radius = trust_radius
state.penalty = penalty
if return_all:
state.allvecs += [np.copy(x)]
state.allmult += [np.copy(v)]
compute_hess = True
while not stop_criteria(state):
# Compute Lagrangian Hessian
if compute_hess:
H = lagr_hess(x, v)
state.nhev += 1
# Normal Step - `dn`
# minimize 1/2*||A dn + b||^2
# subject to:
# ||dn|| <= TR_FACTOR * trust_radius
# BOX_FACTOR * lb <= dn <= BOX_FACTOR * ub.
dn = modified_dogleg(A, Y, b,
TR_FACTOR*trust_radius,
BOX_FACTOR*trust_lb,
BOX_FACTOR*trust_ub)
# Tangential Step - `dt`
# Solve the QP problem:
# minimize 1/2 dt.T H dt + dt.T (H dn + c)
# subject to:
# A dt = 0
# ||dt|| <= sqrt(trust_radius**2 - ||dn||**2)
# lb - dn <= dt <= ub - dn
c_t = H.dot(dn) + c
b_t = np.zeros_like(b)
trust_radius_t = np.sqrt(trust_radius**2 - np.linalg.norm(dn)**2)
lb_t = trust_lb - dn
ub_t = trust_ub - dn
dt, info_cg = projected_cg(H, c_t, Z, Y, b_t,
trust_radius_t,
lb_t, ub_t)
# Compute update (normal + tangential steps).
d = dn + dt
# Compute second order model: 1/2 d H d + c.T d + f.
quadratic_model = 1/2*(H.dot(d)).dot(d) + c.T.dot(d)
# Compute linearized constraint: l = A d + b.
linearized_constr = A.dot(d)+b
# Compute new penalty parameter according to formula (3.52),
# reference [2]_, p.891.
vpred = norm(b) - norm(linearized_constr)
# Guarantee `vpred` always positive,
# regardless of roundoff errors.
vpred = max(1e-16, vpred)
previous_penalty = penalty
if quadratic_model > 0:
new_penalty = quadratic_model / ((1-PENALTY_FACTOR)*vpred)
penalty = max(penalty, new_penalty)
# Compute predicted reduction according to formula (3.52),
# reference [2]_, p.891.
predicted_reduction = -quadratic_model + penalty*vpred
# Compute merit function at current point
merit_function = f + penalty*norm(b)
# Evaluate function and constraints at trial point
x_next = x + S.dot(d)
f_next, b_next = fun_and_constr(x_next)
# Increment function evaluation counter
state.nfev += 1
state.ncev += 1
# Compute merit function at trial point
merit_function_next = f_next + penalty*norm(b_next)
# Compute actual reduction according to formula (3.54),
# reference [2]_, p.892.
actual_reduction = merit_function - merit_function_next
# Compute reduction ratio
reduction_ratio = actual_reduction / predicted_reduction
# Second order correction (SOC), reference [2]_, p.892.
if reduction_ratio < SUFFICIENT_REDUCTION_RATIO and \
norm(dn) <= SOC_THRESHOLD * norm(dt):
# Compute second order correction
y = -Y.dot(b_next)
# Make sure increment is inside box constraints
_, t, intersect = box_intersections(d, y, trust_lb, trust_ub)
# Compute tentative point
x_soc = x + S.dot(d + t*y)
f_soc, b_soc = fun_and_constr(x_soc)
# Increment function evaluation counter
state.nfev += 1
state.ncev += 1
# Recompute actual reduction
merit_function_soc = f_soc + penalty*norm(b_soc)
actual_reduction_soc = merit_function - merit_function_soc
# Recompute reduction ratio
reduction_ratio_soc = actual_reduction_soc / predicted_reduction
if intersect and reduction_ratio_soc >= SUFFICIENT_REDUCTION_RATIO:
x_next = x_soc
f_next = f_soc
b_next = b_soc
reduction_ratio = reduction_ratio_soc
# Readjust trust region step, formula (3.55), reference [2]_, p.892.
if reduction_ratio >= LARGE_REDUCTION_RATIO:
trust_radius = max(TRUST_ENLARGEMENT_FACTOR_L * norm(d),
trust_radius)
elif reduction_ratio >= INTERMEDIARY_REDUCTION_RATIO:
trust_radius = max(TRUST_ENLARGEMENT_FACTOR_S * norm(d),
trust_radius)
# Reduce trust region step, according to reference [3]_, p.696.
elif reduction_ratio < SUFFICIENT_REDUCTION_RATIO:
trust_reduction \
= (1-SUFFICIENT_REDUCTION_RATIO)/(1-reduction_ratio)
new_trust_radius = trust_reduction * norm(d)
if new_trust_radius >= MAX_TRUST_REDUCTION * trust_radius:
trust_radius *= MAX_TRUST_REDUCTION
elif new_trust_radius >= MIN_TRUST_REDUCTION * trust_radius:
trust_radius = new_trust_radius
else:
trust_radius *= MIN_TRUST_REDUCTION
# Update iteration
state.niter += 1
if reduction_ratio >= SUFFICIENT_REDUCTION_RATIO:
x = x_next
f, b = f_next, b_next
c, A = grad_and_jac(x)
S = scaling(x)
# Increment gradient and Jacobian evaluation counters
state.ngev += 1
state.njev += 1
# Get projections
Z, LS, Y = projections(A, factorization_method)
# Compute least-squares Lagrange multipliers
v = -LS.dot(c)
# Set Flag
compute_hess = True
# Store state
state.x = x
state.v = v
state.fun = f
state.grad = c
state.constr = b
state.jac = A
# Optimality values
state.optimality = norm(c + A.T.dot(v), np.inf)
state.constr_violation = norm(b, np.inf) if len(b) > 0 else 0
else:
penalty = previous_penalty
compute_hess = False
# Store values
state.trust_radius = trust_radius
state.penalty = penalty
state.cg_niter += info_cg["niter"]
state.cg_info = info_cg
if return_all:
state.allvecs.append(np.copy(x))
state.allmult.append(np.copy(v))
return state
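# Note on the `state` argument (grounded in the attribute accesses above): it
# must expose the counters niter, nfev, ncev, ngev, njev, nhev and cg_niter,
# the fields x, v, fun, grad, constr, jac, trust_radius, penalty, optimality,
# constr_violation and cg_info, and -- when return_all is True -- the lists
# allvecs and allmult.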
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2019-04-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/Microsoft.ContainerService/managedClusters')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_list_by_resource_group_request(
subscription_id: str,
resource_group_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2019-04-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_upgrade_profile_request(
subscription_id: str,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2019-04-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/upgradeProfiles/default')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1),
"resourceName": _SERIALIZER.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_access_profile_request(
subscription_id: str,
resource_group_name: str,
resource_name: str,
role_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2019-04-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/accessProfiles/{roleName}/listCredential')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1),
"resourceName": _SERIALIZER.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
"roleName": _SERIALIZER.url("role_name", role_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_list_cluster_admin_credentials_request(
subscription_id: str,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2019-04-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/listClusterAdminCredential')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1),
"resourceName": _SERIALIZER.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_list_cluster_user_credentials_request(
subscription_id: str,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2019-04-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/listClusterUserCredential')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1),
"resourceName": _SERIALIZER.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_request(
subscription_id: str,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2019-04-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1),
"resourceName": _SERIALIZER.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_create_or_update_request_initial(
subscription_id: str,
resource_group_name: str,
resource_name: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2019-04-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1),
"resourceName": _SERIALIZER.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_update_tags_request_initial(
subscription_id: str,
resource_group_name: str,
resource_name: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2019-04-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1),
"resourceName": _SERIALIZER.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PATCH",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_delete_request_initial(
subscription_id: str,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2019-04-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1),
"resourceName": _SERIALIZER.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="DELETE",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_reset_service_principal_profile_request_initial(
subscription_id: str,
resource_group_name: str,
resource_name: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2019-04-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/resetServicePrincipalProfile')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1),
"resourceName": _SERIALIZER.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_reset_aad_profile_request_initial(
subscription_id: str,
resource_group_name: str,
resource_name: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2019-04-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/resetAADProfile')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1),
"resourceName": _SERIALIZER.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
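# Illustrative sketch (not generated code; all values are placeholders): each
# builder above returns a bare azure.core.rest.HttpRequest with the path,
# api-version and headers already filled in, which the operations class below
# converts and sends through the client pipeline.
#
#   request = build_get_request(
#       subscription_id="00000000-0000-0000-0000-000000000000",
#       resource_group_name="my-rg",
#       resource_name="myAKSCluster",
#   )
#   assert request.method == "GET"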
class ManagedClustersOperations(object):
"""ManagedClustersOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.containerservice.v2019_04_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list(
self,
**kwargs: Any
) -> Iterable["_models.ManagedClusterListResult"]:
"""Gets a list of managed clusters in the specified subscription.
Gets a list of managed clusters in the specified subscription. The operation returns properties
of each managed cluster.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ManagedClusterListResult or the result of
cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.v2019_04_01.models.ManagedClusterListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedClusterListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("ManagedClusterListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.ContainerService/managedClusters'} # type: ignore
@distributed_trace
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs: Any
) -> Iterable["_models.ManagedClusterListResult"]:
"""Lists managed clusters in the specified subscription and resource group.
Lists managed clusters in the specified subscription and resource group. The operation returns
properties of each managed cluster.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ManagedClusterListResult or the result of
cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.v2019_04_01.models.ManagedClusterListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedClusterListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_resource_group_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
template_url=self.list_by_resource_group.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_resource_group_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("ManagedClusterListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters'} # type: ignore
@distributed_trace
def get_upgrade_profile(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> "_models.ManagedClusterUpgradeProfile":
"""Gets upgrade profile for a managed cluster.
Gets the details of the upgrade profile for a managed cluster with a specified resource group
and name.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagedClusterUpgradeProfile, or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2019_04_01.models.ManagedClusterUpgradeProfile
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedClusterUpgradeProfile"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_upgrade_profile_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
template_url=self.get_upgrade_profile.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ManagedClusterUpgradeProfile', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_upgrade_profile.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/upgradeProfiles/default'} # type: ignore
@distributed_trace
def get_access_profile(
self,
resource_group_name: str,
resource_name: str,
role_name: str,
**kwargs: Any
) -> "_models.ManagedClusterAccessProfile":
"""Gets an access profile of a managed cluster.
Gets the accessProfile for the specified role name of the managed cluster with a specified
resource group and name.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:param role_name: The name of the role for managed cluster accessProfile resource.
:type role_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagedClusterAccessProfile, or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2019_04_01.models.ManagedClusterAccessProfile
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedClusterAccessProfile"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_access_profile_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
role_name=role_name,
template_url=self.get_access_profile.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ManagedClusterAccessProfile', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_access_profile.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/accessProfiles/{roleName}/listCredential'} # type: ignore
@distributed_trace
def list_cluster_admin_credentials(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> "_models.CredentialResults":
"""Gets cluster admin credential of a managed cluster.
Gets cluster admin credential of the managed cluster with a specified resource group and name.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CredentialResults, or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2019_04_01.models.CredentialResults
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CredentialResults"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_list_cluster_admin_credentials_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
template_url=self.list_cluster_admin_credentials.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('CredentialResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_cluster_admin_credentials.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/listClusterAdminCredential'} # type: ignore
@distributed_trace
def list_cluster_user_credentials(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> "_models.CredentialResults":
"""Gets cluster user credential of a managed cluster.
Gets cluster user credential of the managed cluster with a specified resource group and name.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CredentialResults, or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2019_04_01.models.CredentialResults
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CredentialResults"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_list_cluster_user_credentials_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
template_url=self.list_cluster_user_credentials.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('CredentialResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_cluster_user_credentials.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/listClusterUserCredential'} # type: ignore
@distributed_trace
def get(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> "_models.ManagedCluster":
"""Gets a managed cluster.
Gets the details of the managed cluster with a specified resource group and name.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagedCluster, or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2019_04_01.models.ManagedCluster
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedCluster"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ManagedCluster', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name: str,
resource_name: str,
parameters: "_models.ManagedCluster",
**kwargs: Any
) -> "_models.ManagedCluster":
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedCluster"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'ManagedCluster')
request = build_create_or_update_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
content_type=content_type,
json=_json,
template_url=self._create_or_update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ManagedCluster', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ManagedCluster', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}'} # type: ignore
@distributed_trace
def begin_create_or_update(
self,
resource_group_name: str,
resource_name: str,
parameters: "_models.ManagedCluster",
**kwargs: Any
) -> LROPoller["_models.ManagedCluster"]:
"""Creates or updates a managed cluster.
Creates or updates a managed cluster with the specified configuration for agents and Kubernetes
version.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:param parameters: Parameters supplied to the Create or Update a Managed Cluster operation.
:type parameters: ~azure.mgmt.containerservice.v2019_04_01.models.ManagedCluster
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a custom polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either ManagedCluster or the result of
cls(response)
:rtype:
~azure.core.polling.LROPoller[~azure.mgmt.containerservice.v2019_04_01.models.ManagedCluster]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedCluster"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
parameters=parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('ManagedCluster', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}'} # type: ignore
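# Hedged usage sketch (not part of the generated client): every begin_*
# operation below follows the same LROPoller pattern shown here. The client
# construction is an assumption based on the usual azure-mgmt conventions;
# ``ContainerServiceClient`` and the credential setup are illustrative, not
# taken from this file.
#
#   from azure.identity import DefaultAzureCredential
#   from azure.mgmt.containerservice import ContainerServiceClient
#
#   client = ContainerServiceClient(DefaultAzureCredential(), "<subscription-id>")
#   poller = client.managed_clusters.begin_create_or_update(
#       resource_group_name="my-rg",
#       resource_name="my-cluster",
#       parameters=managed_cluster,  # a ManagedCluster model instance
#   )
#   cluster = poller.result()  # blocks until the LRO reaches a terminal state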
def _update_tags_initial(
self,
resource_group_name: str,
resource_name: str,
parameters: "_models.TagsObject",
**kwargs: Any
) -> "_models.ManagedCluster":
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedCluster"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'TagsObject')
request = build_update_tags_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
content_type=content_type,
json=_json,
template_url=self._update_tags_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ManagedCluster', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}'} # type: ignore
@distributed_trace
def begin_update_tags(
self,
resource_group_name: str,
resource_name: str,
parameters: "_models.TagsObject",
**kwargs: Any
) -> LROPoller["_models.ManagedCluster"]:
"""Updates tags on a managed cluster.
Updates a managed cluster with the specified tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:param parameters: Parameters supplied to the Update Managed Cluster Tags operation.
:type parameters: ~azure.mgmt.containerservice.v2019_04_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either ManagedCluster or the result of
cls(response)
:rtype:
~azure.core.polling.LROPoller[~azure.mgmt.containerservice.v2019_04_01.models.ManagedCluster]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedCluster"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_tags_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
parameters=parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('ManagedCluster', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}'} # type: ignore
def _delete_initial(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}'} # type: ignore
@distributed_trace
def begin_delete(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> LROPoller[None]:
"""Deletes a managed cluster.
Deletes the managed cluster with a specified resource group and name.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}'} # type: ignore
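# Hedged sketch of the polling controls documented above: ``polling`` and
# ``continuation_token`` are keyword arguments accepted by these begin_*
# methods; the surrounding variable names are illustrative.
#
#   poller = client.managed_clusters.begin_delete("my-rg", "my-cluster")
#   token = poller.continuation_token()  # persist this to resume later
#   resumed = client.managed_clusters.begin_delete(
#       "my-rg", "my-cluster", continuation_token=token,
#   )
#   resumed.wait()  # or .result(); begin_delete yields None on success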
def _reset_service_principal_profile_initial(
self,
resource_group_name: str,
resource_name: str,
parameters: "_models.ManagedClusterServicePrincipalProfile",
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'ManagedClusterServicePrincipalProfile')
request = build_reset_service_principal_profile_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
content_type=content_type,
json=_json,
template_url=self._reset_service_principal_profile_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_reset_service_principal_profile_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/resetServicePrincipalProfile'} # type: ignore
@distributed_trace
def begin_reset_service_principal_profile(
self,
resource_group_name: str,
resource_name: str,
parameters: "_models.ManagedClusterServicePrincipalProfile",
**kwargs: Any
) -> LROPoller[None]:
"""Reset Service Principal Profile of a managed cluster.
Update the service principal profile for a managed cluster.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:param parameters: Parameters supplied to the Reset Service Principal Profile operation for a
Managed Cluster.
:type parameters:
~azure.mgmt.containerservice.v2019_04_01.models.ManagedClusterServicePrincipalProfile
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._reset_service_principal_profile_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
parameters=parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_reset_service_principal_profile.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/resetServicePrincipalProfile'} # type: ignore
def _reset_aad_profile_initial(
self,
resource_group_name: str,
resource_name: str,
parameters: "_models.ManagedClusterAADProfile",
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'ManagedClusterAADProfile')
request = build_reset_aad_profile_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
content_type=content_type,
json=_json,
template_url=self._reset_aad_profile_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_reset_aad_profile_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/resetAADProfile'} # type: ignore
@distributed_trace
def begin_reset_aad_profile(
self,
resource_group_name: str,
resource_name: str,
parameters: "_models.ManagedClusterAADProfile",
**kwargs: Any
) -> LROPoller[None]:
"""Reset AAD Profile of a managed cluster.
Update the AAD profile for a managed cluster.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:param parameters: Parameters supplied to the Reset AAD Profile operation for a Managed
Cluster.
:type parameters: ~azure.mgmt.containerservice.v2019_04_01.models.ManagedClusterAADProfile
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._reset_aad_profile_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
parameters=parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_reset_aad_profile.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/resetAADProfile'} # type: ignore
|
|
from __future__ import print_function, division
from sympy.core import Add, S, C, sympify, oo, pi, Dummy, Rational
from sympy.core.function import Function, ArgumentIndexError
from sympy.core.compatibility import xrange
from .zeta_functions import zeta
from .error_functions import erf
from sympy.functions.elementary.exponential import log
from sympy.functions.elementary.integers import floor
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.functions.elementary.trigonometric import csc
from sympy.functions.combinatorial.numbers import bernoulli
from sympy.functions.combinatorial.factorials import rf
from sympy.functions.combinatorial.numbers import harmonic
###############################################################################
############################ COMPLETE GAMMA FUNCTION ##########################
###############################################################################
class gamma(Function):
r"""
The gamma function
.. math::
\Gamma(x) := \int^{\infty}_{0} t^{x-1} e^{-t} \mathrm{d}t.
The ``gamma`` function implements the function which passes through the
values of the factorial function, i.e. `\Gamma(n) = (n - 1)!` when n is
a positive integer. More generally, `\Gamma(z)` is defined in the whole
complex plane except at the non-positive integers, where it has simple poles.
Examples
========
>>> from sympy import S, I, pi, oo, gamma
>>> from sympy.abc import x
Several special values are known:
>>> gamma(1)
1
>>> gamma(4)
6
>>> gamma(S(3)/2)
sqrt(pi)/2
The Gamma function obeys the mirror symmetry:
>>> from sympy import conjugate
>>> conjugate(gamma(x))
gamma(conjugate(x))
Differentiation with respect to x is supported:
>>> from sympy import diff
>>> diff(gamma(x), x)
gamma(x)*polygamma(0, x)
Series expansion is also supported:
>>> from sympy import series
>>> series(gamma(x), x, 0, 3)
1/x - EulerGamma + x*(EulerGamma**2/2 + pi**2/12) + x**2*(-EulerGamma*pi**2/12 + polygamma(2, 1)/6 - EulerGamma**3/6) + O(x**3)
We can numerically evaluate the gamma function to arbitrary precision
on the whole complex plane:
>>> gamma(pi).evalf(40)
2.288037795340032417959588909060233922890
>>> gamma(1+I).evalf(20)
0.49801566811835604271 - 0.15494982830181068512*I
See Also
========
lowergamma: Lower incomplete gamma function.
uppergamma: Upper incomplete gamma function.
polygamma: Polygamma function.
loggamma: Log Gamma function.
digamma: Digamma function.
trigamma: Trigamma function.
sympy.functions.special.beta_functions.beta: Euler Beta function.
References
==========
.. [1] http://en.wikipedia.org/wiki/Gamma_function
.. [2] http://dlmf.nist.gov/5
.. [3] http://mathworld.wolfram.com/GammaFunction.html
.. [4] http://functions.wolfram.com/GammaBetaErf/Gamma/
"""
unbranched = True
def fdiff(self, argindex=1):
if argindex == 1:
return gamma(self.args[0])*polygamma(0, self.args[0])
else:
raise ArgumentIndexError(self, argindex)
@classmethod
def eval(cls, arg):
if arg.is_Number:
if arg is S.NaN:
return S.NaN
elif arg is S.Infinity:
return S.Infinity
elif arg.is_Integer:
if arg.is_positive:
return C.factorial(arg - 1)
else:
return S.ComplexInfinity
elif arg.is_Rational:
if arg.q == 2:
n = abs(arg.p) // arg.q
if arg.is_positive:
k, coeff = n, S.One
else:
n = k = n + 1
if n & 1 == 0:
coeff = S.One
else:
coeff = S.NegativeOne
for i in range(3, 2*k, 2):
coeff *= i
if arg.is_positive:
return coeff*sqrt(S.Pi) / 2**n
else:
return 2**n*sqrt(S.Pi) / coeff
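# Worked example for the half-integer branch above (illustrative, not
# source text): for arg = 7/2 we get n = 3 and coeff = 3*5 = 15, so the
# branch returns 15*sqrt(pi)/2**3, matching
# Gamma(7/2) = (5/2)*(3/2)*(1/2)*sqrt(pi) = 15*sqrt(pi)/8.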
def _eval_expand_func(self, **hints):
arg = self.args[0]
if arg.is_Rational:
if abs(arg.p) > arg.q:
x = Dummy('x')
n = arg.p // arg.q
p = arg.p - n*arg.q
return gamma(x + n)._eval_expand_func().subs(x, Rational(p, arg.q))
if arg.is_Add:
coeff, tail = arg.as_coeff_add()
if coeff and coeff.q != 1:
intpart = floor(coeff)
tail = (coeff - intpart,) + tail
coeff = intpart
tail = arg._new_rawargs(*tail, reeval=False)
return gamma(tail)*C.RisingFactorial(tail, coeff)
return self.func(*self.args)
def _eval_conjugate(self):
return self.func(self.args[0].conjugate())
def _eval_is_real(self):
return self.args[0].is_real
def _eval_rewrite_as_tractable(self, z):
return C.exp(loggamma(z))
def _eval_nseries(self, x, n, logx):
x0 = self.args[0].limit(x, 0)
if not (x0.is_Integer and x0 <= 0):
return super(gamma, self)._eval_nseries(x, n, logx)
t = self.args[0] - x0
return (gamma(t + 1)/rf(self.args[0], -x0 + 1))._eval_nseries(x, n, logx)
def _latex(self, printer, exp=None):
if len(self.args) != 1:
raise ValueError("Args length should be 1")
aa = printer._print(self.args[0])
if exp:
return r'\Gamma^{%s}{\left(%s \right)}' % (printer._print(exp), aa)
else:
return r'\Gamma{\left(%s \right)}' % aa
@staticmethod
def _latex_no_arg(printer):
return r'\Gamma'
###############################################################################
################## LOWER and UPPER INCOMPLETE GAMMA FUNCTIONS #################
###############################################################################
class lowergamma(Function):
r"""
The lower incomplete gamma function.
It can be defined as the meromorphic continuation of
.. math::
\gamma(s, x) := \int_0^x t^{s-1} e^{-t} \mathrm{d}t = \Gamma(s) - \Gamma(s, x).
This can be shown to be the same as
.. math::
\gamma(s, x) = \frac{x^s}{s} {}_1F_1\left({s \atop s+1} \middle| -x\right),
where :math:`{}_1F_1` is the (confluent) hypergeometric function.
Examples
========
>>> from sympy import lowergamma, S
>>> from sympy.abc import s, x
>>> lowergamma(s, x)
lowergamma(s, x)
>>> lowergamma(3, x)
-x**2*exp(-x) - 2*x*exp(-x) + 2 - 2*exp(-x)
>>> lowergamma(-S(1)/2, x)
-2*sqrt(pi)*erf(sqrt(x)) - 2*exp(-x)/sqrt(x)
See Also
========
gamma: Gamma function.
uppergamma: Upper incomplete gamma function.
polygamma: Polygamma function.
loggamma: Log Gamma function.
digamma: Digamma function.
trigamma: Trigamma function.
sympy.functions.special.beta_functions.beta: Euler Beta function.
References
==========
.. [1] http://en.wikipedia.org/wiki/Incomplete_gamma_function#Lower_Incomplete_Gamma_Function
.. [2] Abramowitz, Milton; Stegun, Irene A., eds. (1965), Chapter 6, Section 5,
Handbook of Mathematical Functions with Formulas, Graphs, and Mathematical Tables
.. [3] http://dlmf.nist.gov/8
.. [4] http://functions.wolfram.com/GammaBetaErf/Gamma2/
.. [5] http://functions.wolfram.com/GammaBetaErf/Gamma3/
"""
def fdiff(self, argindex=2):
from sympy import meijerg, unpolarify
if argindex == 2:
a, z = self.args
return C.exp(-unpolarify(z))*z**(a - 1)
elif argindex == 1:
a, z = self.args
return gamma(a)*digamma(a) - log(z)*uppergamma(a, z) \
- meijerg([], [1, 1], [0, 0, a], [], z)
else:
raise ArgumentIndexError(self, argindex)
@classmethod
def eval(cls, a, x):
# For lack of a better place, we use this one to extract branching
# information. The following can be
# found in the literature (cf. references given above), albeit scattered:
# 1) For fixed x != 0, lowergamma(s, x) is an entire function of s
# 2) For fixed positive integers s, lowergamma(s, x) is an entire
# function of x.
# 3) For fixed non-positive integers s,
# lowergamma(s, exp(I*2*pi*n)*x) =
# 2*pi*I*n*(-1)**(-s)/factorial(-s) + lowergamma(s, x)
# (this follows from lowergamma(s, x).diff(x) = x**(s-1)*exp(-x)).
# 4) For fixed non-integral s,
# lowergamma(s, x) = x**s*gamma(s)*lowergamma_unbranched(s, x),
# where lowergamma_unbranched(s, x) is an entire function (in fact
# of both s and x), i.e.
#    lowergamma(s, exp(2*I*pi*n)*x) = exp(2*pi*I*n*s)*lowergamma(s, x)
from sympy import unpolarify, I, factorial, exp
nx, n = x.extract_branch_factor()
if a.is_integer and a.is_positive:
nx = unpolarify(x)
if nx != x:
return lowergamma(a, nx)
elif a.is_integer and a.is_nonpositive:
if n != 0:
return 2*pi*I*n*(-1)**(-a)/factorial(-a) + lowergamma(a, nx)
elif n != 0:
return exp(2*pi*I*n*a)*lowergamma(a, nx)
# Special values.
if a.is_Number:
# TODO this should be non-recursive
if a is S.One:
return S.One - C.exp(-x)
elif a is S.Half:
return sqrt(pi)*erf(sqrt(x))
elif a.is_Integer or (2*a).is_Integer:
b = a - 1
if b.is_positive:
return b*cls(b, x) - x**b * C.exp(-x)
if not a.is_Integer:
return (cls(a + 1, x) + x**a * C.exp(-x))/a
def _eval_evalf(self, prec):
from sympy.mpmath import mp, workprec
from sympy import Expr
a = self.args[0]._to_mpmath(prec)
z = self.args[1]._to_mpmath(prec)
with workprec(prec):
res = mp.gammainc(a, 0, z)
return Expr._from_mpmath(res, prec)
def _eval_conjugate(self):
z = self.args[1]
if z not in (S.Zero, S.NegativeInfinity):
return self.func(self.args[0].conjugate(), z.conjugate())
def _eval_rewrite_as_uppergamma(self, s, x):
return gamma(s) - uppergamma(s, x)
def _eval_rewrite_as_expint(self, s, x):
from sympy import expint
if s.is_integer and s.is_nonpositive:
return self
return self.rewrite(uppergamma).rewrite(expint)
@staticmethod
def _latex_no_arg(printer):
return r'\gamma'
class uppergamma(Function):
r"""
The upper incomplete gamma function.
It can be defined as the meromorphic continuation of
.. math::
\Gamma(s, x) := \int_x^\infty t^{s-1} e^{-t} \mathrm{d}t = \Gamma(s) - \gamma(s, x).
where `\gamma(s, x)` is the lower incomplete gamma function,
:class:`lowergamma`. This can be shown to be the same as
.. math::
\Gamma(s, x) = \Gamma(s) - \frac{x^s}{s} {}_1F_1\left({s \atop s+1} \middle| -x\right),
where :math:`{}_1F_1` is the (confluent) hypergeometric function.
The upper incomplete gamma function is also essentially equivalent to the
generalized exponential integral:
.. math::
\operatorname{E}_{n}(x) = \int_{1}^{\infty}{\frac{e^{-xt}}{t^n} \, dt} = x^{n-1}\Gamma(1-n,x).
Examples
========
>>> from sympy import uppergamma, S
>>> from sympy.abc import s, x
>>> uppergamma(s, x)
uppergamma(s, x)
>>> uppergamma(3, x)
x**2*exp(-x) + 2*x*exp(-x) + 2*exp(-x)
>>> uppergamma(-S(1)/2, x)
-2*sqrt(pi)*(-erf(sqrt(x)) + 1) + 2*exp(-x)/sqrt(x)
>>> uppergamma(-2, x)
expint(3, x)/x**2
See Also
========
gamma: Gamma function.
lowergamma: Lower incomplete gamma function.
polygamma: Polygamma function.
loggamma: Log Gamma function.
digamma: Digamma function.
trigamma: Trigamma function.
sympy.functions.special.beta_functions.beta: Euler Beta function.
References
==========
.. [1] http://en.wikipedia.org/wiki/Incomplete_gamma_function#Upper_Incomplete_Gamma_Function
.. [2] Abramowitz, Milton; Stegun, Irene A., eds. (1965), Chapter 6, Section 5,
Handbook of Mathematical Functions with Formulas, Graphs, and Mathematical Tables
.. [3] http://dlmf.nist.gov/8
.. [4] http://functions.wolfram.com/GammaBetaErf/Gamma2/
.. [5] http://functions.wolfram.com/GammaBetaErf/Gamma3/
.. [6] http://en.wikipedia.org/wiki/Exponential_integral#Relation_with_other_functions
"""
def fdiff(self, argindex=2):
from sympy import meijerg, unpolarify
if argindex == 2:
a, z = self.args
return -C.exp(-unpolarify(z))*z**(a - 1)
elif argindex == 1:
a, z = self.args
return uppergamma(a, z)*log(z) + meijerg([], [1, 1], [0, 0, a], [], z)
else:
raise ArgumentIndexError(self, argindex)
def _eval_evalf(self, prec):
from sympy.mpmath import mp, workprec
from sympy import Expr
a = self.args[0]._to_mpmath(prec)
z = self.args[1]._to_mpmath(prec)
with workprec(prec):
res = mp.gammainc(a, z, mp.inf)
return Expr._from_mpmath(res, prec)
@classmethod
def eval(cls, a, z):
from sympy import unpolarify, I, factorial, exp, expint
if z.is_Number:
if z is S.NaN:
return S.NaN
elif z is S.Infinity:
return S.Zero
elif z is S.Zero:
# TODO: Holds only for Re(a) > 0:
return gamma(a)
# We extract branching information here. C/f lowergamma.
nx, n = z.extract_branch_factor()
if a.is_integer and (a > 0) == True:
nx = unpolarify(z)
if z != nx:
return uppergamma(a, nx)
elif a.is_integer and (a <= 0) == True:
if n != 0:
return -2*pi*I*n*(-1)**(-a)/factorial(-a) + uppergamma(a, nx)
elif n != 0:
return gamma(a)*(1 - exp(2*pi*I*n*a)) + exp(2*pi*I*n*a)*uppergamma(a, nx)
# Special values.
if a.is_Number:
# TODO this should be non-recursive
if a is S.One:
return C.exp(-z)
elif a is S.Half:
return sqrt(pi)*(1 - erf(sqrt(z))) # TODO could use erfc...
elif a.is_Integer or (2*a).is_Integer:
b = a - 1
if b.is_positive:
return b*cls(b, z) + z**b * C.exp(-z)
elif b.is_Integer:
return expint(-b, z)*unpolarify(z)**(b + 1)
if not a.is_Integer:
return (cls(a + 1, z) - z**a * C.exp(-z))/a
def _eval_conjugate(self):
z = self.args[1]
if z not in (S.Zero, S.NegativeInfinity):
return self.func(self.args[0].conjugate(), z.conjugate())
def _eval_rewrite_as_lowergamma(self, s, x):
return gamma(s) - lowergamma(s, x)
def _eval_rewrite_as_expint(self, s, x):
from sympy import expint
return expint(1 - s, x)*x**s
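# Quick check of the rewrite above (illustrative, not source text): since
# uppergamma(s, x) == expint(1 - s, x)*x**s, taking s = -2 gives
# expint(3, x)/x**2, which agrees with the uppergamma(-2, x) example in
# the class docstring.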
###############################################################################
###################### POLYGAMMA and LOGGAMMA FUNCTIONS #######################
###############################################################################
class polygamma(Function):
r"""
The function ``polygamma(n, z)`` returns ``log(gamma(z)).diff(n + 1)``.
It is a meromorphic function on `\mathbb{C}` and defined as the (n+1)-th
derivative of the logarithm of the gamma function:
.. math::
\psi^{(n)} (z) := \frac{\mathrm{d}^{n+1}}{\mathrm{d} z^{n+1}} \log\Gamma(z).
Examples
========
Several special values are known:
>>> from sympy import S, polygamma
>>> polygamma(0, 1)
-EulerGamma
>>> polygamma(0, 1/S(2))
-2*log(2) - EulerGamma
>>> polygamma(0, 1/S(3))
-3*log(3)/2 - sqrt(3)*pi/6 - EulerGamma
>>> polygamma(0, 1/S(4))
-3*log(2) - pi/2 - EulerGamma
>>> polygamma(0, 2)
-EulerGamma + 1
>>> polygamma(0, 23)
-EulerGamma + 19093197/5173168
>>> from sympy import oo, I
>>> polygamma(0, oo)
oo
>>> polygamma(0, -oo)
oo
>>> polygamma(0, I*oo)
oo
>>> polygamma(0, -I*oo)
oo
Differentiation with respect to x is supported:
>>> from sympy import Symbol, diff
>>> x = Symbol("x")
>>> diff(polygamma(0, x), x)
polygamma(1, x)
>>> diff(polygamma(0, x), x, 2)
polygamma(2, x)
>>> diff(polygamma(0, x), x, 3)
polygamma(3, x)
>>> diff(polygamma(1, x), x)
polygamma(2, x)
>>> diff(polygamma(1, x), x, 2)
polygamma(3, x)
>>> diff(polygamma(2, x), x)
polygamma(3, x)
>>> diff(polygamma(2, x), x, 2)
polygamma(4, x)
>>> n = Symbol("n")
>>> diff(polygamma(n, x), x)
polygamma(n + 1, x)
>>> diff(polygamma(n, x), x, 2)
polygamma(n + 2, x)
We can rewrite polygamma functions in terms of harmonic numbers:
>>> from sympy import harmonic
>>> polygamma(0, x).rewrite(harmonic)
harmonic(x - 1) - EulerGamma
>>> polygamma(2, x).rewrite(harmonic)
2*harmonic(x - 1, 3) - 2*zeta(3)
>>> ni = Symbol("n", integer=True)
>>> polygamma(ni, x).rewrite(harmonic)
(-1)**(n + 1)*(-harmonic(x - 1, n + 1) + zeta(n + 1))*factorial(n)
See Also
========
gamma: Gamma function.
lowergamma: Lower incomplete gamma function.
uppergamma: Upper incomplete gamma function.
loggamma: Log Gamma function.
digamma: Digamma function.
trigamma: Trigamma function.
sympy.functions.special.beta_functions.beta: Euler Beta function.
References
==========
.. [1] http://en.wikipedia.org/wiki/Polygamma_function
.. [2] http://mathworld.wolfram.com/PolygammaFunction.html
.. [3] http://functions.wolfram.com/GammaBetaErf/PolyGamma/
.. [4] http://functions.wolfram.com/GammaBetaErf/PolyGamma2/
"""
def fdiff(self, argindex=2):
if argindex == 2:
n, z = self.args[:2]
return polygamma(n + 1, z)
else:
raise ArgumentIndexError(self, argindex)
def _eval_is_positive(self):
if self.args[1].is_positive and (self.args[0] > 0) == True:
return self.args[0].is_odd
def _eval_is_negative(self):
if self.args[1].is_positive and (self.args[0] > 0) == True:
return self.args[0].is_even
def _eval_is_real(self):
return self.args[0].is_real
def _eval_aseries(self, n, args0, x, logx):
if args0[1] != oo or not \
(self.args[0].is_Integer and self.args[0].is_nonnegative):
return super(polygamma, self)._eval_aseries(n, args0, x, logx)
z = self.args[1]
N = self.args[0]
if N == 0:
# digamma function series
# Abramowitz & Stegun, p. 259, 6.3.18
r = log(z) - 1/(2*z)
o = None
if n < 2:
o = C.Order(1/z, x)
else:
m = C.ceiling((n + 1)//2)
l = [bernoulli(2*k) / (2*k*z**(2*k)) for k in range(1, m)]
r -= Add(*l)
o = C.Order(1/z**(2*m), x)
return r._eval_nseries(x, n, logx) + o
else:
# proper polygamma function
# Abramowitz & Stegun, p. 260, 6.4.10
# We return terms to order higher than O(x**n) on purpose
# -- otherwise we would not be able to return any terms for
# quite a long time!
fac = gamma(N)
e0 = fac + N*fac/(2*z)
m = C.ceiling((n + 1)//2)
for k in range(1, m):
fac = fac*(2*k + N - 1)*(2*k + N - 2) / ((2*k)*(2*k - 1))
e0 += bernoulli(2*k)*fac/z**(2*k)
o = C.Order(1/z**(2*m), x)
if n == 0:
o = C.Order(1/z, x)
elif n == 1:
o = C.Order(1/z**2, x)
r = e0._eval_nseries(z, n, logx) + o
return (-1 * (-1/z)**N * r)._eval_nseries(x, n, logx)
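# For reference, the digamma expansion used in the N == 0 branch above is
# A&S 6.3.18 (restated here, not source text):
#   psi(z) ~ log(z) - 1/(2*z) - sum(bernoulli(2*k)/(2*k*z**(2*k)), k >= 1)
# which is exactly what r accumulates before the order term is appended.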
@classmethod
def eval(cls, n, z):
n, z = list(map(sympify, (n, z)))
from sympy import unpolarify
if n.is_integer:
if n.is_nonnegative:
nz = unpolarify(z)
if z != nz:
return polygamma(n, nz)
if n == -1:
return loggamma(z)
else:
if z.is_Number:
if z is S.NaN:
return S.NaN
elif z is S.Infinity:
if n.is_Number:
if n is S.Zero:
return S.Infinity
else:
return S.Zero
elif z.is_Integer:
if z.is_nonpositive:
return S.ComplexInfinity
else:
if n is S.Zero:
return -S.EulerGamma + C.harmonic(z - 1, 1)
elif n.is_odd:
return (-1)**(n + 1)*C.factorial(n)*zeta(n + 1, z)
if n == 0:
if z is S.NaN:
return S.NaN
elif z.is_Rational:
# TODO actually *any* n/m can be done, but that is messy
lookup = {S(1)/2: -2*log(2) - S.EulerGamma,
S(1)/3: -S.Pi/2/sqrt(3) - 3*log(3)/2 - S.EulerGamma,
S(1)/4: -S.Pi/2 - 3*log(2) - S.EulerGamma,
S(3)/4: -3*log(2) - S.EulerGamma + S.Pi/2,
S(2)/3: -3*log(3)/2 + S.Pi/2/sqrt(3) - S.EulerGamma}
if z > 0:
n = floor(z)
z0 = z - n
if z0 in lookup:
return lookup[z0] + Add(*[1/(z0 + k) for k in range(n)])
elif z < 0:
n = floor(1 - z)
z0 = z + n
if z0 in lookup:
return lookup[z0] - Add(*[1/(z0 - 1 - k) for k in range(n)])
elif z in (S.Infinity, S.NegativeInfinity):
return S.Infinity
else:
t = z.extract_multiplicatively(S.ImaginaryUnit)
if t in (S.Infinity, S.NegativeInfinity):
return S.Infinity
# TODO n == 1 also can do some rational z
def _eval_expand_func(self, **hints):
n, z = self.args
if n.is_Integer and n.is_nonnegative:
if z.is_Add:
coeff = z.args[0]
if coeff.is_Integer:
e = -(n + 1)
if coeff > 0:
tail = Add(*[C.Pow(
z - i, e) for i in xrange(1, int(coeff) + 1)])
else:
tail = -Add(*[C.Pow(
z + i, e) for i in xrange(0, int(-coeff))])
return polygamma(n, z - coeff) + (-1)**n*C.factorial(n)*tail
elif z.is_Mul:
coeff, z = z.as_two_terms()
if coeff.is_Integer and coeff.is_positive:
tail = [ polygamma(n, z + C.Rational(
i, coeff)) for i in xrange(0, int(coeff)) ]
if n == 0:
return Add(*tail)/coeff + log(coeff)
else:
return Add(*tail)/coeff**(n + 1)
z *= coeff
return polygamma(n, z)
def _eval_rewrite_as_zeta(self, n, z):
if n >= S.One:
return (-1)**(n + 1)*C.factorial(n)*zeta(n + 1, z)
else:
return self
def _eval_rewrite_as_harmonic(self, n, z):
if n.is_integer:
if n == S.Zero:
return harmonic(z - 1) - S.EulerGamma
else:
return S.NegativeOne**(n+1) * C.factorial(n) * (C.zeta(n+1) - harmonic(z-1, n+1))
def _eval_as_leading_term(self, x):
n, z = [a.as_leading_term(x) for a in self.args]
o = C.Order(z, x)
if n == 0 and o.contains(1/x):
return o.getn() * log(x)
else:
return self.func(n, z)
class loggamma(Function):
r"""
The ``loggamma`` function implements the logarithm of the
gamma function, i.e. `\log\Gamma(x)`.
Examples
========
Several special values are known. For numerical integral
arguments we have:
>>> from sympy import loggamma
>>> loggamma(-2)
oo
>>> loggamma(0)
oo
>>> loggamma(1)
0
>>> loggamma(2)
0
>>> loggamma(3)
log(2)
and for symbolic values:
>>> from sympy import Symbol
>>> n = Symbol("n", integer=True, positive=True)
>>> loggamma(n)
log(gamma(n))
>>> loggamma(-n)
oo
for half-integral values:
>>> from sympy import S, pi
>>> loggamma(S(5)/2)
log(3*sqrt(pi)/4)
>>> loggamma(n/2)
log(2**(-n + 1)*sqrt(pi)*gamma(n)/gamma(n/2 + 1/2))
and general rational arguments:
>>> from sympy import expand_func
>>> L = loggamma(S(16)/3)
>>> expand_func(L).doit()
-5*log(3) + loggamma(1/3) + log(4) + log(7) + log(10) + log(13)
>>> L = loggamma(S(19)/4)
>>> expand_func(L).doit()
-4*log(4) + loggamma(3/4) + log(3) + log(7) + log(11) + log(15)
>>> L = loggamma(S(23)/7)
>>> expand_func(L).doit()
-3*log(7) + log(2) + loggamma(2/7) + log(9) + log(16)
The loggamma function has the following limits towards infinity:
>>> from sympy import oo
>>> loggamma(oo)
oo
>>> loggamma(-oo)
zoo
The loggamma function obeys the mirror symmetry
if `x \in \mathbb{C} \setminus \{-\infty, 0\}`:
>>> from sympy.abc import x
>>> from sympy import conjugate
>>> conjugate(loggamma(x))
loggamma(conjugate(x))
Differentiation with respect to x is supported:
>>> from sympy import diff
>>> diff(loggamma(x), x)
polygamma(0, x)
Series expansion is also supported:
>>> from sympy import series
>>> series(loggamma(x), x, 0, 4)
-log(x) - EulerGamma*x + pi**2*x**2/12 + x**3*polygamma(2, 1)/6 + O(x**4)
We can numerically evaluate the loggamma function to arbitrary precision
on the whole complex plane:
>>> from sympy import I
>>> loggamma(5).evalf(30)
3.17805383034794561964694160130
>>> loggamma(I).evalf(20)
-0.65092319930185633889 - 1.8724366472624298171*I
See Also
========
gamma: Gamma function.
lowergamma: Lower incomplete gamma function.
uppergamma: Upper incomplete gamma function.
polygamma: Polygamma function.
digamma: Digamma function.
trigamma: Trigamma function.
sympy.functions.special.beta_functions.beta: Euler Beta function.
References
==========
.. [1] http://en.wikipedia.org/wiki/Gamma_function
.. [2] http://dlmf.nist.gov/5
.. [3] http://mathworld.wolfram.com/LogGammaFunction.html
.. [4] http://functions.wolfram.com/GammaBetaErf/LogGamma/
"""
@classmethod
def eval(cls, z):
z = sympify(z)
if z.is_integer:
if z.is_nonpositive:
return S.Infinity
elif z.is_positive:
return log(gamma(z))
elif z.is_rational:
p, q = z.as_numer_denom()
# Half-integral values:
if p.is_positive and q == 2:
return log(sqrt(S.Pi) * 2**(1 - p) * gamma(p) / gamma((p + 1)*S.Half))
if z is S.Infinity:
return S.Infinity
elif abs(z) is S.Infinity:
return S.ComplexInfinity
if z is S.NaN:
return S.NaN
def _eval_expand_func(self, **hints):
z = self.args[0]
if z.is_Rational:
p, q = z.as_numer_denom()
# General rational arguments (u + p/q)
# Split z as n + p/q with p < q
n = p // q
p = p - n*q
if p.is_positive and q.is_positive and p < q:
k = Dummy("k")
if n.is_positive:
return loggamma(p / q) - n*log(q) + C.Sum(log((k - 1)*q + p), (k, 1, n))
elif n.is_negative:
return loggamma(p / q) - n*log(q) + S.Pi*S.ImaginaryUnit*n - C.Sum(log(k*q - p), (k, 1, -n))
elif n.is_zero:
return loggamma(p / q)
return self
def _eval_nseries(self, x, n, logx=None):
x0 = self.args[0].limit(x, 0)
if x0 is S.Zero:
f = self._eval_rewrite_as_intractable(*self.args)
return f._eval_nseries(x, n, logx)
return super(loggamma, self)._eval_nseries(x, n, logx)
def _eval_aseries(self, n, args0, x, logx):
if args0[0] != oo:
return super(loggamma, self)._eval_aseries(n, args0, x, logx)
z = self.args[0]
m = min(n, C.ceiling((n + S(1))/2))
r = log(z)*(z - S(1)/2) - z + log(2*pi)/2
l = [bernoulli(2*k) / (2*k*(2*k - 1)*z**(2*k - 1)) for k in range(1, m)]
o = None
if m == 0:
o = C.Order(1, x)
else:
o = C.Order(1/z**(2*m - 1), x)
# It is very inefficient to first add the order and then do the nseries
return (r + Add(*l))._eval_nseries(x, n, logx) + o
def _eval_rewrite_as_intractable(self, z):
return log(gamma(z))
def _eval_is_real(self):
return self.args[0].is_real
def _eval_conjugate(self):
z = self.args[0]
if z not in (S.Zero, S.NegativeInfinity):
return self.func(z.conjugate())
def fdiff(self, argindex=1):
if argindex == 1:
return polygamma(0, self.args[0])
else:
raise ArgumentIndexError(self, argindex)
def digamma(x):
r"""
The digamma function is the first derivative of the loggamma function, i.e.
.. math::
\psi(z) := \frac{\mathrm{d}}{\mathrm{d} z} \log\Gamma(z)
= \frac{\Gamma'(z)}{\Gamma(z)}
In this case, ``digamma(z) = polygamma(0, z)``.
See Also
========
gamma: Gamma function.
lowergamma: Lower incomplete gamma function.
uppergamma: Upper incomplete gamma function.
polygamma: Polygamma function.
loggamma: Log Gamma function.
trigamma: Trigamma function.
sympy.functions.special.beta_functions.beta: Euler Beta function.
References
==========
.. [1] http://en.wikipedia.org/wiki/Digamma_function
.. [2] http://mathworld.wolfram.com/DigammaFunction.html
.. [3] http://functions.wolfram.com/GammaBetaErf/PolyGamma2/
"""
return polygamma(0, x)
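# Illustrative values (a sketch; digamma simply delegates to polygamma):
#   >>> from sympy import digamma, S
#   >>> digamma(1)
#   -EulerGamma
#   >>> digamma(S(1)/2)
#   -2*log(2) - EulerGamma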
def trigamma(x):
r"""
The trigamma function is the second derivative of the loggamma function, i.e.
.. math::
\psi^{(1)}(z) := \frac{\mathrm{d}^{2}}{\mathrm{d} z^{2}} \log\Gamma(z).
In this case, ``trigamma(z) = polygamma(1, z)``.
See Also
========
gamma: Gamma function.
lowergamma: Lower incomplete gamma function.
uppergamma: Upper incomplete gamma function.
polygamma: Polygamma function.
loggamma: Log Gamma function.
digamma: Digamma function.
sympy.functions.special.beta_functions.beta: Euler Beta function.
References
==========
.. [1] http://en.wikipedia.org/wiki/Trigamma_function
.. [2] http://mathworld.wolfram.com/TrigammaFunction.html
.. [3] http://functions.wolfram.com/GammaBetaErf/PolyGamma2/
"""
return polygamma(1, x)
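# Illustrative value (a sketch; trigamma simply delegates to polygamma):
#   >>> from sympy import trigamma
#   >>> trigamma(1)
#   pi**2/6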
|
|
"""Stuff that differs in different Python versions and platform
distributions."""
# The following comment should be removed at some point in the future.
# mypy: disallow-untyped-defs=False
from __future__ import absolute_import, division
import codecs
import locale
import logging
import os
import shutil
import sys
from pip._vendor.six import PY2, text_type
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import Optional, Text, Tuple, Union
try:
import ipaddress
except ImportError:
try:
from pip._vendor import ipaddress # type: ignore
except ImportError:
import ipaddr as ipaddress # type: ignore
ipaddress.ip_address = ipaddress.IPAddress # type: ignore
ipaddress.ip_network = ipaddress.IPNetwork # type: ignore
__all__ = [
"ipaddress", "uses_pycache", "console_to_str",
"get_path_uid", "stdlib_pkgs", "WINDOWS", "samefile", "get_terminal_size",
]
logger = logging.getLogger(__name__)
if PY2:
import imp
try:
cache_from_source = imp.cache_from_source # type: ignore
except AttributeError:
# does not use __pycache__
cache_from_source = None
uses_pycache = cache_from_source is not None
else:
uses_pycache = True
from importlib.util import cache_from_source
if PY2:
# In Python 2.7, backslashreplace exists
# but does not support use for decoding.
# We implement our own replace handler for this
# situation, so that we can consistently use
# backslash replacement for all versions.
def backslashreplace_decode_fn(err):
raw_bytes = (err.object[i] for i in range(err.start, err.end))
# Python 2 gave us characters - convert to numeric bytes
raw_bytes = (ord(b) for b in raw_bytes)
return u"".join(map(u"\\x{:x}".format, raw_bytes)), err.end
codecs.register_error(
"backslashreplace_decode",
backslashreplace_decode_fn,
)
backslashreplace_decode = "backslashreplace_decode"
else:
backslashreplace_decode = "backslashreplace"
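# Illustrative behaviour of the handler selected above (a sketch, not
# source text): undecodable bytes come out as literal backslash escapes
# on both Python 2 and Python 3.
#
#   b"\xff-ok".decode("ascii", errors=backslashreplace_decode)  # u"\\xff-ok"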
def has_tls():
# type: () -> bool
try:
import _ssl # noqa: F401 # ignore unused
return True
except ImportError:
pass
from pip._vendor.urllib3.util import IS_PYOPENSSL
return IS_PYOPENSSL
def str_to_display(data, desc=None):
# type: (Union[bytes, Text], Optional[str]) -> Text
"""
For display or logging purposes, convert a bytes object (or text) to
text (e.g. unicode in Python 2) safe for output.
:param desc: An optional phrase describing the input data, for use in
the log message if a warning is logged. Defaults to "Bytes object".
This function should never error out and so can take a best effort
approach. It is okay to be lossy if needed since the return value is
just for display.
We assume the data is in the locale preferred encoding. If it won't
decode properly, we warn the user but decode as best we can.
We also ensure that the output can be safely written to standard output
without encoding errors.
"""
if isinstance(data, text_type):
return data
# Otherwise, data is a bytes object (str in Python 2).
# First, get the encoding we assume. This is the preferred
# encoding for the locale, unless that is not found, or
# it is ASCII, in which case assume UTF-8
encoding = locale.getpreferredencoding()
if (not encoding) or codecs.lookup(encoding).name == "ascii":
encoding = "utf-8"
# Now try to decode the data - if we fail, warn the user and
# decode with replacement.
try:
decoded_data = data.decode(encoding)
except UnicodeDecodeError:
logger.warning(
'%s does not appear to be encoded as %s',
desc or 'Bytes object',
encoding,
)
decoded_data = data.decode(encoding, errors=backslashreplace_decode)
# Make sure we can print the output, by encoding it to the output
# encoding with replacement of unencodable characters, and then
# decoding again.
# We use stderr's encoding because it's less likely to be
# redirected and if we don't find an encoding we skip this
# step (on the assumption that output is wrapped by something
# that won't fail).
# The double getattr is to deal with the possibility that we're
# being called in a situation where sys.__stderr__ doesn't exist,
# or doesn't have an encoding attribute. Neither of these cases
# should occur in normal pip use, but there's no harm in checking
# in case people use pip in (unsupported) unusual situations.
output_encoding = getattr(getattr(sys, "__stderr__", None),
"encoding", None)
if output_encoding:
output_encoded = decoded_data.encode(
output_encoding,
errors="backslashreplace"
)
decoded_data = output_encoded.decode(output_encoding)
return decoded_data
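# Hedged usage sketch: under a UTF-8 locale the bytes below decode
# cleanly; under a mismatched locale the function logs a warning and
# falls back to backslash replacement instead of raising.
#
#   str_to_display(b"caf\xc3\xa9", desc="Subprocess output")  # -> u"café"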
def console_to_str(data):
# type: (bytes) -> Text
"""Return a string, safe for output, of subprocess output.
"""
return str_to_display(data, desc='Subprocess output')
def get_path_uid(path):
# type: (str) -> int
"""
Return path's uid.
Does not follow symlinks:
https://github.com/pypa/pip/pull/935#discussion_r5307003
Placed this function in compat due to differences on AIX and
Jython, that should eventually go away.
:raises OSError: When path is a symlink or can't be read.
"""
if hasattr(os, 'O_NOFOLLOW'):
fd = os.open(path, os.O_RDONLY | os.O_NOFOLLOW)
file_uid = os.fstat(fd).st_uid
os.close(fd)
else: # AIX and Jython
# WARNING: time of check vulnerability, but best we can do w/o NOFOLLOW
if not os.path.islink(path):
# older versions of Jython don't have `os.fstat`
file_uid = os.stat(path).st_uid
else:
# raise OSError for parity with os.O_NOFOLLOW above
raise OSError(
"{} is a symlink; Will not return uid for symlinks".format(
path)
)
return file_uid
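# Hedged usage sketch: on a regular file this returns the owner's uid; on
# a symlink it raises OSError by design (the paths below are illustrative).
#
#   get_path_uid("/tmp")            # -> e.g. 0
#   get_path_uid("/path/to/link")   # -> raises OSError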
def expanduser(path):
# type: (str) -> str
"""
Expand ~ and ~user constructions.
Includes a workaround for https://bugs.python.org/issue14768
"""
expanded = os.path.expanduser(path)
if path.startswith('~/') and expanded.startswith('//'):
expanded = expanded[1:]
return expanded
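# Worked example of the bpo-14768 workaround above (illustrative): with
# HOME set to "/", os.path.expanduser("~/foo") returns "//foo"; this
# wrapper collapses the doubled slash back to "/foo".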
# packages in the stdlib that may have installation metadata, but should not be
# considered 'installed'. this theoretically could be determined based on
# dist.location (py27:`sysconfig.get_paths()['stdlib']`,
# py26:sysconfig.get_config_vars('LIBDEST')), but fear platform variation may
# make this ineffective, so hard-coding
stdlib_pkgs = {"python", "wsgiref", "argparse"}
# windows detection, covers cpython and ironpython
WINDOWS = (sys.platform.startswith("win") or
(sys.platform == 'cli' and os.name == 'nt'))
def samefile(file1, file2):
# type: (str, str) -> bool
"""Provide an alternative for os.path.samefile on Windows/Python2"""
if hasattr(os.path, 'samefile'):
return os.path.samefile(file1, file2)
else:
path1 = os.path.normcase(os.path.abspath(file1))
path2 = os.path.normcase(os.path.abspath(file2))
return path1 == path2
if hasattr(shutil, 'get_terminal_size'):
def get_terminal_size():
# type: () -> Tuple[int, int]
"""
Returns a tuple (x, y) representing the width (x) and the height (y)
in characters of the terminal window.
"""
return tuple(shutil.get_terminal_size()) # type: ignore
else:
def get_terminal_size():
# type: () -> Tuple[int, int]
"""
Returns a tuple (x, y) representing the width (x) and the height (y)
in characters of the terminal window.
"""
def ioctl_GWINSZ(fd):
try:
import fcntl
import termios
import struct
cr = struct.unpack_from(
'hh',
fcntl.ioctl(fd, termios.TIOCGWINSZ, '12345678')
)
except Exception:
return None
if cr == (0, 0):
return None
return cr
cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
if not cr:
if sys.platform != "win32":
try:
fd = os.open(os.ctermid(), os.O_RDONLY)
cr = ioctl_GWINSZ(fd)
os.close(fd)
except Exception:
pass
if not cr:
cr = (os.environ.get('LINES', 25), os.environ.get('COLUMNS', 80))
return int(cr[1]), int(cr[0])
|
|
# coding=utf-8
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import pytest
import functools
from devtools_testutils import recorded_by_proxy, set_bodiless_matcher
from azure.core.credentials import AzureKeyCredential
from azure.ai.formrecognizer import DocumentAnalysisClient, DocumentModelAdministrationClient, AnalyzeResult
from azure.ai.formrecognizer._generated.v2022_01_30_preview.models import AnalyzeResultOperation
from testcase import FormRecognizerTest
from preparers import GlobalClientPreparer as _GlobalClientPreparer
from preparers import FormRecognizerPreparer
DocumentModelAdministrationClientPreparer = functools.partial(_GlobalClientPreparer, DocumentModelAdministrationClient)
class TestDACAnalyzeCustomModel(FormRecognizerTest):
def teardown(self):
self.sleep(4)
@FormRecognizerPreparer()
def test_analyze_document_none_model_id(self, **kwargs):
formrecognizer_test_endpoint = kwargs.pop("formrecognizer_test_endpoint")
formrecognizer_test_api_key = kwargs.pop("formrecognizer_test_api_key")
client = DocumentAnalysisClient(formrecognizer_test_endpoint, AzureKeyCredential(formrecognizer_test_api_key))
with pytest.raises(ValueError):
client.begin_analyze_document(model=None, document=b"xx")
@FormRecognizerPreparer()
def test_analyze_document_empty_model_id(self, **kwargs):
formrecognizer_test_endpoint = kwargs.pop("formrecognizer_test_endpoint")
formrecognizer_test_api_key = kwargs.pop("formrecognizer_test_api_key")
client = DocumentAnalysisClient(formrecognizer_test_endpoint, AzureKeyCredential(formrecognizer_test_api_key))
with pytest.raises(ValueError):
client.begin_analyze_document(model="", document=b"xx")
@FormRecognizerPreparer()
@DocumentModelAdministrationClientPreparer()
@recorded_by_proxy
def test_custom_document_transform(self, client, formrecognizer_storage_container_sas_url, **kwargs):
set_bodiless_matcher()
da_client = client.get_document_analysis_client()
poller = client.begin_build_model(formrecognizer_storage_container_sas_url, "template")
model = poller.result()
responses = []
def callback(raw_response, _, headers):
analyze_result = da_client._deserialize(AnalyzeResultOperation, raw_response)
document = AnalyzeResult._from_generated(analyze_result.analyze_result)
responses.append(analyze_result)
responses.append(document)
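# The callback above is passed as the azure-core ``cls`` response hook: it
# receives the raw pipeline response before the poller consumes it, letting
# the test capture both the generated AnalyzeResultOperation and the public
# AnalyzeResult for field-by-field comparison.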
with open(self.form_jpg, "rb") as fd:
my_file = fd.read()
poller = da_client.begin_analyze_document(
model.model_id,
my_file,
cls=callback
)
document = poller.result()
raw_analyze_result = responses[0].analyze_result
returned_model = responses[1]
# Check AnalyzeResult
assert returned_model.model_id == raw_analyze_result.model_id
assert returned_model.api_version == raw_analyze_result.api_version
assert returned_model.content == raw_analyze_result.content
self.assertDocumentPagesTransformCorrect(returned_model.pages, raw_analyze_result.pages)
self.assertDocumentTransformCorrect(returned_model.documents, raw_analyze_result.documents)
self.assertDocumentTablesTransformCorrect(returned_model.tables, raw_analyze_result.tables)
self.assertDocumentKeyValuePairsTransformCorrect(returned_model.key_value_pairs, raw_analyze_result.key_value_pairs)
self.assertDocumentEntitiesTransformCorrect(returned_model.entities, raw_analyze_result.entities)
self.assertDocumentStylesTransformCorrect(returned_model.styles, raw_analyze_result.styles)
# check page range
assert len(raw_analyze_result.pages) == len(returned_model.pages)
@FormRecognizerPreparer()
@DocumentModelAdministrationClientPreparer()
@recorded_by_proxy
def test_custom_document_multipage_transform(self, client, formrecognizer_multipage_storage_container_sas_url, **kwargs):
set_bodiless_matcher()
da_client = client.get_document_analysis_client()
poller = client.begin_build_model(formrecognizer_multipage_storage_container_sas_url, "template")
model = poller.result()
responses = []
def callback(raw_response, _, headers):
analyze_result = da_client._deserialize(AnalyzeResultOperation, raw_response)
document = AnalyzeResult._from_generated(analyze_result.analyze_result)
responses.append(analyze_result)
responses.append(document)
with open(self.multipage_invoice_pdf, "rb") as fd:
my_file = fd.read()
poller = da_client.begin_analyze_document(
model.model_id,
my_file,
cls=callback
)
document = poller.result()
raw_analyze_result = responses[0].analyze_result
returned_model = responses[1]
# Check AnalyzeResult
assert returned_model.model_id == raw_analyze_result.model_id
assert returned_model.api_version == raw_analyze_result.api_version
assert returned_model.content == raw_analyze_result.content
self.assertDocumentPagesTransformCorrect(returned_model.pages, raw_analyze_result.pages)
self.assertDocumentTransformCorrect(returned_model.documents, raw_analyze_result.documents)
self.assertDocumentTablesTransformCorrect(returned_model.tables, raw_analyze_result.tables)
self.assertDocumentKeyValuePairsTransformCorrect(returned_model.key_value_pairs, raw_analyze_result.key_value_pairs)
self.assertDocumentEntitiesTransformCorrect(returned_model.entities, raw_analyze_result.entities)
self.assertDocumentStylesTransformCorrect(returned_model.styles, raw_analyze_result.styles)
# check page range
assert len(raw_analyze_result.pages) == len(returned_model.pages)
@FormRecognizerPreparer()
@DocumentModelAdministrationClientPreparer()
@recorded_by_proxy
def test_custom_document_selection_mark(self, client, formrecognizer_selection_mark_storage_container_sas_url, **kwargs):
set_bodiless_matcher()
da_client = client.get_document_analysis_client()
poller = client.begin_build_model(formrecognizer_selection_mark_storage_container_sas_url, "template")
model = poller.result()
with open(self.selection_form_pdf, "rb") as fd:
my_file = fd.read()
responses = []
def callback(raw_response, _, headers):
analyze_result = da_client._deserialize(AnalyzeResultOperation, raw_response)
document = AnalyzeResult._from_generated(analyze_result.analyze_result)
responses.append(analyze_result)
responses.append(document)
poller = da_client.begin_analyze_document(
model.model_id,
my_file,
cls=callback
)
document = poller.result()
raw_analyze_result = responses[0].analyze_result
returned_model = responses[1]
# Check AnalyzeResult
assert returned_model.model_id == raw_analyze_result.model_id
assert returned_model.api_version == raw_analyze_result.api_version
assert returned_model.content == raw_analyze_result.content
self.assertDocumentPagesTransformCorrect(returned_model.pages, raw_analyze_result.pages)
self.assertDocumentTransformCorrect(returned_model.documents, raw_analyze_result.documents)
self.assertDocumentTablesTransformCorrect(returned_model.tables, raw_analyze_result.tables)
self.assertDocumentKeyValuePairsTransformCorrect(returned_model.key_value_pairs, raw_analyze_result.key_value_pairs)
self.assertDocumentEntitiesTransformCorrect(returned_model.entities, raw_analyze_result.entities)
self.assertDocumentStylesTransformCorrect(returned_model.styles, raw_analyze_result.styles)
# check page range
assert len(raw_analyze_result.pages) == len(returned_model.pages)
@FormRecognizerPreparer()
@DocumentModelAdministrationClientPreparer()
@recorded_by_proxy
def test_pages_kwarg_specified(self, client, formrecognizer_storage_container_sas_url, **kwargs):
set_bodiless_matcher()
da_client = client.get_document_analysis_client()
with open(self.form_jpg, "rb") as fd:
my_file = fd.read()
build_poller = client.begin_build_model(formrecognizer_storage_container_sas_url, "template")
model = build_poller.result()
poller = da_client.begin_analyze_document(model.model_id, my_file, pages="1")
assert '1' == poller._polling_method._initial_response.http_response.request.query['pages']
result = poller.result()
assert result
@FormRecognizerPreparer()
@DocumentModelAdministrationClientPreparer()
@recorded_by_proxy
def test_custom_document_signature_field(self, client, formrecognizer_storage_container_sas_url, **kwargs):
set_bodiless_matcher()
da_client = client.get_document_analysis_client()
with open(self.form_jpg, "rb") as fd:
my_file = fd.read()
build_polling = client.begin_build_model(formrecognizer_storage_container_sas_url, "template")
model = build_polling.result()
poller = da_client.begin_analyze_document(
model.model_id,
my_file,
)
result = poller.result()
assert result.documents[0].fields.get("FullSignature").value == "signed"
assert result.documents[0].fields.get("FullSignature").value_type == "signature"
# This will notify us of changes in the service; currently the service is
# expected to return None content for signature-type fields.
assert result.documents[0].fields.get("FullSignature").content is None
|