# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from swift.common import ring
from swift.common.ring.utils import (tiers_for_dev, build_tier_tree,
validate_and_normalize_ip,
validate_and_normalize_address,
is_valid_ip, is_valid_ipv4,
is_valid_ipv6, is_valid_hostname,
is_local_device, parse_search_value,
parse_search_values_from_opts,
parse_change_values_from_opts,
validate_args, parse_args,
parse_builder_ring_filename_args,
build_dev_from_opts, dispersion_report,
parse_address)
class TestUtils(unittest.TestCase):
def setUp(self):
self.test_dev = {'region': 1, 'zone': 1, 'ip': '192.168.1.1',
'port': '6000', 'id': 0}
def get_test_devs():
dev0 = {'region': 1, 'zone': 1, 'ip': '192.168.1.1',
'port': '6000', 'id': 0}
dev1 = {'region': 1, 'zone': 1, 'ip': '192.168.1.1',
'port': '6000', 'id': 1}
dev2 = {'region': 1, 'zone': 1, 'ip': '192.168.1.1',
'port': '6000', 'id': 2}
dev3 = {'region': 1, 'zone': 1, 'ip': '192.168.1.2',
'port': '6000', 'id': 3}
dev4 = {'region': 1, 'zone': 1, 'ip': '192.168.1.2',
'port': '6000', 'id': 4}
dev5 = {'region': 1, 'zone': 1, 'ip': '192.168.1.2',
'port': '6000', 'id': 5}
dev6 = {'region': 1, 'zone': 2, 'ip': '192.168.2.1',
'port': '6000', 'id': 6}
dev7 = {'region': 1, 'zone': 2, 'ip': '192.168.2.1',
'port': '6000', 'id': 7}
dev8 = {'region': 1, 'zone': 2, 'ip': '192.168.2.1',
'port': '6000', 'id': 8}
dev9 = {'region': 1, 'zone': 2, 'ip': '192.168.2.2',
'port': '6000', 'id': 9}
dev10 = {'region': 1, 'zone': 2, 'ip': '192.168.2.2',
'port': '6000', 'id': 10}
dev11 = {'region': 1, 'zone': 2, 'ip': '192.168.2.2',
'port': '6000', 'id': 11}
return [dev0, dev1, dev2, dev3, dev4, dev5,
dev6, dev7, dev8, dev9, dev10, dev11]
self.test_devs = get_test_devs()
def test_tiers_for_dev(self):
self.assertEqual(
tiers_for_dev(self.test_dev),
((1,),
(1, 1),
(1, 1, '192.168.1.1'),
(1, 1, '192.168.1.1', 0)))
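        # For reference: tiers_for_dev expands a device into its four nested
        # tiers, from coarsest to finest:
        #   (region,), (region, zone), (region, zone, ip),
        #   (region, zone, ip, device id)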
def test_build_tier_tree(self):
ret = build_tier_tree(self.test_devs)
self.assertEqual(len(ret), 8)
self.assertEqual(ret[()], set([(1,)]))
self.assertEqual(ret[(1,)], set([(1, 1), (1, 2)]))
self.assertEqual(ret[(1, 1)],
set([(1, 1, '192.168.1.2'),
(1, 1, '192.168.1.1')]))
self.assertEqual(ret[(1, 2)],
set([(1, 2, '192.168.2.2'),
(1, 2, '192.168.2.1')]))
self.assertEqual(ret[(1, 1, '192.168.1.1')],
set([(1, 1, '192.168.1.1', 0),
(1, 1, '192.168.1.1', 1),
(1, 1, '192.168.1.1', 2)]))
self.assertEqual(ret[(1, 1, '192.168.1.2')],
set([(1, 1, '192.168.1.2', 3),
(1, 1, '192.168.1.2', 4),
(1, 1, '192.168.1.2', 5)]))
self.assertEqual(ret[(1, 2, '192.168.2.1')],
set([(1, 2, '192.168.2.1', 6),
(1, 2, '192.168.2.1', 7),
(1, 2, '192.168.2.1', 8)]))
self.assertEqual(ret[(1, 2, '192.168.2.2')],
set([(1, 2, '192.168.2.2', 9),
(1, 2, '192.168.2.2', 10),
(1, 2, '192.168.2.2', 11)]))
def test_is_valid_ip(self):
self.assertTrue(is_valid_ip("127.0.0.1"))
self.assertTrue(is_valid_ip("10.0.0.1"))
ipv6 = "fe80:0000:0000:0000:0204:61ff:fe9d:f156"
self.assertTrue(is_valid_ip(ipv6))
ipv6 = "fe80:0:0:0:204:61ff:fe9d:f156"
self.assertTrue(is_valid_ip(ipv6))
ipv6 = "fe80::204:61ff:fe9d:f156"
self.assertTrue(is_valid_ip(ipv6))
ipv6 = "fe80:0000:0000:0000:0204:61ff:254.157.241.86"
self.assertTrue(is_valid_ip(ipv6))
ipv6 = "fe80:0:0:0:0204:61ff:254.157.241.86"
self.assertTrue(is_valid_ip(ipv6))
ipv6 = "fe80::204:61ff:254.157.241.86"
self.assertTrue(is_valid_ip(ipv6))
ipv6 = "fe80::"
self.assertTrue(is_valid_ip(ipv6))
ipv6 = "::1"
self.assertTrue(is_valid_ip(ipv6))
not_ipv6 = "3ffe:0b00:0000:0001:0000:0000:000a"
self.assertFalse(is_valid_ip(not_ipv6))
not_ipv6 = "1:2:3:4:5:6::7:8"
self.assertFalse(is_valid_ip(not_ipv6))
def test_is_valid_ipv4(self):
self.assertTrue(is_valid_ipv4("127.0.0.1"))
self.assertTrue(is_valid_ipv4("10.0.0.1"))
ipv6 = "fe80:0000:0000:0000:0204:61ff:fe9d:f156"
self.assertFalse(is_valid_ipv4(ipv6))
ipv6 = "fe80:0:0:0:204:61ff:fe9d:f156"
self.assertFalse(is_valid_ipv4(ipv6))
ipv6 = "fe80::204:61ff:fe9d:f156"
self.assertFalse(is_valid_ipv4(ipv6))
ipv6 = "fe80:0000:0000:0000:0204:61ff:254.157.241.86"
self.assertFalse(is_valid_ipv4(ipv6))
ipv6 = "fe80:0:0:0:0204:61ff:254.157.241.86"
self.assertFalse(is_valid_ipv4(ipv6))
ipv6 = "fe80::204:61ff:254.157.241.86"
self.assertFalse(is_valid_ipv4(ipv6))
ipv6 = "fe80::"
self.assertFalse(is_valid_ipv4(ipv6))
ipv6 = "::1"
self.assertFalse(is_valid_ipv4(ipv6))
not_ipv6 = "3ffe:0b00:0000:0001:0000:0000:000a"
self.assertFalse(is_valid_ipv4(not_ipv6))
not_ipv6 = "1:2:3:4:5:6::7:8"
self.assertFalse(is_valid_ipv4(not_ipv6))
def test_is_valid_ipv6(self):
self.assertFalse(is_valid_ipv6("127.0.0.1"))
self.assertFalse(is_valid_ipv6("10.0.0.1"))
ipv6 = "fe80:0000:0000:0000:0204:61ff:fe9d:f156"
self.assertTrue(is_valid_ipv6(ipv6))
ipv6 = "fe80:0:0:0:204:61ff:fe9d:f156"
self.assertTrue(is_valid_ipv6(ipv6))
ipv6 = "fe80::204:61ff:fe9d:f156"
self.assertTrue(is_valid_ipv6(ipv6))
ipv6 = "fe80:0000:0000:0000:0204:61ff:254.157.241.86"
self.assertTrue(is_valid_ipv6(ipv6))
ipv6 = "fe80:0:0:0:0204:61ff:254.157.241.86"
self.assertTrue(is_valid_ipv6(ipv6))
ipv6 = "fe80::204:61ff:254.157.241.86"
self.assertTrue(is_valid_ipv6(ipv6))
ipv6 = "fe80::"
self.assertTrue(is_valid_ipv6(ipv6))
ipv6 = "::1"
self.assertTrue(is_valid_ipv6(ipv6))
not_ipv6 = "3ffe:0b00:0000:0001:0000:0000:000a"
self.assertFalse(is_valid_ipv6(not_ipv6))
not_ipv6 = "1:2:3:4:5:6::7:8"
self.assertFalse(is_valid_ipv6(not_ipv6))
def test_is_valid_hostname(self):
self.assertTrue(is_valid_hostname("local"))
self.assertTrue(is_valid_hostname("test.test.com"))
hostname = "test." * 51
self.assertTrue(is_valid_hostname(hostname))
hostname = hostname.rstrip('.')
self.assertTrue(is_valid_hostname(hostname))
hostname = hostname + "00"
self.assertFalse(is_valid_hostname(hostname))
self.assertFalse(is_valid_hostname("$blah#"))
def test_is_local_device(self):
# localhost shows up in whataremyips() output as "::1" for IPv6
my_ips = ["127.0.0.1", "::1"]
my_port = 6000
self.assertTrue(is_local_device(my_ips, my_port,
"127.0.0.1", my_port))
self.assertTrue(is_local_device(my_ips, my_port,
"::1", my_port))
self.assertTrue(is_local_device(
my_ips, my_port,
"0000:0000:0000:0000:0000:0000:0000:0001", my_port))
self.assertTrue(is_local_device(my_ips, my_port,
"localhost", my_port))
self.assertFalse(is_local_device(my_ips, my_port,
"localhost", my_port + 1))
self.assertFalse(is_local_device(my_ips, my_port,
"127.0.0.2", my_port))
# for those that don't have a local port
self.assertTrue(is_local_device(my_ips, None,
my_ips[0], None))
# When servers_per_port is active, the "my_port" passed in is None
# which means "don't include port in the determination of locality
# because it's not reliable in this deployment scenario"
self.assertTrue(is_local_device(my_ips, None,
"127.0.0.1", 6666))
self.assertTrue(is_local_device(my_ips, None,
"::1", 6666))
self.assertTrue(is_local_device(
my_ips, None,
"0000:0000:0000:0000:0000:0000:0000:0001", 6666))
self.assertTrue(is_local_device(my_ips, None,
"localhost", 6666))
self.assertFalse(is_local_device(my_ips, None,
"127.0.0.2", my_port))
def test_validate_and_normalize_ip(self):
ipv4 = "10.0.0.1"
self.assertEqual(ipv4, validate_and_normalize_ip(ipv4))
ipv6 = "fe80::204:61ff:fe9d:f156"
self.assertEqual(ipv6, validate_and_normalize_ip(ipv6.upper()))
hostname = "test.test.com"
self.assertRaises(ValueError,
validate_and_normalize_ip, hostname)
hostname = "$blah#"
self.assertRaises(ValueError,
validate_and_normalize_ip, hostname)
def test_validate_and_normalize_address(self):
ipv4 = "10.0.0.1"
self.assertEqual(ipv4, validate_and_normalize_address(ipv4))
ipv6 = "fe80::204:61ff:fe9d:f156"
self.assertEqual(ipv6, validate_and_normalize_address(ipv6.upper()))
hostname = "test.test.com"
self.assertEqual(hostname,
validate_and_normalize_address(hostname.upper()))
hostname = "$blah#"
self.assertRaises(ValueError,
validate_and_normalize_address, hostname)
def test_parse_search_value(self):
res = parse_search_value('r0')
self.assertEqual(res, {'region': 0})
res = parse_search_value('r1')
self.assertEqual(res, {'region': 1})
res = parse_search_value('r1z2')
self.assertEqual(res, {'region': 1, 'zone': 2})
res = parse_search_value('d1')
self.assertEqual(res, {'id': 1})
res = parse_search_value('z1')
self.assertEqual(res, {'zone': 1})
res = parse_search_value('-127.0.0.1')
self.assertEqual(res, {'ip': '127.0.0.1'})
res = parse_search_value('127.0.0.1')
self.assertEqual(res, {'ip': '127.0.0.1'})
res = parse_search_value('-[127.0.0.1]:10001')
self.assertEqual(res, {'ip': '127.0.0.1', 'port': 10001})
res = parse_search_value(':10001')
self.assertEqual(res, {'port': 10001})
res = parse_search_value('R127.0.0.10')
self.assertEqual(res, {'replication_ip': '127.0.0.10'})
res = parse_search_value('R[127.0.0.10]:20000')
self.assertEqual(res, {'replication_ip': '127.0.0.10',
'replication_port': 20000})
res = parse_search_value('R:20000')
self.assertEqual(res, {'replication_port': 20000})
res = parse_search_value('/sdb1')
self.assertEqual(res, {'device': 'sdb1'})
res = parse_search_value('_meta1')
self.assertEqual(res, {'meta': 'meta1'})
self.assertRaises(ValueError, parse_search_value, 'OMGPONIES')
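        # The cases above exercise the search-value components understood by
        # parse_search_value: d<dev_id>, r<region>, z<zone>, -<ip> (optionally
        # bracketed, with :<port>), R<rep_ip>:<rep_port>, /<device>, _<meta>.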
def test_parse_search_values_from_opts(self):
argv = \
["--id", "1", "--region", "2", "--zone", "3",
"--ip", "test.test.com",
"--port", "6000",
"--replication-ip", "r.test.com",
"--replication-port", "7000",
"--device", "sda3",
"--meta", "some meta data",
"--weight", "3.14159265359",
"--change-ip", "change.test.test.com",
"--change-port", "6001",
"--change-replication-ip", "change.r.test.com",
"--change-replication-port", "7001",
"--change-device", "sdb3",
"--change-meta", "some meta data for change"]
expected = {
'id': 1,
'region': 2,
'zone': 3,
'ip': "test.test.com",
'port': 6000,
'replication_ip': "r.test.com",
'replication_port': 7000,
'device': "sda3",
'meta': "some meta data",
'weight': 3.14159265359,
}
new_cmd_format, opts, args = validate_args(argv)
search_values = parse_search_values_from_opts(opts)
        self.assertEqual(search_values, expected)
argv = \
["--id", "1", "--region", "2", "--zone", "3",
"--ip", "127.0.0.1",
"--port", "6000",
"--replication-ip", "127.0.0.10",
"--replication-port", "7000",
"--device", "sda3",
"--meta", "some meta data",
"--weight", "3.14159265359",
"--change-ip", "127.0.0.2",
"--change-port", "6001",
"--change-replication-ip", "127.0.0.20",
"--change-replication-port", "7001",
"--change-device", "sdb3",
"--change-meta", "some meta data for change"]
expected = {
'id': 1,
'region': 2,
'zone': 3,
'ip': "127.0.0.1",
'port': 6000,
'replication_ip': "127.0.0.10",
'replication_port': 7000,
'device': "sda3",
'meta': "some meta data",
'weight': 3.14159265359,
}
new_cmd_format, opts, args = validate_args(argv)
search_values = parse_search_values_from_opts(opts)
        self.assertEqual(search_values, expected)
argv = \
["--id", "1", "--region", "2", "--zone", "3",
"--ip", "[127.0.0.1]",
"--port", "6000",
"--replication-ip", "[127.0.0.10]",
"--replication-port", "7000",
"--device", "sda3",
"--meta", "some meta data",
"--weight", "3.14159265359",
"--change-ip", "[127.0.0.2]",
"--change-port", "6001",
"--change-replication-ip", "[127.0.0.20]",
"--change-replication-port", "7001",
"--change-device", "sdb3",
"--change-meta", "some meta data for change"]
new_cmd_format, opts, args = validate_args(argv)
search_values = parse_search_values_from_opts(opts)
        self.assertEqual(search_values, expected)
def test_parse_change_values_from_opts(self):
argv = \
["--id", "1", "--region", "2", "--zone", "3",
"--ip", "test.test.com",
"--port", "6000",
"--replication-ip", "r.test.com",
"--replication-port", "7000",
"--device", "sda3",
"--meta", "some meta data",
"--weight", "3.14159265359",
"--change-ip", "change.test.test.com",
"--change-port", "6001",
"--change-replication-ip", "change.r.test.com",
"--change-replication-port", "7001",
"--change-device", "sdb3",
"--change-meta", "some meta data for change"]
expected = {
'ip': "change.test.test.com",
'port': 6001,
'replication_ip': "change.r.test.com",
'replication_port': 7001,
'device': "sdb3",
'meta': "some meta data for change",
}
new_cmd_format, opts, args = validate_args(argv)
search_values = parse_change_values_from_opts(opts)
        self.assertEqual(search_values, expected)
argv = \
["--id", "1", "--region", "2", "--zone", "3",
"--ip", "127.0.0.1",
"--port", "6000",
"--replication-ip", "127.0.0.10",
"--replication-port", "7000",
"--device", "sda3",
"--meta", "some meta data",
"--weight", "3.14159265359",
"--change-ip", "127.0.0.2",
"--change-port", "6001",
"--change-replication-ip", "127.0.0.20",
"--change-replication-port", "7001",
"--change-device", "sdb3",
"--change-meta", "some meta data for change"]
expected = {
'ip': "127.0.0.2",
'port': 6001,
'replication_ip': "127.0.0.20",
'replication_port': 7001,
'device': "sdb3",
'meta': "some meta data for change",
}
new_cmd_format, opts, args = validate_args(argv)
search_values = parse_change_values_from_opts(opts)
        self.assertEqual(search_values, expected)
argv = \
["--id", "1", "--region", "2", "--zone", "3",
"--ip", "[127.0.0.1]",
"--port", "6000",
"--replication-ip", "[127.0.0.10]",
"--replication-port", "7000",
"--device", "sda3",
"--meta", "some meta data",
"--weight", "3.14159265359",
"--change-ip", "[127.0.0.2]",
"--change-port", "6001",
"--change-replication-ip", "[127.0.0.20]",
"--change-replication-port", "7001",
"--change-device", "sdb3",
"--change-meta", "some meta data for change"]
new_cmd_format, opts, args = validate_args(argv)
search_values = parse_change_values_from_opts(opts)
        self.assertEqual(search_values, expected)
def test_validate_args(self):
argv = \
["--id", "1", "--region", "2", "--zone", "3",
"--ip", "test.test.com",
"--port", "6000",
"--replication-ip", "r.test.com",
"--replication-port", "7000",
"--device", "sda3",
"--meta", "some meta data",
"--weight", "3.14159265359",
"--change-ip", "change.test.test.com",
"--change-port", "6001",
"--change-replication-ip", "change.r.test.com",
"--change-replication-port", "7001",
"--change-device", "sdb3",
"--change-meta", "some meta data for change"]
new_cmd_format, opts, args = validate_args(argv)
self.assertTrue(new_cmd_format)
self.assertEqual(opts.id, 1)
self.assertEqual(opts.region, 2)
self.assertEqual(opts.zone, 3)
self.assertEqual(opts.ip, "test.test.com")
self.assertEqual(opts.port, 6000)
self.assertEqual(opts.replication_ip, "r.test.com")
self.assertEqual(opts.replication_port, 7000)
self.assertEqual(opts.device, "sda3")
self.assertEqual(opts.meta, "some meta data")
self.assertEqual(opts.weight, 3.14159265359)
self.assertEqual(opts.change_ip, "change.test.test.com")
self.assertEqual(opts.change_port, 6001)
self.assertEqual(opts.change_replication_ip, "change.r.test.com")
self.assertEqual(opts.change_replication_port, 7001)
self.assertEqual(opts.change_device, "sdb3")
self.assertEqual(opts.change_meta, "some meta data for change")
argv = \
["--id", "0", "--region", "0", "--zone", "0",
"--ip", "",
"--port", "0",
"--replication-ip", "",
"--replication-port", "0",
"--device", "",
"--meta", "",
"--weight", "0",
"--change-ip", "",
"--change-port", "0",
"--change-replication-ip", "",
"--change-replication-port", "0",
"--change-device", "",
"--change-meta", ""]
new_cmd_format, opts, args = validate_args(argv)
self.assertFalse(new_cmd_format)
argv = \
["--id", "0", "--region", "0", "--zone", "0",
"--ip", "",
"--port", "0",
"--replication-ip", "",
"--replication-port", "0",
"--device", "",
"--meta", "",
"--weight", "0",
"--change-ip", "change.test.test.com",
"--change-port", "6001",
"--change-replication-ip", "change.r.test.com",
"--change-replication-port", "7001",
"--change-device", "sdb3",
"--change-meta", "some meta data for change"]
new_cmd_format, opts, args = validate_args(argv)
self.assertFalse(new_cmd_format)
def test_parse_args(self):
argv = \
["--id", "1", "--region", "2", "--zone", "3",
"--ip", "test.test.com",
"--port", "6000",
"--replication-ip", "r.test.com",
"--replication-port", "7000",
"--device", "sda3",
"--meta", "some meta data",
"--weight", "3.14159265359",
"--change-ip", "change.test.test.com",
"--change-port", "6001",
"--change-replication-ip", "change.r.test.com",
"--change-replication-port", "7001",
"--change-device", "sdb3",
"--change-meta", "some meta data for change"]
opts, args = parse_args(argv)
self.assertEqual(opts.id, 1)
self.assertEqual(opts.region, 2)
self.assertEqual(opts.zone, 3)
self.assertEqual(opts.ip, "test.test.com")
self.assertEqual(opts.port, 6000)
self.assertEqual(opts.replication_ip, "r.test.com")
self.assertEqual(opts.replication_port, 7000)
self.assertEqual(opts.device, "sda3")
self.assertEqual(opts.meta, "some meta data")
self.assertEqual(opts.weight, 3.14159265359)
self.assertEqual(opts.change_ip, "change.test.test.com")
self.assertEqual(opts.change_port, 6001)
self.assertEqual(opts.change_replication_ip, "change.r.test.com")
self.assertEqual(opts.change_replication_port, 7001)
self.assertEqual(opts.change_device, "sdb3")
self.assertEqual(opts.change_meta, "some meta data for change")
self.assertEqual(len(args), 0)
def test_parse_builder_ring_filename_args(self):
args = 'swift-ring-builder object.builder write_ring'
        self.assertEqual((
'object.builder', 'object.ring.gz'
), parse_builder_ring_filename_args(args.split()))
args = 'swift-ring-builder container.ring.gz write_builder'
        self.assertEqual((
'container.builder', 'container.ring.gz'
), parse_builder_ring_filename_args(args.split()))
# builder name arg should always fall through
args = 'swift-ring-builder test create'
        self.assertEqual((
'test', 'test.ring.gz'
), parse_builder_ring_filename_args(args.split()))
args = 'swift-ring-builder my.file.name create'
        self.assertEqual((
'my.file.name', 'my.file.name.ring.gz'
), parse_builder_ring_filename_args(args.split()))
def test_build_dev_from_opts(self):
argv = \
["--region", "2", "--zone", "3",
"--ip", "test.test.com",
"--port", "6000",
"--replication-ip", "r.test.com",
"--replication-port", "7000",
"--device", "sda3",
"--meta", "some meta data",
"--weight", "3.14159265359"]
expected = {
'region': 2,
'zone': 3,
'ip': "test.test.com",
'port': 6000,
'replication_ip': "r.test.com",
'replication_port': 7000,
'device': "sda3",
'meta': "some meta data",
'weight': 3.14159265359,
}
opts, args = parse_args(argv)
device = build_dev_from_opts(opts)
        self.assertEqual(device, expected)
argv = \
["--region", "2", "--zone", "3",
"--ip", "[test.test.com]",
"--port", "6000",
"--replication-ip", "[r.test.com]",
"--replication-port", "7000",
"--device", "sda3",
"--meta", "some meta data",
"--weight", "3.14159265359"]
opts, args = parse_args(argv)
self.assertRaises(ValueError, build_dev_from_opts, opts)
argv = \
["--region", "2", "--zone", "3",
"--ip", "[test.test.com]",
"--port", "6000",
"--replication-ip", "[r.test.com]",
"--replication-port", "7000",
"--meta", "some meta data",
"--weight", "3.14159265359"]
opts, args = parse_args(argv)
self.assertRaises(ValueError, build_dev_from_opts, opts)
def test_replication_defaults(self):
args = '-r 1 -z 1 -i 127.0.0.1 -p 6010 -d d1 -w 100'.split()
opts, _ = parse_args(args)
device = build_dev_from_opts(opts)
expected = {
'device': 'd1',
'ip': '127.0.0.1',
'meta': '',
'port': 6010,
'region': 1,
'replication_ip': '127.0.0.1',
'replication_port': 6010,
'weight': 100.0,
'zone': 1,
}
        self.assertEqual(device, expected)
args = '-r 1 -z 1 -i test.com -p 6010 -d d1 -w 100'.split()
opts, _ = parse_args(args)
device = build_dev_from_opts(opts)
expected = {
'device': 'd1',
'ip': 'test.com',
'meta': '',
'port': 6010,
'region': 1,
'replication_ip': 'test.com',
'replication_port': 6010,
'weight': 100.0,
'zone': 1,
}
        self.assertEqual(device, expected)
def test_dispersion_report(self):
rb = ring.RingBuilder(8, 3, 0)
rb.add_dev({'id': 0, 'region': 1, 'zone': 0, 'weight': 100,
'ip': '127.0.0.0', 'port': 10000, 'device': 'sda1'})
rb.add_dev({'id': 3, 'region': 1, 'zone': 0, 'weight': 100,
'ip': '127.0.0.0', 'port': 10000, 'device': 'sdb1'})
rb.add_dev({'id': 4, 'region': 1, 'zone': 0, 'weight': 100,
'ip': '127.0.0.0', 'port': 10000, 'device': 'sdc1'})
rb.add_dev({'id': 5, 'region': 1, 'zone': 0, 'weight': 100,
'ip': '127.0.0.0', 'port': 10000, 'device': 'sdd1'})
rb.add_dev({'id': 1, 'region': 1, 'zone': 1, 'weight': 200,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
rb.add_dev({'id': 6, 'region': 1, 'zone': 1, 'weight': 200,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sdb1'})
rb.add_dev({'id': 7, 'region': 1, 'zone': 1, 'weight': 200,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sdc1'})
rb.add_dev({'id': 8, 'region': 1, 'zone': 1, 'weight': 200,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sdd1'})
rb.add_dev({'id': 2, 'region': 1, 'zone': 1, 'weight': 200,
'ip': '127.0.0.2', 'port': 10002, 'device': 'sda1'})
rb.add_dev({'id': 9, 'region': 1, 'zone': 1, 'weight': 200,
'ip': '127.0.0.2', 'port': 10002, 'device': 'sdb1'})
rb.add_dev({'id': 10, 'region': 1, 'zone': 1, 'weight': 200,
'ip': '127.0.0.2', 'port': 10002, 'device': 'sdc1'})
rb.add_dev({'id': 11, 'region': 1, 'zone': 1, 'weight': 200,
'ip': '127.0.0.2', 'port': 10002, 'device': 'sdd1'})
# this ring is pretty volatile and the assertions are pretty brittle
# so we use a specific seed
rb.rebalance(seed=100)
rb.validate()
self.assertEqual(rb.dispersion, 39.0625)
report = dispersion_report(rb)
self.assertEqual(report['worst_tier'], 'r1z1')
self.assertEqual(report['max_dispersion'], 39.0625)
def build_tier_report(max_replicas, placed_parts, dispersion,
replicas):
return {
'max_replicas': max_replicas,
'placed_parts': placed_parts,
'dispersion': dispersion,
'replicas': replicas,
}
        # Each node (IP) can hold at most 256 partitions without storing
        # multiple replicas of the same partition. Each zone-1 node has 2/5 of
        # the total weight, and 2/5 of the 768 replica assignments ~= 307, so
        # ~51 partitions on each zone-1 node are stored at least twice.
expected = [
['r1z1', build_tier_report(
2, 256, 39.0625, [0, 0, 156, 100])],
['r1z1-127.0.0.1', build_tier_report(
1, 256, 19.53125, [0, 206, 50, 0])],
['r1z1-127.0.0.2', build_tier_report(
1, 256, 19.53125, [0, 206, 50, 0])],
]
report = dispersion_report(rb, 'r1z1[^/]*$', verbose=True)
graph = report['graph']
for i, (expected_key, expected_report) in enumerate(expected):
key, report = graph[i]
self.assertEqual(
(key, report),
(expected_key, expected_report)
)
# overcompensate in r1z0
rb.add_dev({'id': 12, 'region': 1, 'zone': 0, 'weight': 500,
'ip': '127.0.0.3', 'port': 10003, 'device': 'sda1'})
rb.add_dev({'id': 13, 'region': 1, 'zone': 0, 'weight': 500,
'ip': '127.0.0.3', 'port': 10003, 'device': 'sdb1'})
rb.add_dev({'id': 14, 'region': 1, 'zone': 0, 'weight': 500,
'ip': '127.0.0.3', 'port': 10003, 'device': 'sdc1'})
rb.add_dev({'id': 15, 'region': 1, 'zone': 0, 'weight': 500,
'ip': '127.0.0.3', 'port': 10003, 'device': 'sdd1'})
rb.rebalance(seed=10)
report = dispersion_report(rb)
self.assertEqual(rb.dispersion, 44.53125)
self.assertEqual(report['worst_tier'], 'r1z0-127.0.0.3')
self.assertEqual(report['max_dispersion'], 32.520325203252035)
def test_parse_address_old_format(self):
# Test old format
argv = "127.0.0.1:6000R127.0.0.1:6000/sda1_some meta data"
ip, port, rest = parse_address(argv)
self.assertEqual(ip, '127.0.0.1')
self.assertEqual(port, 6000)
self.assertEqual(rest, 'R127.0.0.1:6000/sda1_some meta data')
if __name__ == '__main__':
unittest.main()
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from model_utils.models import TimeStampedModel, StatusModel, TimeFramedModel, TitleSlugModel
from model_utils.tracker import FieldTracker, ModelTracker
from model_utils.managers import QueryManager, InheritanceManager, PassThroughManager
from model_utils.fields import SplitField, MonitorField, StatusField
from model_utils.tests.fields import MutableField
from model_utils import Choices
class InheritanceManagerTestRelated(models.Model):
pass
@python_2_unicode_compatible
class InheritanceManagerTestParent(models.Model):
# FileField is just a handy descriptor-using field. Refs #6.
non_related_field_using_descriptor = models.FileField(upload_to="test")
related = models.ForeignKey(
InheritanceManagerTestRelated, related_name="imtests", null=True)
normal_field = models.TextField()
related_self = models.OneToOneField("self", related_name="imtests_self", null=True)
objects = InheritanceManager()
def __unicode__(self):
return unicode(self.pk)
def __str__(self):
return "%s(%s)" % (
self.__class__.__name__[len('InheritanceManagerTest'):],
self.pk,
)
class InheritanceManagerTestChild1(InheritanceManagerTestParent):
non_related_field_using_descriptor_2 = models.FileField(upload_to="test")
normal_field_2 = models.TextField()
objects = InheritanceManager()
class InheritanceManagerTestGrandChild1(InheritanceManagerTestChild1):
text_field = models.TextField()
class InheritanceManagerTestGrandChild1_2(InheritanceManagerTestChild1):
text_field = models.TextField()
class InheritanceManagerTestChild2(InheritanceManagerTestParent):
non_related_field_using_descriptor_2 = models.FileField(upload_to="test")
normal_field_2 = models.TextField()
class Slug(TitleSlugModel):
pass
class InheritanceManagerTestChild3(InheritanceManagerTestParent):
parent_ptr = models.OneToOneField(
InheritanceManagerTestParent, related_name='manual_onetoone',
parent_link=True)
class TimeStamp(TimeStampedModel):
pass
class TimeFrame(TimeFramedModel):
pass
class TimeFrameManagerAdded(TimeFramedModel):
pass
class Monitored(models.Model):
name = models.CharField(max_length=25)
name_changed = MonitorField(monitor="name")
class MonitorWhen(models.Model):
name = models.CharField(max_length=25)
name_changed = MonitorField(monitor="name", when=["Jose", "Maria"])
class MonitorWhenEmpty(models.Model):
name = models.CharField(max_length=25)
name_changed = MonitorField(monitor="name", when=[])
class Status(StatusModel):
STATUS = Choices(
("active", _("active")),
("deleted", _("deleted")),
("on_hold", _("on hold")),
)
class StatusPlainTuple(StatusModel):
STATUS = (
("active", _("active")),
("deleted", _("deleted")),
("on_hold", _("on hold")),
)
class StatusManagerAdded(StatusModel):
STATUS = (
("active", _("active")),
("deleted", _("deleted")),
("on_hold", _("on hold")),
)
class Post(models.Model):
published = models.BooleanField(default=False)
confirmed = models.BooleanField(default=False)
order = models.IntegerField()
objects = models.Manager()
public = QueryManager(published=True)
public_confirmed = QueryManager(models.Q(published=True) &
models.Q(confirmed=True))
public_reversed = QueryManager(published=True).order_by("-order")
class Meta:
ordering = ("order",)
class Article(models.Model):
title = models.CharField(max_length=50)
body = SplitField()
class SplitFieldAbstractParent(models.Model):
content = SplitField()
class Meta:
abstract = True
class NoRendered(models.Model):
"""
Test that the no_excerpt_field keyword arg works. This arg should
never be used except by the South model-freezing.
"""
body = SplitField(no_excerpt_field=True)
class AuthorMixin(object):
def by_author(self, name):
return self.filter(author=name)
class PublishedMixin(object):
def published(self):
return self.filter(published=True)
def unpublished(self):
return self.filter(published=False)
class ByAuthorQuerySet(models.query.QuerySet, AuthorMixin):
pass
class FeaturedManager(models.Manager):
def get_queryset(self):
kwargs = {}
if hasattr(self, "_db"):
kwargs["using"] = self._db
return ByAuthorQuerySet(self.model, **kwargs).filter(feature=True)
get_query_set = get_queryset
class DudeQuerySet(models.query.QuerySet):
def abiding(self):
return self.filter(abides=True)
def rug_positive(self):
return self.filter(has_rug=True)
def rug_negative(self):
return self.filter(has_rug=False)
def by_name(self, name):
return self.filter(name__iexact=name)
class AbidingManager(PassThroughManager):
def get_queryset(self):
return DudeQuerySet(self.model).abiding()
get_query_set = get_queryset
def get_stats(self):
return {
"abiding_count": self.count(),
"rug_count": self.rug_positive().count(),
}
class Dude(models.Model):
abides = models.BooleanField(default=True)
name = models.CharField(max_length=20)
has_rug = models.BooleanField(default=False)
objects = PassThroughManager(DudeQuerySet)
abiders = AbidingManager()
class Car(models.Model):
name = models.CharField(max_length=20)
owner = models.ForeignKey(Dude, related_name='cars_owned')
objects = PassThroughManager(DudeQuerySet)
class SpotManager(PassThroughManager):
def get_queryset(self):
return super(SpotManager, self).get_queryset().filter(secret=False)
get_query_set = get_queryset
class SpotQuerySet(models.query.QuerySet):
def closed(self):
return self.filter(closed=True)
def secured(self):
return self.filter(secure=True)
class Spot(models.Model):
name = models.CharField(max_length=20)
secure = models.BooleanField(default=True)
closed = models.BooleanField(default=False)
secret = models.BooleanField(default=False)
owner = models.ForeignKey(Dude, related_name='spots_owned')
objects = SpotManager.for_queryset_class(SpotQuerySet)()
class Tracked(models.Model):
name = models.CharField(max_length=20)
number = models.IntegerField()
mutable = MutableField()
tracker = FieldTracker()
class TrackedFK(models.Model):
fk = models.ForeignKey('Tracked')
tracker = FieldTracker()
custom_tracker = FieldTracker(fields=['fk_id'])
custom_tracker_without_id = FieldTracker(fields=['fk'])
class TrackedNotDefault(models.Model):
name = models.CharField(max_length=20)
number = models.IntegerField()
name_tracker = FieldTracker(fields=['name'])
class TrackedNonFieldAttr(models.Model):
number = models.FloatField()
@property
def rounded(self):
return round(self.number) if self.number is not None else None
tracker = FieldTracker(fields=['rounded'])
class TrackedMultiple(models.Model):
name = models.CharField(max_length=20)
number = models.IntegerField()
name_tracker = FieldTracker(fields=['name'])
number_tracker = FieldTracker(fields=['number'])
class InheritedTracked(Tracked):
name2 = models.CharField(max_length=20)
class ModelTracked(models.Model):
name = models.CharField(max_length=20)
number = models.IntegerField()
mutable = MutableField()
tracker = ModelTracker()
class ModelTrackedFK(models.Model):
fk = models.ForeignKey('ModelTracked')
tracker = ModelTracker()
custom_tracker = ModelTracker(fields=['fk_id'])
custom_tracker_without_id = ModelTracker(fields=['fk'])
class ModelTrackedNotDefault(models.Model):
name = models.CharField(max_length=20)
number = models.IntegerField()
name_tracker = ModelTracker(fields=['name'])
class ModelTrackedMultiple(models.Model):
name = models.CharField(max_length=20)
number = models.IntegerField()
name_tracker = ModelTracker(fields=['name'])
number_tracker = ModelTracker(fields=['number'])
class InheritedModelTracked(ModelTracked):
name2 = models.CharField(max_length=20)
class StatusFieldDefaultFilled(models.Model):
STATUS = Choices((0, "no", "No"), (1, "yes", "Yes"))
status = StatusField(default=STATUS.yes)
class StatusFieldDefaultNotFilled(models.Model):
STATUS = Choices((0, "no", "No"), (1, "yes", "Yes"))
status = StatusField()
class StatusFieldChoicesName(models.Model):
NAMED_STATUS = Choices((0, "no", "No"), (1, "yes", "Yes"))
status = StatusField(choices_name='NAMED_STATUS')
def init_ephem(orbits, load_path=None, show=False,
parallax_correction=False):
    '''Initialize the Skyfield ephemeris from a JPL BSP file.
    Takes the output of io.parse_occ as input.
    Requires Astropy and Skyfield.
    Optional:
    load_path (where the bsp and Skyfield data files are found).
    parallax_correction (apply the parallax correction from NuSTAR's orbit).
        Downloads the latest TLE archive from the NuSTAR SOC.
    Returns:
    observer, moon, ts
    The first two are Skyfield objects. The third is the Skyfield timescale
    object.
    '''
from skyfield.api import Loader, EarthSatellite
from astropy.time import Time
    if load_path is None:
        load_path = './'
    load = Loader(load_path)
planets = load('de436.bsp')
moon, earth = planets['moon'], planets['earth']
ts = load.timescale()
if parallax_correction is False:
observer = earth
else:
import nustar_planning.io as io
start_date = orbits.loc[0, 'visible']
utc = Time(start_date)
tlefile = io.download_tle(outdir=load_path)
mindt, line1, line2 = io.get_epoch_tle(utc, tlefile)
nustar = EarthSatellite(line1, line2)
observer = earth + nustar
return observer, moon, ts
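# A minimal usage sketch (illustrative only; assumes `orbits` is the DataFrame
# returned by io.parse_occ, with 'visible' and 'occulted' datetime columns):
#
#   from astropy.time import Time
#   observer, moon, ts = init_ephem(orbits, load_path='./ephem',
#                                   parallax_correction=True)
#   t = ts.from_astropy(Time(orbits.loc[0, 'visible']))
#   ra, dec, distance = observer.at(t).observe(moon).radec()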
def position(orbits, outfile=None, load_path=None, show=False,
             parallax_correction=False, steps=5):
    '''Get the instantaneous position of the Moon at a number of intervals
    through the orbit.
    Takes the output of parse_occ as input.
    Initializes the ephemeris and then loops over each orbit, splitting the
    orbit into a number of intervals (default is 5) to give the instantaneous
    astrometric position of the Moon at each time.
    Optional:
    load_path (where the bsp and Skyfield data files are found).
    parallax_correction (apply the parallax correction from NuSTAR's orbit).
        Downloads the latest TLE archive from the NuSTAR SOC.
    outfile: A text file where you can store the output.
        If outfile is None then the output is written to stdout.
    show: Force output to stdout even if you write an output file.
    steps: Number of intervals to use (default is 5).
    Returns:
    None; results are printed and/or written to outfile.
    '''
from astropy.time import Time
from datetime import timedelta
import astropy.units as u
if outfile is None and show is False:
show=True
dt = 0.
if outfile is not None:
f = open(outfile, 'w')
f.write('Aim Time RA Dec\n')
observer, moon, ts = init_ephem(orbits,
load_path=load_path, show=show,
parallax_correction=parallax_correction)
if show is True:
print('Aim Time RA Dec')
# Loop over every orbit:
for ind in range(len(orbits)):
tstart = orbits.loc[ind, 'visible']
tend = orbits.loc[ind, 'occulted']
on_time = (tend - tstart).total_seconds()
dt = ( on_time ) / steps
for i in range(steps):
point_time = tstart + timedelta(seconds=dt * i)
astro_time = Time(point_time)
t = ts.from_astropy(astro_time)
# Get the coordinates.
astrometric = observer.at(t).observe(moon)
ra, dec, distance = astrometric.radec()
# Store output in degrees
radeg = ra.to(u.deg)
decdeg = dec.to(u.deg)
if show is True:
print(point_time.isoformat()+' {:.5f} {:.5f}'.format(radeg.value, decdeg.value))
if outfile is not None:
f.write(point_time.isoformat()+' {:.5f} {:.5f}'.format(radeg.value, decdeg.value)+'\n')
if outfile is not None:
f.close()
return
def position_shift(orbits, outfile=None, load_path=None, show=False,
                   parallax_correction=True, **kwargs):
"""Get the pointing position for the lunar limb observations.
Takes output of parse_occ as input. Initializes the ephemeris and then loops over
each orbit, splitting the orbit up into a number of dwells where you keep the limb of
the Moon relatively fixed in the FoV.
The default here is to move the *center* of the FoV so that it's sitting on the
lunar limb. We move 1 Rmoon away from the center of the Moon in such a way that
you always keep Dets 0 and 1 occulted by the lunar limb.
Right now, just writes the results out to the stdout.
Parameters
----------
load_path: str, optional
For the Skyfield download files
    parallax_correction: bool
Apply the NuSTAR parallax corrections (True)
min_dwell: float with Astropy time units
Minimum amount of time to dwell at each position. Default is 0*u.s
min_shift: float with Astropy angle units
        Shift when the Moon moves this much. Default is 360*u.arcsec.
pad_time: float with Astropy time units
Extends the orbit from the occultation file by this amount in both directions.
        Default is 5 minutes.
pa: float with Astropy units
Position angle of the NuSTAR FoV. Default is 0*u.deg (co-aligned with North).
outfile: string
File for ascii output
Returns
-------
List of pointing positions to stdout
"""
from astropy.time import Time
from datetime import timedelta
import astropy.units as u
import numpy as np
from astropy.coordinates import SkyCoord
    Rmoon = kwargs.get('Rmoon', 940*u.arcsec)
min_dwell = kwargs.get('min_dwell', 0*u.s)
min_shift = kwargs.get('min_shift', 360*u.arcsec)
pa = kwargs.get('pa', 0*u.deg)
    # Step *along* this position angle, which should put the FoV with Det0 off
    # the top of the Moon. (Uncomment the np.pi offset below to step in the
    # *opposite* direction instead.)
    pa = pa  # + np.pi * u.rad
dt = kwargs.get('dt', 5.0*u.s)
diag = kwargs.get('diag', False)
pad_time = kwargs.get('pad_time', 5*u.min)
# if outfile is None and show is False:
# show=True
#
if outfile is not None:
f = open(outfile, 'w')
f.write('Arrive By RA Dec\n')
observer, moon, ts = init_ephem(orbits,
load_path=load_path, show=show,
parallax_correction=parallax_correction)
# if show is True:
# print('Aim Time RA Dec')
# Loop over every orbit:
for ind in range(len(orbits)):
tstart = orbits.loc[ind, 'visible'] - timedelta(minutes=pad_time.to(u.min).value)
tend = orbits.loc[ind, 'occulted'] + timedelta(minutes=pad_time.to(u.min).value)
on_time = (tend - tstart).total_seconds()
steps = int(on_time / dt.to(u.s).value)
last_point = None
for i in range(steps):
point_time = tstart + timedelta(seconds=dt.to(u.s).value * i)
astro_time = Time(point_time)
t = ts.from_astropy(astro_time)
# Get the coordinates.
astrometric = observer.at(t).observe(moon)
ra, dec, distance = astrometric.radec()
# Store output in degrees
radeg = ra.to(u.deg)
decdeg = dec.to(u.deg)
this_point = SkyCoord(radeg, decdeg, unit="deg")
if last_point is not None:
dshift = this_point.separation(last_point)
dwell = point_time - last_time
# Aim halfway between the two positions
aim_time = 0.5*dwell + last_time
# Below was for diagnostic testing and is *wrong*. Use the 50%
# location above to aim the damn satellite. This was used for the lunar
# eclipse test.
# aim_time = point_time
if (dshift.arcsec > min_shift.to(u.arcsec).value) & (dwell.seconds > min_dwell.to(u.s).value):
if diag is True:
print('Start of dwell: '+last_time.isoformat())
print('End of dwell: '+point_time.isoformat())
print('')
print('Time used to aim: '+aim_time.isoformat())
print('Dwell Duration (sec): {:.2f} \n Dwell Drift (arcmin): {:.2f}'.format(dwell.seconds, dshift.arcmin))
print('')
# Below is where we correctly compute the aim location for the
# aim_time
astro_time = Time(aim_time)
t = ts.from_astropy(astro_time)
astrometric_aim = observer.at(t).observe(moon)
                    ra_aim, dec_aim, distance = astrometric_aim.radec()
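                    # Offset the aim point by one lunar radius from the Moon's
                    # center along position angle `pa`; the RA term is divided
                    # by cos(dec) to turn the on-sky angular offset into an RA
                    # coordinate offset.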
dec_point = dec_aim.to(u.deg) + Rmoon.to(u.deg) * np.cos(pa)
ra_point = ra_aim.to(u.deg) + Rmoon.to(u.deg) * np.sin(pa) / np.cos(dec_aim.to(u.deg))
# We report the "last_time" instead of the "aim time" below because
# this is when we want the spacecraft to arrive at the new position.
if show is True:
print(last_time.strftime('%Y:%j:%H:%M:%S')+' RA: {:.5f} Dec: {:.5f}'.format(ra_point.value, dec_point.value))
print('')
if outfile is not None:
f.write(last_time.strftime('%Y:%j:%H:%M:%S')+' {:.5f} {:.5f}'.format(ra_point.value, dec_point.value)+'\n')
# Copy over for next dwell computation
last_time = point_time
last_point = this_point
else:
last_point = this_point
last_time = point_time
if outfile is not None:
f.close()
return
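# A minimal usage sketch (illustrative only; 'occ.txt' is a hypothetical
# occultation file and the exact io.parse_occ signature is assumed):
#
#   import astropy.units as u
#   import nustar_planning.io as io
#   orbits = io.parse_occ('occ.txt')
#   position_shift(orbits, outfile='dwells.txt', load_path='./ephem',
#                  pa=0 * u.deg, min_shift=360 * u.arcsec)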
#! /usr/bin/env python
"""Generate C code from an ASDL description."""
import os, sys
import asdl
TABSIZE = 4
MAX_COL = 80
def get_c_type(name):
"""Return a string for the C name of the type.
This function special cases the default types provided by asdl.
"""
if name in asdl.builtin_types:
return name
else:
return "%s_ty" % name
def reflow_lines(s, depth):
"""Reflow the line s indented depth tabs.
Return a sequence of lines where no line extends beyond MAX_COL
when properly indented. The first line is properly indented based
exclusively on depth * TABSIZE. All following lines -- these are
the reflowed lines generated by this function -- start at the same
column as the first character beyond the opening { in the first
line.
"""
size = MAX_COL - depth * TABSIZE
if len(s) < size:
return [s]
lines = []
cur = s
padding = ""
while len(cur) > size:
i = cur.rfind(' ', 0, size)
# XXX this should be fixed for real
if i == -1 and 'GeneratorExp' in cur:
i = size + 3
assert i != -1, "Impossible line %d to reflow: %r" % (size, s)
lines.append(padding + cur[:i])
if len(lines) == 1:
# find new size based on brace
j = cur.find('{', 0, i)
if j >= 0:
j += 2 # account for the brace and the space after it
size -= j
padding = " " * j
else:
j = cur.find('(', 0, i)
if j >= 0:
j += 1 # account for the paren (no space after it)
size -= j
padding = " " * j
cur = cur[i+1:]
else:
lines.append(padding + cur)
return lines
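# Example behavior (illustrative, not part of the original module): a line
# emitted at depth 1 must fit in 80 - 4 = 76 columns; reflow_lines splits it at
# the last space before that limit and pads each continuation line so it starts
# just past the first '{' (or '(') found on the first line.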
def is_simple(sum):
"""Return True if a sum is a simple.
A sum is simple if its types have no fields, e.g.
unaryop = Invert | Not | UAdd | USub
"""
for t in sum.types:
if t.fields:
return False
return True
class EmitVisitor(asdl.VisitorBase):
"""Visit that emits lines"""
def __init__(self, file):
self.file = file
self.identifiers = set()
super(EmitVisitor, self).__init__()
def emit_identifier(self, name):
name = str(name)
if name in self.identifiers:
return
self.emit("_Py_IDENTIFIER(%s);" % name, 0)
self.identifiers.add(name)
def emit(self, s, depth, reflow=True):
# XXX reflow long lines?
if reflow:
lines = reflow_lines(s, depth)
else:
lines = [s]
for line in lines:
line = (" " * TABSIZE * depth) + line + "\n"
self.file.write(line)
class TypeDefVisitor(EmitVisitor):
def visitModule(self, mod):
for dfn in mod.dfns:
self.visit(dfn)
def visitType(self, type, depth=0):
self.visit(type.value, type.name, depth)
def visitSum(self, sum, name, depth):
if is_simple(sum):
self.simple_sum(sum, name, depth)
else:
self.sum_with_constructors(sum, name, depth)
def simple_sum(self, sum, name, depth):
enum = []
for i in range(len(sum.types)):
type = sum.types[i]
enum.append("%s=%d" % (type.name, i + 1))
enums = ", ".join(enum)
ctype = get_c_type(name)
s = "typedef enum _%s { %s } %s;" % (name, enums, ctype)
self.emit(s, depth)
self.emit("", depth)
def sum_with_constructors(self, sum, name, depth):
ctype = get_c_type(name)
s = "typedef struct _%(name)s *%(ctype)s;" % locals()
self.emit(s, depth)
self.emit("", depth)
def visitProduct(self, product, name, depth):
ctype = get_c_type(name)
s = "typedef struct _%(name)s *%(ctype)s;" % locals()
self.emit(s, depth)
self.emit("", depth)
class StructVisitor(EmitVisitor):
"""Visitor to generate typedefs for AST."""
def visitModule(self, mod):
for dfn in mod.dfns:
self.visit(dfn)
def visitType(self, type, depth=0):
self.visit(type.value, type.name, depth)
def visitSum(self, sum, name, depth):
if not is_simple(sum):
self.sum_with_constructors(sum, name, depth)
def sum_with_constructors(self, sum, name, depth):
def emit(s, depth=depth):
self.emit(s % sys._getframe(1).f_locals, depth)
enum = []
for i in range(len(sum.types)):
type = sum.types[i]
enum.append("%s_kind=%d" % (type.name, i + 1))
emit("enum _%(name)s_kind {" + ", ".join(enum) + "};")
emit("struct _%(name)s {")
emit("enum _%(name)s_kind kind;", depth + 1)
emit("union {", depth + 1)
for t in sum.types:
self.visit(t, depth + 2)
emit("} v;", depth + 1)
for field in sum.attributes:
# rudimentary attribute handling
type = str(field.type)
assert type in asdl.builtin_types, type
emit("%s %s;" % (type, field.name), depth + 1);
emit("};")
emit("")
def visitConstructor(self, cons, depth):
if cons.fields:
self.emit("struct {", depth)
for f in cons.fields:
self.visit(f, depth + 1)
self.emit("} %s;" % cons.name, depth)
self.emit("", depth)
def visitField(self, field, depth):
# XXX need to lookup field.type, because it might be something
# like a builtin...
ctype = get_c_type(field.type)
name = field.name
if field.seq:
if field.type == 'cmpop':
self.emit("asdl_int_seq *%(name)s;" % locals(), depth)
else:
self.emit("asdl_seq *%(name)s;" % locals(), depth)
else:
self.emit("%(ctype)s %(name)s;" % locals(), depth)
def visitProduct(self, product, name, depth):
self.emit("struct _%(name)s {" % locals(), depth)
for f in product.fields:
self.visit(f, depth + 1)
for field in product.attributes:
# rudimentary attribute handling
type = str(field.type)
assert type in asdl.builtin_types, type
self.emit("%s %s;" % (type, field.name), depth + 1);
self.emit("};", depth)
self.emit("", depth)
class PrototypeVisitor(EmitVisitor):
"""Generate function prototypes for the .h file"""
def visitModule(self, mod):
for dfn in mod.dfns:
self.visit(dfn)
def visitType(self, type):
self.visit(type.value, type.name)
def visitSum(self, sum, name):
if is_simple(sum):
pass # XXX
else:
for t in sum.types:
self.visit(t, name, sum.attributes)
def get_args(self, fields):
"""Return list of C argument into, one for each field.
Argument info is 3-tuple of a C type, variable name, and flag
that is true if type can be NULL.
"""
args = []
unnamed = {}
for f in fields:
if f.name is None:
name = f.type
c = unnamed[name] = unnamed.get(name, 0) + 1
if c > 1:
name = "name%d" % (c - 1)
else:
name = f.name
# XXX should extend get_c_type() to handle this
if f.seq:
if f.type == 'cmpop':
ctype = "asdl_int_seq *"
else:
ctype = "asdl_seq *"
else:
ctype = get_c_type(f.type)
args.append((ctype, name, f.opt or f.seq))
return args
def visitConstructor(self, cons, type, attrs):
args = self.get_args(cons.fields)
attrs = self.get_args(attrs)
ctype = get_c_type(type)
self.emit_function(cons.name, ctype, args, attrs)
def emit_function(self, name, ctype, args, attrs, union=True):
args = args + attrs
if args:
argstr = ", ".join(["%s %s" % (atype, aname)
for atype, aname, opt in args])
argstr += ", PyArena *arena"
else:
argstr = "PyArena *arena"
margs = "a0"
for i in range(1, len(args)+1):
margs += ", a%d" % i
self.emit("#define %s(%s) _Py_%s(%s)" % (name, margs, name, margs), 0,
reflow=False)
self.emit("%s _Py_%s(%s);" % (ctype, name, argstr), False)
def visitProduct(self, prod, name):
self.emit_function(name, get_c_type(name),
self.get_args(prod.fields),
self.get_args(prod.attributes),
union=False)
class FunctionVisitor(PrototypeVisitor):
"""Visitor to generate constructor functions for AST."""
def emit_function(self, name, ctype, args, attrs, union=True):
def emit(s, depth=0, reflow=True):
self.emit(s, depth, reflow)
argstr = ", ".join(["%s %s" % (atype, aname)
for atype, aname, opt in args + attrs])
if argstr:
argstr += ", PyArena *arena"
else:
argstr = "PyArena *arena"
self.emit("%s" % ctype, 0)
emit("%s(%s)" % (name, argstr))
emit("{")
emit("%s p;" % ctype, 1)
for argtype, argname, opt in args:
if not opt and argtype != "int":
emit("if (!%s) {" % argname, 1)
emit("PyErr_SetString(PyExc_ValueError,", 2)
msg = "field %s is required for %s" % (argname, name)
emit(' "%s");' % msg,
2, reflow=False)
emit('return NULL;', 2)
emit('}', 1)
emit("p = (%s)PyArena_Malloc(arena, sizeof(*p));" % ctype, 1);
emit("if (!p)", 1)
emit("return NULL;", 2)
if union:
self.emit_body_union(name, args, attrs)
else:
self.emit_body_struct(name, args, attrs)
emit("return p;", 1)
emit("}")
emit("")
def emit_body_union(self, name, args, attrs):
def emit(s, depth=0, reflow=True):
self.emit(s, depth, reflow)
emit("p->kind = %s_kind;" % name, 1)
for argtype, argname, opt in args:
emit("p->v.%s.%s = %s;" % (name, argname, argname), 1)
for argtype, argname, opt in attrs:
emit("p->%s = %s;" % (argname, argname), 1)
def emit_body_struct(self, name, args, attrs):
def emit(s, depth=0, reflow=True):
self.emit(s, depth, reflow)
for argtype, argname, opt in args:
emit("p->%s = %s;" % (argname, argname), 1)
for argtype, argname, opt in attrs:
emit("p->%s = %s;" % (argname, argname), 1)
class PickleVisitor(EmitVisitor):
def visitModule(self, mod):
for dfn in mod.dfns:
self.visit(dfn)
def visitType(self, type):
self.visit(type.value, type.name)
def visitSum(self, sum, name):
pass
def visitProduct(self, sum, name):
pass
def visitConstructor(self, cons, name):
pass
def visitField(self, sum):
pass
class Obj2ModPrototypeVisitor(PickleVisitor):
def visitProduct(self, prod, name):
code = "static int obj2ast_%s(PyObject* obj, %s* out, PyArena* arena);"
self.emit(code % (name, get_c_type(name)), 0)
visitSum = visitProduct
class Obj2ModVisitor(PickleVisitor):
def funcHeader(self, name):
ctype = get_c_type(name)
self.emit("int", 0)
self.emit("obj2ast_%s(PyObject* obj, %s* out, PyArena* arena)" % (name, ctype), 0)
self.emit("{", 0)
self.emit("int isinstance;", 1)
self.emit("", 0)
def sumTrailer(self, name, add_label=False):
self.emit("", 0)
# there's really nothing more we can do if this fails ...
error = "expected some sort of %s, but got %%R" % name
format = "PyErr_Format(PyExc_TypeError, \"%s\", obj);"
self.emit(format % error, 1, reflow=False)
if add_label:
self.emit("failed:", 1)
self.emit("Py_XDECREF(tmp);", 1)
self.emit("return 1;", 1)
self.emit("}", 0)
self.emit("", 0)
def simpleSum(self, sum, name):
self.funcHeader(name)
for t in sum.types:
line = ("isinstance = PyObject_IsInstance(obj, "
"(PyObject *)%s_type);")
self.emit(line % (t.name,), 1)
self.emit("if (isinstance == -1) {", 1)
self.emit("return 1;", 2)
self.emit("}", 1)
self.emit("if (isinstance) {", 1)
self.emit("*out = %s;" % t.name, 2)
self.emit("return 0;", 2)
self.emit("}", 1)
self.sumTrailer(name)
def buildArgs(self, fields):
return ", ".join(fields + ["arena"])
def complexSum(self, sum, name):
self.funcHeader(name)
self.emit("PyObject *tmp = NULL;", 1)
for a in sum.attributes:
self.visitAttributeDeclaration(a, name, sum=sum)
self.emit("", 0)
# XXX: should we only do this for 'expr'?
self.emit("if (obj == Py_None) {", 1)
self.emit("*out = NULL;", 2)
self.emit("return 0;", 2)
self.emit("}", 1)
for a in sum.attributes:
self.visitField(a, name, sum=sum, depth=1)
for t in sum.types:
line = "isinstance = PyObject_IsInstance(obj, (PyObject*)%s_type);"
self.emit(line % (t.name,), 1)
self.emit("if (isinstance == -1) {", 1)
self.emit("return 1;", 2)
self.emit("}", 1)
self.emit("if (isinstance) {", 1)
for f in t.fields:
self.visitFieldDeclaration(f, t.name, sum=sum, depth=2)
self.emit("", 0)
for f in t.fields:
self.visitField(f, t.name, sum=sum, depth=2)
args = [f.name for f in t.fields] + [a.name for a in sum.attributes]
self.emit("*out = %s(%s);" % (t.name, self.buildArgs(args)), 2)
self.emit("if (*out == NULL) goto failed;", 2)
self.emit("return 0;", 2)
self.emit("}", 1)
self.sumTrailer(name, True)
def visitAttributeDeclaration(self, a, name, sum=sum):
ctype = get_c_type(a.type)
self.emit("%s %s;" % (ctype, a.name), 1)
def visitSum(self, sum, name):
if is_simple(sum):
self.simpleSum(sum, name)
else:
self.complexSum(sum, name)
def visitProduct(self, prod, name):
ctype = get_c_type(name)
self.emit("int", 0)
self.emit("obj2ast_%s(PyObject* obj, %s* out, PyArena* arena)" % (name, ctype), 0)
self.emit("{", 0)
self.emit("PyObject* tmp = NULL;", 1)
for f in prod.fields:
self.visitFieldDeclaration(f, name, prod=prod, depth=1)
for a in prod.attributes:
self.visitFieldDeclaration(a, name, prod=prod, depth=1)
self.emit("", 0)
for f in prod.fields:
self.visitField(f, name, prod=prod, depth=1)
for a in prod.attributes:
self.visitField(a, name, prod=prod, depth=1)
args = [f.name for f in prod.fields]
args.extend([a.name for a in prod.attributes])
self.emit("*out = %s(%s);" % (name, self.buildArgs(args)), 1)
self.emit("return 0;", 1)
self.emit("failed:", 0)
self.emit("Py_XDECREF(tmp);", 1)
self.emit("return 1;", 1)
self.emit("}", 0)
self.emit("", 0)
def visitFieldDeclaration(self, field, name, sum=None, prod=None, depth=0):
ctype = get_c_type(field.type)
if field.seq:
if self.isSimpleType(field):
self.emit("asdl_int_seq* %s;" % field.name, depth)
else:
self.emit("asdl_seq* %s;" % field.name, depth)
else:
ctype = get_c_type(field.type)
self.emit("%s %s;" % (ctype, field.name), depth)
def isSimpleSum(self, field):
# XXX can the members of this list be determined automatically?
return field.type in ('expr_context', 'boolop', 'operator',
'unaryop', 'cmpop')
def isNumeric(self, field):
return get_c_type(field.type) in ("int", "bool")
def isSimpleType(self, field):
return self.isSimpleSum(field) or self.isNumeric(field)
def visitField(self, field, name, sum=None, prod=None, depth=0):
ctype = get_c_type(field.type)
if field.opt:
check = "exists_not_none(obj, &PyId_%s)" % (field.name,)
else:
check = "_PyObject_HasAttrId(obj, &PyId_%s)" % (field.name,)
self.emit("if (%s) {" % (check,), depth, reflow=False)
self.emit("int res;", depth+1)
if field.seq:
self.emit("Py_ssize_t len;", depth+1)
self.emit("Py_ssize_t i;", depth+1)
self.emit("tmp = _PyObject_GetAttrId(obj, &PyId_%s);" % field.name, depth+1)
self.emit("if (tmp == NULL) goto failed;", depth+1)
if field.seq:
self.emit("if (!PyList_Check(tmp)) {", depth+1)
self.emit("PyErr_Format(PyExc_TypeError, \"%s field \\\"%s\\\" must "
"be a list, not a %%.200s\", tmp->ob_type->tp_name);" %
(name, field.name),
depth+2, reflow=False)
self.emit("goto failed;", depth+2)
self.emit("}", depth+1)
self.emit("len = PyList_GET_SIZE(tmp);", depth+1)
if self.isSimpleType(field):
self.emit("%s = _Py_asdl_int_seq_new(len, arena);" % field.name, depth+1)
else:
self.emit("%s = _Py_asdl_seq_new(len, arena);" % field.name, depth+1)
self.emit("if (%s == NULL) goto failed;" % field.name, depth+1)
self.emit("for (i = 0; i < len; i++) {", depth+1)
self.emit("%s value;" % ctype, depth+2)
self.emit("res = obj2ast_%s(PyList_GET_ITEM(tmp, i), &value, arena);" %
field.type, depth+2, reflow=False)
self.emit("if (res != 0) goto failed;", depth+2)
self.emit("if (len != PyList_GET_SIZE(tmp)) {", depth+2)
self.emit("PyErr_SetString(PyExc_RuntimeError, \"%s field \\\"%s\\\" "
"changed size during iteration\");" %
(name, field.name),
depth+3, reflow=False)
self.emit("goto failed;", depth+3)
self.emit("}", depth+2)
self.emit("asdl_seq_SET(%s, i, value);" % field.name, depth+2)
self.emit("}", depth+1)
else:
self.emit("res = obj2ast_%s(tmp, &%s, arena);" %
(field.type, field.name), depth+1)
self.emit("if (res != 0) goto failed;", depth+1)
self.emit("Py_CLEAR(tmp);", depth+1)
self.emit("} else {", depth)
if not field.opt:
message = "required field \\\"%s\\\" missing from %s" % (field.name, name)
format = "PyErr_SetString(PyExc_TypeError, \"%s\");"
self.emit(format % message, depth+1, reflow=False)
self.emit("return 1;", depth+1)
else:
if self.isNumeric(field):
self.emit("%s = 0;" % field.name, depth+1)
elif not self.isSimpleType(field):
self.emit("%s = NULL;" % field.name, depth+1)
else:
raise TypeError("could not determine the default value for %s" % field.name)
self.emit("}", depth)
class MarshalPrototypeVisitor(PickleVisitor):
def prototype(self, sum, name):
ctype = get_c_type(name)
self.emit("static int marshal_write_%s(PyObject **, int *, %s);"
% (name, ctype), 0)
visitProduct = visitSum = prototype
class PyTypesDeclareVisitor(PickleVisitor):
def visitProduct(self, prod, name):
self.emit("static PyTypeObject *%s_type;" % name, 0)
self.emit("static PyObject* ast2obj_%s(void*);" % name, 0)
if prod.attributes:
for a in prod.attributes:
self.emit_identifier(a.name)
self.emit("static char *%s_attributes[] = {" % name, 0)
for a in prod.attributes:
self.emit('"%s",' % a.name, 1)
self.emit("};", 0)
if prod.fields:
for f in prod.fields:
self.emit_identifier(f.name)
self.emit("static char *%s_fields[]={" % name,0)
for f in prod.fields:
self.emit('"%s",' % f.name, 1)
self.emit("};", 0)
def visitSum(self, sum, name):
self.emit("static PyTypeObject *%s_type;" % name, 0)
if sum.attributes:
for a in sum.attributes:
self.emit_identifier(a.name)
self.emit("static char *%s_attributes[] = {" % name, 0)
for a in sum.attributes:
self.emit('"%s",' % a.name, 1)
self.emit("};", 0)
ptype = "void*"
if is_simple(sum):
ptype = get_c_type(name)
tnames = []
for t in sum.types:
tnames.append(str(t.name)+"_singleton")
tnames = ", *".join(tnames)
self.emit("static PyObject *%s;" % tnames, 0)
self.emit("static PyObject* ast2obj_%s(%s);" % (name, ptype), 0)
for t in sum.types:
self.visitConstructor(t, name)
def visitConstructor(self, cons, name):
self.emit("static PyTypeObject *%s_type;" % cons.name, 0)
if cons.fields:
for t in cons.fields:
self.emit_identifier(t.name)
self.emit("static char *%s_fields[]={" % cons.name, 0)
for t in cons.fields:
self.emit('"%s",' % t.name, 1)
self.emit("};",0)
class PyTypesVisitor(PickleVisitor):
def visitModule(self, mod):
self.emit("""
typedef struct {
PyObject_HEAD
PyObject *dict;
} AST_object;
static void
ast_dealloc(AST_object *self)
{
/* bpo-31095: UnTrack is needed before calling any callbacks */
PyObject_GC_UnTrack(self);
Py_CLEAR(self->dict);
Py_TYPE(self)->tp_free(self);
}
static int
ast_traverse(AST_object *self, visitproc visit, void *arg)
{
Py_VISIT(self->dict);
return 0;
}
static void
ast_clear(AST_object *self)
{
Py_CLEAR(self->dict);
}
static int
ast_type_init(PyObject *self, PyObject *args, PyObject *kw)
{
_Py_IDENTIFIER(_fields);
Py_ssize_t i, numfields = 0;
int res = -1;
PyObject *key, *value, *fields;
fields = _PyObject_GetAttrId((PyObject*)Py_TYPE(self), &PyId__fields);
if (!fields)
PyErr_Clear();
if (fields) {
numfields = PySequence_Size(fields);
if (numfields == -1)
goto cleanup;
}
res = 0; /* if no error occurs, this stays 0 to the end */
if (PyTuple_GET_SIZE(args) > 0) {
if (numfields != PyTuple_GET_SIZE(args)) {
PyErr_Format(PyExc_TypeError, "%.400s constructor takes %s"
"%zd positional argument%s",
Py_TYPE(self)->tp_name,
numfields == 0 ? "" : "either 0 or ",
numfields, numfields == 1 ? "" : "s");
res = -1;
goto cleanup;
}
for (i = 0; i < PyTuple_GET_SIZE(args); i++) {
/* cannot be reached when fields is NULL */
PyObject *name = PySequence_GetItem(fields, i);
if (!name) {
res = -1;
goto cleanup;
}
res = PyObject_SetAttr(self, name, PyTuple_GET_ITEM(args, i));
Py_DECREF(name);
if (res < 0)
goto cleanup;
}
}
if (kw) {
i = 0; /* needed by PyDict_Next */
while (PyDict_Next(kw, &i, &key, &value)) {
res = PyObject_SetAttr(self, key, value);
if (res < 0)
goto cleanup;
}
}
cleanup:
Py_XDECREF(fields);
return res;
}
/* Pickling support */
static PyObject *
ast_type_reduce(PyObject *self, PyObject *unused)
{
PyObject *res;
_Py_IDENTIFIER(__dict__);
PyObject *dict = _PyObject_GetAttrId(self, &PyId___dict__);
if (dict == NULL) {
if (PyErr_ExceptionMatches(PyExc_AttributeError))
PyErr_Clear();
else
return NULL;
}
if (dict) {
res = Py_BuildValue("O()O", Py_TYPE(self), dict);
Py_DECREF(dict);
return res;
}
return Py_BuildValue("O()", Py_TYPE(self));
}
static PyMethodDef ast_type_methods[] = {
{"__reduce__", ast_type_reduce, METH_NOARGS, NULL},
{NULL}
};
static PyGetSetDef ast_type_getsets[] = {
{"__dict__", PyObject_GenericGetDict, PyObject_GenericSetDict},
{NULL}
};
static PyTypeObject AST_type = {
PyVarObject_HEAD_INIT(&PyType_Type, 0)
"_ast.AST",
sizeof(AST_object),
0,
(destructor)ast_dealloc, /* tp_dealloc */
0, /* tp_print */
0, /* tp_getattr */
0, /* tp_setattr */
0, /* tp_reserved */
0, /* tp_repr */
0, /* tp_as_number */
0, /* tp_as_sequence */
0, /* tp_as_mapping */
0, /* tp_hash */
0, /* tp_call */
0, /* tp_str */
PyObject_GenericGetAttr, /* tp_getattro */
PyObject_GenericSetAttr, /* tp_setattro */
0, /* tp_as_buffer */
Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_GC, /* tp_flags */
0, /* tp_doc */
(traverseproc)ast_traverse, /* tp_traverse */
(inquiry)ast_clear, /* tp_clear */
0, /* tp_richcompare */
0, /* tp_weaklistoffset */
0, /* tp_iter */
0, /* tp_iternext */
ast_type_methods, /* tp_methods */
0, /* tp_members */
ast_type_getsets, /* tp_getset */
0, /* tp_base */
0, /* tp_dict */
0, /* tp_descr_get */
0, /* tp_descr_set */
offsetof(AST_object, dict),/* tp_dictoffset */
(initproc)ast_type_init, /* tp_init */
PyType_GenericAlloc, /* tp_alloc */
PyType_GenericNew, /* tp_new */
PyObject_GC_Del, /* tp_free */
};
static PyTypeObject* make_type(char *type, PyTypeObject* base, char**fields, int num_fields)
{
PyObject *fnames, *result;
int i;
fnames = PyTuple_New(num_fields);
if (!fnames) return NULL;
for (i = 0; i < num_fields; i++) {
PyObject *field = PyUnicode_FromString(fields[i]);
if (!field) {
Py_DECREF(fnames);
return NULL;
}
PyTuple_SET_ITEM(fnames, i, field);
}
result = PyObject_CallFunction((PyObject*)&PyType_Type, "s(O){sOss}",
type, base, "_fields", fnames, "__module__", "_ast");
Py_DECREF(fnames);
return (PyTypeObject*)result;
}
static int add_attributes(PyTypeObject* type, char**attrs, int num_fields)
{
int i, result;
_Py_IDENTIFIER(_attributes);
PyObject *s, *l = PyTuple_New(num_fields);
if (!l)
return 0;
for (i = 0; i < num_fields; i++) {
s = PyUnicode_FromString(attrs[i]);
if (!s) {
Py_DECREF(l);
return 0;
}
PyTuple_SET_ITEM(l, i, s);
}
result = _PyObject_SetAttrId((PyObject*)type, &PyId__attributes, l) >= 0;
Py_DECREF(l);
return result;
}
/* Conversion AST -> Python */
static PyObject* ast2obj_list(asdl_seq *seq, PyObject* (*func)(void*))
{
Py_ssize_t i, n = asdl_seq_LEN(seq);
PyObject *result = PyList_New(n);
PyObject *value;
if (!result)
return NULL;
for (i = 0; i < n; i++) {
value = func(asdl_seq_GET(seq, i));
if (!value) {
Py_DECREF(result);
return NULL;
}
PyList_SET_ITEM(result, i, value);
}
return result;
}
static PyObject* ast2obj_object(void *o)
{
if (!o)
o = Py_None;
Py_INCREF((PyObject*)o);
return (PyObject*)o;
}
#define ast2obj_singleton ast2obj_object
#define ast2obj_identifier ast2obj_object
#define ast2obj_string ast2obj_object
#define ast2obj_bytes ast2obj_object
static PyObject* ast2obj_int(long b)
{
return PyLong_FromLong(b);
}
/* Conversion Python -> AST */
static int obj2ast_singleton(PyObject *obj, PyObject** out, PyArena* arena)
{
if (obj != Py_None && obj != Py_True && obj != Py_False) {
PyErr_SetString(PyExc_ValueError,
"AST singleton must be True, False, or None");
return 1;
}
*out = obj;
return 0;
}
static int obj2ast_object(PyObject* obj, PyObject** out, PyArena* arena)
{
if (obj == Py_None)
obj = NULL;
if (obj) {
if (PyArena_AddPyObject(arena, obj) < 0) {
*out = NULL;
return -1;
}
Py_INCREF(obj);
}
*out = obj;
return 0;
}
static int obj2ast_identifier(PyObject* obj, PyObject** out, PyArena* arena)
{
if (!PyUnicode_CheckExact(obj) && obj != Py_None) {
PyErr_SetString(PyExc_TypeError, "AST identifier must be of type str");
return 1;
}
return obj2ast_object(obj, out, arena);
}
static int obj2ast_string(PyObject* obj, PyObject** out, PyArena* arena)
{
if (!PyUnicode_CheckExact(obj) && !PyBytes_CheckExact(obj)) {
PyErr_SetString(PyExc_TypeError, "AST string must be of type str");
return 1;
}
return obj2ast_object(obj, out, arena);
}
static int obj2ast_bytes(PyObject* obj, PyObject** out, PyArena* arena)
{
if (!PyBytes_CheckExact(obj)) {
PyErr_SetString(PyExc_TypeError, "AST bytes must be of type bytes");
return 1;
}
return obj2ast_object(obj, out, arena);
}
static int obj2ast_int(PyObject* obj, int* out, PyArena* arena)
{
int i;
if (!PyLong_Check(obj)) {
PyErr_Format(PyExc_ValueError, "invalid integer value: %R", obj);
return 1;
}
i = (int)PyLong_AsLong(obj);
if (i == -1 && PyErr_Occurred())
return 1;
*out = i;
return 0;
}
static int add_ast_fields(void)
{
PyObject *empty_tuple, *d;
if (PyType_Ready(&AST_type) < 0)
return -1;
d = AST_type.tp_dict;
empty_tuple = PyTuple_New(0);
if (!empty_tuple ||
PyDict_SetItemString(d, "_fields", empty_tuple) < 0 ||
PyDict_SetItemString(d, "_attributes", empty_tuple) < 0) {
Py_XDECREF(empty_tuple);
return -1;
}
Py_DECREF(empty_tuple);
return 0;
}
static int exists_not_none(PyObject *obj, _Py_Identifier *id)
{
int isnone;
PyObject *attr = _PyObject_GetAttrId(obj, id);
if (!attr) {
PyErr_Clear();
return 0;
}
isnone = attr == Py_None;
Py_DECREF(attr);
return !isnone;
}
""", 0, reflow=False)
self.emit("static int init_types(void)",0)
self.emit("{", 0)
self.emit("static int initialized;", 1)
self.emit("if (initialized) return 1;", 1)
self.emit("if (add_ast_fields() < 0) return 0;", 1)
for dfn in mod.dfns:
self.visit(dfn)
self.emit("initialized = 1;", 1)
self.emit("return 1;", 1);
self.emit("}", 0)
def visitProduct(self, prod, name):
if prod.fields:
fields = name+"_fields"
else:
fields = "NULL"
self.emit('%s_type = make_type("%s", &AST_type, %s, %d);' %
(name, name, fields, len(prod.fields)), 1)
self.emit("if (!%s_type) return 0;" % name, 1)
if prod.attributes:
self.emit("if (!add_attributes(%s_type, %s_attributes, %d)) return 0;" %
(name, name, len(prod.attributes)), 1)
else:
self.emit("if (!add_attributes(%s_type, NULL, 0)) return 0;" % name, 1)
def visitSum(self, sum, name):
self.emit('%s_type = make_type("%s", &AST_type, NULL, 0);' %
(name, name), 1)
self.emit("if (!%s_type) return 0;" % name, 1)
if sum.attributes:
self.emit("if (!add_attributes(%s_type, %s_attributes, %d)) return 0;" %
(name, name, len(sum.attributes)), 1)
else:
self.emit("if (!add_attributes(%s_type, NULL, 0)) return 0;" % name, 1)
simple = is_simple(sum)
for t in sum.types:
self.visitConstructor(t, name, simple)
def visitConstructor(self, cons, name, simple):
if cons.fields:
fields = cons.name+"_fields"
else:
fields = "NULL"
self.emit('%s_type = make_type("%s", %s_type, %s, %d);' %
(cons.name, cons.name, name, fields, len(cons.fields)), 1)
self.emit("if (!%s_type) return 0;" % cons.name, 1)
if simple:
self.emit("%s_singleton = PyType_GenericNew(%s_type, NULL, NULL);" %
(cons.name, cons.name), 1)
self.emit("if (!%s_singleton) return 0;" % cons.name, 1)
class ASTModuleVisitor(PickleVisitor):
def visitModule(self, mod):
self.emit("static struct PyModuleDef _astmodule = {", 0)
self.emit(' PyModuleDef_HEAD_INIT, "_ast"', 0)
self.emit("};", 0)
self.emit("PyMODINIT_FUNC", 0)
self.emit("PyInit__ast(void)", 0)
self.emit("{", 0)
self.emit("PyObject *m, *d;", 1)
self.emit("if (!init_types()) return NULL;", 1)
self.emit('m = PyModule_Create(&_astmodule);', 1)
self.emit("if (!m) return NULL;", 1)
self.emit("d = PyModule_GetDict(m);", 1)
self.emit('if (PyDict_SetItemString(d, "AST", (PyObject*)&AST_type) < 0) return NULL;', 1)
self.emit('if (PyModule_AddIntMacro(m, PyCF_ONLY_AST) < 0)', 1)
self.emit("return NULL;", 2)
for dfn in mod.dfns:
self.visit(dfn)
self.emit("return m;", 1)
self.emit("}", 0)
def visitProduct(self, prod, name):
self.addObj(name)
def visitSum(self, sum, name):
self.addObj(name)
for t in sum.types:
self.visitConstructor(t, name)
def visitConstructor(self, cons, name):
self.addObj(cons.name)
def addObj(self, name):
self.emit('if (PyDict_SetItemString(d, "%s", (PyObject*)%s_type) < 0) return NULL;' % (name, name), 1)
_SPECIALIZED_SEQUENCES = ('stmt', 'expr')
def find_sequence(fields, doing_specialization):
"""Return True if any field uses a sequence."""
for f in fields:
if f.seq:
if not doing_specialization:
return True
if str(f.type) not in _SPECIALIZED_SEQUENCES:
return True
return False
def has_sequence(types, doing_specialization):
for t in types:
if find_sequence(t.fields, doing_specialization):
return True
return False
class StaticVisitor(PickleVisitor):
CODE = '''Very simple, always emit this static code. Override CODE'''
def visit(self, object):
self.emit(self.CODE, 0, reflow=False)
class ObjVisitor(PickleVisitor):
def func_begin(self, name):
ctype = get_c_type(name)
self.emit("PyObject*", 0)
self.emit("ast2obj_%s(void* _o)" % (name), 0)
self.emit("{", 0)
self.emit("%s o = (%s)_o;" % (ctype, ctype), 1)
self.emit("PyObject *result = NULL, *value = NULL;", 1)
self.emit('if (!o) {', 1)
self.emit("Py_INCREF(Py_None);", 2)
self.emit('return Py_None;', 2)
self.emit("}", 1)
self.emit('', 0)
def func_end(self):
self.emit("return result;", 1)
self.emit("failed:", 0)
self.emit("Py_XDECREF(value);", 1)
self.emit("Py_XDECREF(result);", 1)
self.emit("return NULL;", 1)
self.emit("}", 0)
self.emit("", 0)
def visitSum(self, sum, name):
if is_simple(sum):
self.simpleSum(sum, name)
return
self.func_begin(name)
self.emit("switch (o->kind) {", 1)
for i in range(len(sum.types)):
t = sum.types[i]
self.visitConstructor(t, i + 1, name)
self.emit("}", 1)
for a in sum.attributes:
self.emit("value = ast2obj_%s(o->%s);" % (a.type, a.name), 1)
self.emit("if (!value) goto failed;", 1)
self.emit('if (_PyObject_SetAttrId(result, &PyId_%s, value) < 0)' % a.name, 1)
self.emit('goto failed;', 2)
self.emit('Py_DECREF(value);', 1)
self.func_end()
def simpleSum(self, sum, name):
self.emit("PyObject* ast2obj_%s(%s_ty o)" % (name, name), 0)
self.emit("{", 0)
self.emit("switch(o) {", 1)
for t in sum.types:
self.emit("case %s:" % t.name, 2)
self.emit("Py_INCREF(%s_singleton);" % t.name, 3)
self.emit("return %s_singleton;" % t.name, 3)
self.emit("default:", 2)
self.emit('/* should never happen, but just in case ... */', 3)
code = "PyErr_Format(PyExc_SystemError, \"unknown %s found\");" % name
self.emit(code, 3, reflow=False)
self.emit("return NULL;", 3)
self.emit("}", 1)
self.emit("}", 0)
def visitProduct(self, prod, name):
self.func_begin(name)
self.emit("result = PyType_GenericNew(%s_type, NULL, NULL);" % name, 1);
self.emit("if (!result) return NULL;", 1)
for field in prod.fields:
self.visitField(field, name, 1, True)
for a in prod.attributes:
self.emit("value = ast2obj_%s(o->%s);" % (a.type, a.name), 1)
self.emit("if (!value) goto failed;", 1)
self.emit('if (_PyObject_SetAttrId(result, &PyId_%s, value) < 0)' % a.name, 1)
self.emit('goto failed;', 2)
self.emit('Py_DECREF(value);', 1)
self.func_end()
def visitConstructor(self, cons, enum, name):
self.emit("case %s_kind:" % cons.name, 1)
self.emit("result = PyType_GenericNew(%s_type, NULL, NULL);" % cons.name, 2);
self.emit("if (!result) goto failed;", 2)
for f in cons.fields:
self.visitField(f, cons.name, 2, False)
self.emit("break;", 2)
def visitField(self, field, name, depth, product):
def emit(s, d):
self.emit(s, depth + d)
if product:
value = "o->%s" % field.name
else:
value = "o->v.%s.%s" % (name, field.name)
self.set(field, value, depth)
emit("if (!value) goto failed;", 0)
emit('if (_PyObject_SetAttrId(result, &PyId_%s, value) == -1)' % field.name, 0)
emit("goto failed;", 1)
emit("Py_DECREF(value);", 0)
def emitSeq(self, field, value, depth, emit):
emit("seq = %s;" % value, 0)
emit("n = asdl_seq_LEN(seq);", 0)
emit("value = PyList_New(n);", 0)
emit("if (!value) goto failed;", 0)
emit("for (i = 0; i < n; i++) {", 0)
self.set("value", field, "asdl_seq_GET(seq, i)", depth + 1)
emit("if (!value1) goto failed;", 1)
emit("PyList_SET_ITEM(value, i, value1);", 1)
emit("value1 = NULL;", 1)
emit("}", 0)
def set(self, field, value, depth):
if field.seq:
# XXX should really check for is_simple, but that requires a symbol table
if field.type == "cmpop":
# While the sequence elements are stored as void*,
# ast2obj_cmpop expects an enum
self.emit("{", depth)
self.emit("Py_ssize_t i, n = asdl_seq_LEN(%s);" % value, depth+1)
self.emit("value = PyList_New(n);", depth+1)
self.emit("if (!value) goto failed;", depth+1)
self.emit("for(i = 0; i < n; i++)", depth+1)
# This cannot fail, so no need for error handling
self.emit("PyList_SET_ITEM(value, i, ast2obj_cmpop((cmpop_ty)asdl_seq_GET(%s, i)));" % value,
depth+2, reflow=False)
self.emit("}", depth)
else:
self.emit("value = ast2obj_list(%s, ast2obj_%s);" % (value, field.type), depth)
else:
ctype = get_c_type(field.type)
self.emit("value = ast2obj_%s(%s);" % (field.type, value), depth, reflow=False)
class PartingShots(StaticVisitor):
CODE = """
PyObject* PyAST_mod2obj(mod_ty t)
{
if (!init_types())
return NULL;
return ast2obj_mod(t);
}
/* mode is 0 for "exec", 1 for "eval" and 2 for "single" input */
mod_ty PyAST_obj2mod(PyObject* ast, PyArena* arena, int mode)
{
mod_ty res;
PyObject *req_type[3];
char *req_name[] = {"Module", "Expression", "Interactive"};
int isinstance;
req_type[0] = (PyObject*)Module_type;
req_type[1] = (PyObject*)Expression_type;
req_type[2] = (PyObject*)Interactive_type;
assert(0 <= mode && mode <= 2);
if (!init_types())
return NULL;
isinstance = PyObject_IsInstance(ast, req_type[mode]);
if (isinstance == -1)
return NULL;
if (!isinstance) {
PyErr_Format(PyExc_TypeError, "expected %s node, got %.400s",
req_name[mode], Py_TYPE(ast)->tp_name);
return NULL;
}
if (obj2ast_mod(ast, &res, arena) != 0)
return NULL;
else
return res;
}
int PyAST_Check(PyObject* obj)
{
if (!init_types())
return -1;
return PyObject_IsInstance(obj, (PyObject*)&AST_type);
}
"""
class ChainOfVisitors:
def __init__(self, *visitors):
self.visitors = visitors
def visit(self, object):
for v in self.visitors:
v.visit(object)
v.emit("", 0)
common_msg = "/* File automatically generated by %s. */\n\n"
def main(srcfile, dump_module=False):
argv0 = sys.argv[0]
components = argv0.split(os.sep)
argv0 = os.sep.join(components[-2:])
auto_gen_msg = common_msg % argv0
mod = asdl.parse(srcfile)
if dump_module:
print('Parsed Module:')
print(mod)
if not asdl.check(mod):
sys.exit(1)
if INC_DIR:
p = "%s/%s-ast.h" % (INC_DIR, mod.name)
f = open(p, "w")
f.write(auto_gen_msg)
f.write('#include "asdl.h"\n\n')
c = ChainOfVisitors(TypeDefVisitor(f),
StructVisitor(f),
PrototypeVisitor(f),
)
c.visit(mod)
f.write("PyObject* PyAST_mod2obj(mod_ty t);\n")
f.write("mod_ty PyAST_obj2mod(PyObject* ast, PyArena* arena, int mode);\n")
f.write("int PyAST_Check(PyObject* obj);\n")
f.close()
if SRC_DIR:
p = os.path.join(SRC_DIR, str(mod.name) + "-ast.c")
f = open(p, "w")
f.write(auto_gen_msg)
f.write('#include <stddef.h>\n')
f.write('\n')
f.write('#include "Python.h"\n')
f.write('#include "%s-ast.h"\n' % mod.name)
f.write('\n')
f.write("static PyTypeObject AST_type;\n")
v = ChainOfVisitors(
PyTypesDeclareVisitor(f),
PyTypesVisitor(f),
Obj2ModPrototypeVisitor(f),
FunctionVisitor(f),
ObjVisitor(f),
Obj2ModVisitor(f),
ASTModuleVisitor(f),
PartingShots(f),
)
v.visit(mod)
f.close()
if __name__ == "__main__":
import getopt
INC_DIR = ''
SRC_DIR = ''
dump_module = False
opts, args = getopt.getopt(sys.argv[1:], "dh:c:")
for o, v in opts:
if o == '-h':
INC_DIR = v
if o == '-c':
SRC_DIR = v
if o == '-d':
dump_module = True
if INC_DIR and SRC_DIR:
print('Must specify exactly one output file')
sys.exit(1)
elif len(args) != 1:
print('Must specify single input file')
sys.exit(1)
main(args[0], dump_module)
|
|
#!/usr/bin/python
"""
Sahana-Eden ADAT helper script
==============================
Script to generate the import files for a new survey template.
The input is a "xls" spreadsheet with four sheets, namely:
* Template
* Sections
* Questions
* Layout
The output is two csv files: one that can be used to import the
questions into the system, and one that can be used to import the
layout details into the system. The names of the files will be the
same as the input file, with either the .Layout.csv or .Question.csv
suffix replacing the .xls extension.
Details of the input sheets
===========================
Template
--------
This includes the basic details of the template as follows:
A1: Template Name
A2: Template Description
A3: Complete Question
A4: Date Question
A5: Time Question
A6: Location Question
A7: Priority Question
The questions in cells A3:A7 are the unique question codes, which are
defined later in the Questions sheet.
Sections
--------
This lists each section within the template. Each section name is given in
column A, and the sections should be provided in their default order. Their
display order can later be changed by the layout, but this will be the
default order of the sections.
Questions
---------
This holds details of questions in each column as follows:
A: Unique Question Code
B: The Section - the section to which the question belongs
C: Question Name - the actual question which is what will be displayed
and should be in the base template language (English)
D: Question type - This must be one of the known question widget types
E: Question notes - any help information that should be associated with
the question to help the enumerators complete
the questionnaire.
F onwards: Metadata
Any option type: The list of options, thus:
OptionOther: Early Continue
The Grid type: This is the most complex type when it comes to the metadata
and it relies on keywords to make it as simple as possible.
The columns alternate between keywords and their value.
The valid keywords are:
Subtitle - the subtitle of the grid [Optional]
Column count - the number of columns in the grid
Row count - the number of rows in the grid
Column Headings - the headings for each column with one
heading per cell
Row Questions - the headings for each row with one
heading per cell
Question Type - this is the type of each question, again
this must be one of the known question
widget types. If just one value is given
then all questions take this type. If the
same number of types as columns is given
then each type applies to the questions
in its column. Otherwise, there should
be a direct mapping between question and
type.
GridChild - Some of the question types need to have
metadata associated with them. This
keyword is followed by a number to
indicate which question type the metadata
is for, referring to the order in the
Question Type list. [Optional]
NOTE: The metadata must be provided in this order.
NOTE on the grid question type:
The question code for this should be unique and end with a hyphen.
The questions within the grid will then be properly numbered.
So a grid question code of PMI-WASH-A-, will then hold the questions
PMI-WASH-A-1, PMI-WASH-A-2, PMI-WASH-A-3 etc.
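For instance (illustrative values only), a grid question PMI-WASH-A- with
the metadata cells:
Subtitle, Water access, Column count, 2, Row count, 2,
Column Headings, Urban, Rural, Row Questions, Wells, Taps,
Question Type, Numeric
describes a 2x2 grid of Numeric questions with those headings.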
Layout
------
This is used to describe in a semi-visual way how the questions should be
laid out. This layout can be used for any representation of the
questionnaire such as web form, spreadsheet, PDF etc.
The rules to complete this section are as follows:
* Add the section name
* On subsequent lines add the question codes for the questions to appear
in this section
* For questions that are to appear on the same line, add them in adjacent
columns of the same row, thus:
PMI-Ass-1 PMI-Ass-2 PMI-Ass-3
* For questions that are to appear in adjacent columns use the keyword
column in the first column and then add the questions in subsequent
columns, thus:
columns PMI-Health-1 PMI-Health-4 PMI-Health-A-
PMI-Health-2
So this describes three columns with two questions in the first column
and one question each in columns 2 and 3.
* To add a subheading (normally at the start of a column) just add the
text in the cell and the question codes in the columns below. Any text
that does not match a question code or keyword is assumed to be a
subheading.
NOTE: The script might be able to manage blank lines between the end of
one section and the next but *please* try and avoid using blank lines
since this is not fully tested and future enhancements of this script
may break that.
NOTE: Only include question codes from within the section. Including
questions from different sections is untested and whilst the script
may work as expected, Sahana-Eden *might* not.
"""
import sys
import xlrd
import csv
optionTypes = ["Option", "OptionOther", "MultiOption"]
widgetTypes = ["String", "Text", "Numeric", "Date", "Option", "YesNo", "YesNoDontKnow", "OptionOther", "MultiOption", "Location", "Link", "Grid", "GridChild"]
layoutQuestions = []
def splitGridChildMetadata(metadataList):
gridChildList = []
dataList = []
for x in range(len(metadataList)):
if metadataList[x] == "GridChild":
if dataList != []:
gridChildList.append(dataList)
dataList = []
else:
dataList.append(metadataList[x])
if dataList != []:
gridChildList.append(dataList)
return gridChildList
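# A small illustration (hypothetical cell values): the metadata cells
#   ["GridChild", 1, "Option", "Yes", "No", "GridChild", 2, "String"]
# are split into [[1, "Option", "Yes", "No"], [2, "String"]] -- one list
# per GridChild marker, each starting with its question number.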
def processGridChildMetadata(metadataList, childType):
metadata = dict()
gridChildList = splitGridChildMetadata(metadataList)
for x in range(len(gridChildList)):
dataList = gridChildList[x]
qstnNo = int(dataList[0])
qstn_type = childType[qstnNo-1]
(metadataList, dummy) = processMetadata(dataList[1:], qstn_type, None,0,None)
metadata[qstnNo] = metadataList
return metadata
def processGridChildMetadataAll(metadataList, colCnt, rowCnt, qstn_code, qstn_posn, firstQstnInSection, childType):
metadata = dict()
qstnMetadataList = processGridChildMetadata(metadataList, childType)
offset = qstn_posn - firstQstnInSection + 1
for x in range(colCnt * rowCnt):
qCode = "%s%d" %(qstn_code, x+offset)
for qstnMetadata in qstnMetadataList.values():
metadata[str(qCode)] = qstnMetadata
return metadata
def processGridChildMetadataColumn(metadataList, colCnt, rowCnt, qstn_code, qstn_posn, firstQstnInSection, childType):
metadata = dict()
qstnMetadataList = processGridChildMetadata(metadataList, childType)
offset = qstn_posn - firstQstnInSection
for (posn, qstnMetadata) in qstnMetadataList.items():
for x in range(rowCnt):
qCode = "%s%d" %(qstn_code, x*colCnt+posn+offset)
metadata[str(qCode)] = qstnMetadata
return metadata
def processGridChildMetadataElement(metadataList, qstn_code, qstn_posn, firstQstnInSection, childType):
metadata = dict()
qstnMetadataList = processGridChildMetadata(metadataList, childType)
offset = qstn_posn - firstQstnInSection
for (posn, qstnMetadata) in qstnMetadataList.items():
qCode = "%s%d" %(qstn_code, posn+offset)
metadata[str(qCode)] = qstnMetadata
return metadata
def processMetadata(metadataList, qstn_type, qstn_code, qstn_posn, firstQstnInSection):
metadata = dict()
next_qstn_posn = qstn_posn + 1
if qstn_type in optionTypes:
posn = 0
for value in metadataList:
posn += 1
if value == "metadata":
# processMetadata returns a (metadata, position) tuple; merge in the dict part
(extraMetadata, dummy) = processMetadata(metadataList[posn:], None, None, 0, None)
metadata.update(extraMetadata)
break
metadata[posn] = str(value)
metadata["Length"] = posn
elif qstn_type == "Grid":
colCnt = 0
rowCnt = 0
metadata["QuestionNo"] = qstn_posn - firstQstnInSection + 1
end = len(metadataList)
for x in range(end):
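# NOTE: the "x += 1" statements below do not skip loop iterations (a
# Python for loop reassigns x on every pass); value cells that are not
# keywords simply fail every keyword test and are ignored on their own
# iteration.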
value = metadataList[x]
if value == "Subtitle":
x += 1
metadata["Subtitle"] = str(metadataList[x])
elif value == "Column count":
x += 1
colCnt = int(metadataList[x])
metadata["col-cnt"] = str(colCnt)
elif value == "Row count":
x += 1
rowCnt = int(metadataList[x])
metadata["row-cnt"] = str(rowCnt)
elif value == "Column Headings":
colList = []
for y in range(colCnt):
colList.append(str(metadataList[x+y+1]))
metadata["columns"] = colList
x += colCnt
elif value == "Row Questions":
rowList = []
for y in range(rowCnt):
rowList.append(str(metadataList[x+y+1]))
metadata["rows"] = rowList
x += rowCnt
elif value == "Question Type":
rowList = []
childType = []
for y in xrange(x+1, end):
value = metadataList[y]
if value == "GridChild":
break
else:
childType.append(str(value))
if len(childType) == 1:
colList = childType*colCnt
rowList = [colList] * rowCnt
metadata["data"] = rowList
elif len(childType) == colCnt:
for r in range(rowCnt):
rowList.append(childType)
metadata["data"] = rowList
else:
for r in range(rowCnt):
colList = []
for c in range(colCnt):
colList.append(childType[r*colCnt + c])
rowList.append(colList)
metadata["data"] = rowList
if value == "GridChild":
if len(childType) == 1:
metadata.update(processGridChildMetadataAll(metadataList[y:], colCnt, rowCnt, qstn_code, qstn_posn, firstQstnInSection, childType))
elif len(childType) == colCnt:
metadata.update(processGridChildMetadataColumn(metadataList[y:], colCnt, rowCnt, qstn_code, qstn_posn, firstQstnInSection, childType))
else:
metadata.update(processGridChildMetadataElement(metadataList[y:], qstn_code, qstn_posn, firstQstnInSection, childType))
break
next_qstn_posn = qstn_posn + colCnt * rowCnt
else:
pass
return (metadata, next_qstn_posn)
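# A quick illustration for an option question (hypothetical values):
#   processMetadata(["Yes", "No", "Don't know"], "Option", None, 3, 1)
# returns ({1: 'Yes', 2: 'No', 3: "Don't know", 'Length': 3}, 4), i.e.
# the numbered options plus their count, and the next question position.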
def getQstnMetadata(sheetQ, row, qstn_type, qstn_code, qstn_posn, firstQstnInSection):
metadataList = []
for col in xrange(5,sheetQ.ncols):
value = sheetQ.cell_value(row, col)
if value == "":
break
metadataList.append(value)
(metadata, qstn_posn) = processMetadata(metadataList, qstn_type, qstn_code, qstn_posn, firstQstnInSection)
return (metadata, qstn_posn)
def formatQuestionnaire(sheetQ, templateDetails, sections):
questionnaire = []
questions = []
theSection = ""
sectionPosn = 0
firstQstnInSection = 0
next_qstn_posn = 1
line = []
for row in range(sheetQ.nrows):
qstn_posn = next_qstn_posn
line = templateDetails[:]
qstn_code = sheetQ.cell_value(row, 0)
section = sheetQ.cell_value(row, 1)
if section != theSection:
theSection = section
sectionPosn += 1
firstQstnInSection = qstn_posn
question = sheetQ.cell_value(row, 2)
qstn_type = sheetQ.cell_value(row, 3)
qstn_notes = sheetQ.cell_value(row, 4)
(metadata, next_qstn_posn) = getQstnMetadata(sheetQ, row, qstn_type, qstn_code, qstn_posn, firstQstnInSection)
questions.append(qstn_code)
line.append(section)
line.append(sectionPosn)
line.append(question)
line.append(qstn_type)
line.append(qstn_notes)
line.append(qstn_posn)
line.append(qstn_code)
if metadata != {}:
line.append(metadata)
questionnaire.append(line)
return (questions, questionnaire)
def processColumns(sheetL, questions, rowStart, rowEnd):
columns = []
for col in xrange(1,sheetL.ncols):
colList = []
for row in xrange(rowStart, rowEnd):
value = sheetL.cell_value(row, col)
if value == "":
break
if value in questions:
colList.append(str(value))
layoutQuestions.append(value)
else:
colList.append(processLabel(value))
if colList == []:
break
columns.append(colList)
return [{'columns':columns}]
def processRow(sheetL, questions, row):
rowList = []
for col in range(sheetL.ncols):
value = sheetL.cell_value(row, col)
if value in questions:
rowList.append(str(value))
layoutQuestions.append(value)
return rowList
def processLabel(value):
return {'heading':str(value)}
def getLayoutRules(sheetL, questions, rowStart, rowEnd):
colStart = None
rules = []
for row in xrange(rowStart, rowEnd):
value = sheetL.cell_value(row, 0)
if value == "columns":
if colStart != None:
rules.append(processColumns(sheetL, questions, colStart, row))
colStart = row
elif value == "":
pass
elif value in questions:
if colStart != None:
rules.append(processColumns(sheetL, questions, colStart, row))
colStart = None
rules.append(processRow(sheetL, questions, row))
else:
rules.append(processLabel(value))
if colStart != None:
rules.append(processColumns(sheetL, questions, colStart, rowEnd))
return rules
def formatLayout(sheetL, template, sections, questions):
layoutMethod = 1
layout = []
sectionLength = len(sections)
rowStart = rowEnd = 0
rowLimit = sheetL.nrows
for i in range(sectionLength):
section = sections[i]
while rowStart < rowLimit:
if sheetL.cell_value(rowStart, 0) == section:
break
else:
rowStart += 1
if i+1 == sectionLength:
rowEnd = rowLimit
else:
nextSection = sections[i+1]
while rowEnd < rowLimit:
if sheetL.cell_value(rowEnd, 0) == nextSection:
break
else:
rowEnd += 1
rule = repr(getLayoutRules(sheetL, questions, rowStart+1, rowEnd))
layout.append([template,section,i+1,layoutMethod,rule])
return layout
def loadSpreadsheet(name):
workbook = xlrd.open_workbook(filename=name)
sheetT = workbook.sheet_by_name("Template")
sheetS = workbook.sheet_by_name("Sections")
sheetQ = workbook.sheet_by_name("Questions")
sheetL = workbook.sheet_by_name("Layout")
templateDetails = []
for row in xrange(0, sheetT.nrows):
templateDetails.append(sheetT.cell_value(row, 0))
sections = []
for row in xrange(0, sheetS.nrows):
sections.append(sheetS.cell_value(row, 0))
(questions, questionnaire) = formatQuestionnaire(sheetQ, templateDetails, sections)
layout = formatLayout(sheetL, templateDetails[0], sections, questions)
# Report back the questions that are not in the layout
missing = []
for qstn in questions:
if qstn not in layoutQuestions:
missing.append(qstn)
if missing != []:
print "The following questions are missing from the layout: %s" % missing
return (questionnaire, layout)
def generateQuestionnaireCSV(name, questionnaire):
csvName = "%s.Question.csv" % name
headings = ["Template","Template Description","Complete Question","Date Question","Time Question","Location Question","Priority Question","Section","Section Position","Question","Question Type","Question Notes","Question Position","Question Code","Meta Data"]
writer = csv.writer(open(csvName, "w"))
writer.writerows([headings])
writer.writerows(questionnaire)
def generateLayoutCSV(name, layout):
csvName = "%s.Layout.csv" % name
headings = ["Template","Section","Posn","Method","Rules"]
writer = csv.writer(open(csvName, "w"))
writer.writerows([headings])
writer.writerows(layout)
def _main():
"""
Parse arguments and run checks generate the csv files
"""
if len(sys.argv) == 1:
print "Please add a spreadsheet to process"
return
spreadsheetName = sys.argv[1]
(questionnaire, layout) = loadSpreadsheet(spreadsheetName)
generateQuestionnaireCSV(spreadsheetName, questionnaire)
generateLayoutCSV(spreadsheetName, layout)
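# Typical invocation (script and spreadsheet names are illustrative):
#   python adat_helper.py PMI-Template.xls
# which writes PMI-Template.xls.Question.csv and PMI-Template.xls.Layout.csv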
if __name__ == '__main__':
_main()
|
|
import os
import sys
import time
import pickle
import threading
import traceback
PACKAGE_PARENT = '..'
SCRIPT_DIR = os.path.dirname(os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__))))
sys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR, PACKAGE_PARENT)))
from simple_host_target.client import Client
from simple_host_target.server import Server
from simple_host_target.definition import HOST_PORT, TARGET_PORT,\
get_local_IP, OP_SH_DATA_PREFIX,\
OP_SH_DATA_POSTFIX, OP_SH_DATA_MIDFIX,\
OP_HT_DATA_BEGIN, OP_HT_DATA_END, OP_HT_DATA_MID
from simple_host_target.generaltaskthread import TaskThread, Task
class ResultJob2SenderTask(Task):
def __init__(self, host, serialized_result_wrapper):
Task.__init__(self)
self.host = host
self.serialized_result_wrapper = serialized_result_wrapper
def run(self):
print("[Host][Thread] sending result to sender !")
c = None
try:
rw = pickle.loads(self.serialized_result_wrapper)
sh_ip_pairs = self.host.dicToken2Pairs.pop(rw.token, "")
used_target_ip = self.host.dicTokenIP.pop(rw.token, None)
assert used_target_ip is not None
self.host.return_target_ip(used_target_ip)
sender_ip = sh_ip_pairs.get("sender_ip", "")
sender_port = sh_ip_pairs.get("sender_port", 0)
c = Client(ip = sender_ip, port = sender_port)
c.send_sh_data("", self.serialized_result_wrapper)
except:
traceback.print_exc()
print("[Host][Thread][Exception] while sending result task !")
finally:
if c:
c.shutdown()
class ExecJob2TargetTask(Task):
def __init__(self, host, target_ip, ip_port_pairs, serialized_executor_wrapper):
Task.__init__(self)
self.host = host
self.target_ip = target_ip
self.ip_port_pairs = ip_port_pairs
self.serialized_executor_wrapper = serialized_executor_wrapper
def run(self):
print("[Host][Thread] sending task to target !")
c = None
try:
ew = pickle.loads(self.serialized_executor_wrapper)
print(self.target_ip)
c = Client(ip = self.target_ip, port = TARGET_PORT)
data = { "cmd" : ew.get_command(),
"sew" : self.serialized_executor_wrapper }
c.send_ht_data(repr(data))
self.host.dicTokenIP[ew.token] = self.target_ip
self.host.dicToken2Pairs[ew.token] = self.ip_port_pairs
except:
traceback.print_exc()
print("[Host][Thread][Exception] while sending execution task !")
finally:
if c:
c.shutdown()
class ExecutionHost(object):
def __init__(self, IP):
self.host_IP = IP
self.target_IPs = set()
self.dicTokenIP = {}
self.dicToken2Pairs = {}
self.dicSender2Targets = {}
self.pendings = []
self.lock = threading.Lock()
self.thread = TaskThread(name = "host_thread")
self.thread.daemon = True
self.thread.start()
def setup_target_IPs(self, target_IPs):
assert(type(target_IPs) == list and len(target_IPs) > 0), "Must be a list and size > 0."
self.target_IPs = set(target_IPs)
def __ensure_target_IPs(self):
if len(self.target_IPs) == 0:
print("Empty target IPs, you should call setup_target_IPs before run !!")
print("Enter at least one Target IP or a list of IPs, e.g. 1.1.1.1, 2.3.3.4, 2.1.5.6")
print("... or enter yes to use host's IP")
try:
target_IPs = sys.stdin.readline()
if "yes" in target_IPs.strip():
target_IP = self.host_IP
self.target_IPs.add(target_IP)
else:
IPs = target_IPs.split(",")
for ip in IPs:
self.target_IPs.add(ip.strip())
except:
print("Something wrong while processing target IP, exit !")
sys.exit(1)
def run(self):
self.__ensure_target_IPs()
self.server = Server(ip = self.host_IP, port = HOST_PORT)
self.server.run_server(callbacks_info = { 0 : { "pre" : OP_HT_DATA_BEGIN,
"post": OP_HT_DATA_END,
"mid" : OP_HT_DATA_MID,
"callback" : self.__recv_from_target },
1 : { "pre" : OP_SH_DATA_PREFIX,
"post" : OP_SH_DATA_POSTFIX,
"mid" : OP_SH_DATA_MIDFIX,
"callback" : self.__recv_from_sender }})
print("Host is running ...")
while 1:
try:
time.sleep(0.01)
except:
traceback.print_exc()
break
self.__shutdown()
def __shutdown(self):
print("[Host] shutdown ... begin")
if len(self.pendings):
print("[Host][Warning] pending jobs are gonna be dropped !")
self.pendings = []
if self.thread:
self.thread.stop()
self.thread = None
if self.server:
self.server.shutdown()
self.server = None
print("[Host] shutdown ... end")
def retrieve_target_ip(self):
with self.lock:
# t_ip = self.target_IPs.pop() if len(self.target_IPs) else None
# TODO : target IP may be reused as we're designing command+task system.
t_ip = list(self.target_IPs)[0] if len(self.target_IPs) else None
return t_ip
def return_target_ip(self, ip):
with self.lock:
self.target_IPs.add(ip)
# self.__retrigger_pending_jobs()
def __retrigger_pending_jobs(self):
if len(self.pendings):
t_ip = self.retrieve_target_ip()
if t_ip != None:
dict_IP_pairs, serialized_executor_wrapper = self.pendings.pop(0)
job = ExecJob2TargetTask(self, t_ip, dict_IP_pairs, serialized_executor_wrapper)
self.thread.addtask(job)
def __recv_from_sender(self, ip_port_pairs, serialized_executor_wrapper):
dict_IP_pairs = eval(ip_port_pairs.decode("ASCII"))
t_ip = self.retrieve_target_ip()
if t_ip == None:
print("No available target for new job. Will try later !!")
self.pendings.append((dict_IP_pairs, serialized_executor_wrapper))
return
sender_ip = dict_IP_pairs.get("sender_ip", "")
sender_port = dict_IP_pairs.get("sender_port", 0)
if t_ip not in self.dicSender2Targets.setdefault((sender_ip, sender_port), []):
self.dicSender2Targets[(sender_ip, sender_port)].append(t_ip)
job = ExecJob2TargetTask(self, t_ip, dict_IP_pairs, serialized_executor_wrapper)
self.thread.addtask(job)
def __recv_from_target(self, serialized_result_wrapper):
print("[Host] get result : %s "%(str(serialized_result_wrapper)))
job = ResultJob2SenderTask(self, serialized_result_wrapper)
self.thread.addtask(job)
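# Overall flow sketched by the handlers above: a sender submits a pickled
# executor wrapper to the host, ExecJob2TargetTask forwards it to a free
# target (or it is queued in self.pendings), the target sends back a
# pickled result wrapper, and ResultJob2SenderTask relays it to the
# sender recorded for that token.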
def create_host():
host_ip = get_local_IP()
print("Creating host @(%s) ... Proceed (yes/no)?"%(host_ip))
try:
msg = sys.stdin.readline()
if msg.lower().find('yes') >= 0:
host = ExecutionHost(host_ip)
return host
except:
traceback.print_exc()
print("Nothing created")
return None
if __name__ == "__main__":
host = create_host()
if host:
host.run()
|
|
# Copyright 2010-2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from oslo.config import cfg
from stevedore import driver
from stevedore import extension
from glance_store import capabilities
from glance_store.common import utils
from glance_store import exceptions
from glance_store import i18n
from glance_store import location
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
_ = i18n._
_STORE_OPTS = [
cfg.ListOpt('stores', default=['file', 'http'],
help=_('List of stores enabled')),
cfg.StrOpt('default_store', default='file',
help=_("Default scheme to use to store image data. The "
"scheme must be registered by one of the stores "
"defined by the 'stores' config option.")),
cfg.IntOpt('store_capabilities_update_min_interval', default=0,
help=_("Minimum interval seconds to execute updating "
"dynamic storage capabilities based on backend "
"status then. It's not a periodic routine, the "
"update logic will be executed only when interval "
"seconds elapsed and an operation of store has "
"triggered. The feature will be enabled only when "
"the option value greater then zero."))
]
_STORE_CFG_GROUP = 'glance_store'
def _list_opts():
driver_opts = []
mgr = extension.ExtensionManager('glance_store.drivers')
# NOTE(zhiyan): Handle the available drivers provided via entry points
drivers = [ext.name for ext in mgr]
handled_drivers = [] # Used to handle backwards-compatible entries
for store_entry in drivers:
driver_cls = _load_store(None, store_entry, False)
if driver_cls and driver_cls not in handled_drivers:
if getattr(driver_cls, 'OPTIONS', None) is not None:
driver_opts += driver_cls.OPTIONS
handled_drivers.append(driver_cls)
# NOTE(zhiyan): This separated approach lists the common
# store options before all driver ones, which is easier
# for operators to read and configure.
return ([(_STORE_CFG_GROUP, _STORE_OPTS)] +
[(_STORE_CFG_GROUP, driver_opts)])
def register_opts(conf):
opts = _list_opts()
for group, opt_list in opts:
LOG.debug("Registering options for group %s" % group)
for opt in opt_list:
conf.register_opt(opt, group=group)
class Indexable(object):
"""Indexable for file-like objs iterators
Wrapper that allows an iterator or filelike be treated as an indexable
data structure. This is required in the case where the return value from
Store.get() is passed to Store.add() when adding a Copy-From image to a
Store where the client library relies on eventlet GreenSockets, in which
case the data to be written is indexed over.
"""
def __init__(self, wrapped, size):
"""
Initialize the object
:param wrapped: the wrapped iterator or file-like object.
:param size: the size of data available
"""
self.wrapped = wrapped
self.size = int(size) if size else (wrapped.len
if hasattr(wrapped, 'len') else 0)
self.cursor = 0
self.chunk = None
def __iter__(self):
"""
Delegate iteration to the wrapped instance.
"""
for self.chunk in self.wrapped:
yield self.chunk
def __getitem__(self, i):
"""
Index into the next chunk (or previous chunk in the case where
the last data returned was not fully consumed).
:param i: a slice-to-the-end
"""
start = i.start if isinstance(i, slice) else i
if start < self.cursor:
return self.chunk[(start - self.cursor):]
self.chunk = self.another()
if self.chunk:
self.cursor += len(self.chunk)
return self.chunk
def another(self):
"""Implemented by subclasses to return the next element."""
raise NotImplementedError
def getvalue(self):
"""
Return entire string value... used in testing
"""
return self.wrapped.getvalue()
def __len__(self):
"""
Length accessor.
"""
return self.size
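# A minimal sketch of how a subclass might satisfy the interface,
# assuming the wrapped object is an iterator (hypothetical, not part of
# glance_store):
#
#   class IterIndexable(Indexable):
#       def another(self):
#           try:
#               return next(self.wrapped)
#           except StopIteration:
#               return ''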
def _load_store(conf, store_entry, invoke_load=True):
try:
LOG.debug("Attempting to import store %s", store_entry)
mgr = driver.DriverManager('glance_store.drivers',
store_entry,
invoke_args=[conf],
invoke_on_load=invoke_load)
return mgr.driver
except RuntimeError as e:
LOG.warn("Failed to load driver %(driver)s."
"The driver will be disabled" % dict(driver=str([driver, e])))
def _load_stores(conf):
for store_entry in set(conf.glance_store.stores):
try:
# FIXME(flaper87): Don't hide BadStoreConfiguration
# exceptions. These exceptions should be propagated
# to the user of the library.
store_instance = _load_store(conf, store_entry)
if not store_instance:
continue
yield (store_entry, store_instance)
except exceptions.BadStoreConfiguration:
continue
def create_stores(conf=CONF):
"""
Registers all store modules and all schemes
from the given config. Duplicates are not re-registered.
"""
store_count = 0
for (store_entry, store_instance) in _load_stores(conf):
try:
schemes = store_instance.get_schemes()
store_instance.configure()
except NotImplementedError:
continue
if not schemes:
raise exceptions.BackendException('Unable to register store %s. '
'No schemes associated with it.'
% store_entry)
else:
LOG.debug("Registering store %s with schemes %s",
store_entry, schemes)
scheme_map = {}
loc_cls = store_instance.get_store_location_class()
for scheme in schemes:
scheme_map[scheme] = {
'store': store_instance,
'location_class': loc_cls,
'store_entry': store_entry
}
location.register_scheme_map(scheme_map)
store_count += 1
return store_count
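# Typical library bootstrap (a sketch; option wiring depends on the
# consuming service):
#   register_opts(CONF)
#   create_stores(CONF)
#   verify_default_store()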
def verify_default_store():
scheme = CONF.glance_store.default_store
try:
get_store_from_scheme(scheme)
except exceptions.UnknownScheme:
msg = _("Store for scheme %s not found") % scheme
raise RuntimeError(msg)
def get_known_schemes():
"""Returns list of known schemes."""
return location.SCHEME_TO_CLS_MAP.keys()
def get_store_from_scheme(scheme):
"""
Given a scheme, return the appropriate store object
for handling that scheme.
"""
if scheme not in location.SCHEME_TO_CLS_MAP:
raise exceptions.UnknownScheme(scheme=scheme)
scheme_info = location.SCHEME_TO_CLS_MAP[scheme]
store = scheme_info['store']
if not store.is_capable(capabilities.DRIVER_REUSABLE):
# Driver instance isn't stateless so it can't
# be reused safely and needs recreation.
store_entry = scheme_info['store_entry']
store = _load_store(store.conf, store_entry, invoke_load=True)
store.configure()
try:
scheme_map = {}
loc_cls = store.get_store_location_class()
for scheme in store.get_schemes():
scheme_map[scheme] = {
'store': store,
'location_class': loc_cls,
'store_entry': store_entry
}
location.register_scheme_map(scheme_map)
except NotImplementedError:
scheme_info['store'] = store
return store
def get_store_from_uri(uri):
"""
Given a URI, return the store object that would handle
operations on the URI.
:param uri: URI to analyze
"""
scheme = uri[0:uri.find('/') - 1]
return get_store_from_scheme(scheme)
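# For example, with uri = "file:///var/lib/images/1", uri.find('/') is 5,
# so the slice uri[0:4] yields the scheme "file".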
def get_from_backend(uri, offset=0, chunk_size=None, context=None):
"""Yields chunks of data from backend specified by uri."""
loc = location.get_location_from_uri(uri, conf=CONF)
store = get_store_from_uri(uri)
return store.get(loc, offset=offset,
chunk_size=chunk_size,
context=context)
def get_size_from_backend(uri, context=None):
"""Retrieves image size from backend specified by uri."""
loc = location.get_location_from_uri(uri, conf=CONF)
store = get_store_from_uri(uri)
return store.get_size(loc, context=context)
def delete_from_backend(uri, context=None):
"""Removes chunks of data from backend specified by uri."""
loc = location.get_location_from_uri(uri, conf=CONF)
store = get_store_from_uri(uri)
return store.delete(loc, context=context)
def get_store_from_location(uri):
"""
Given a location (assumed to be a URL), attempt to determine
the store from the location. We use here a simple guess that
the scheme of the parsed URL is the store...
:param uri: Location to check for the store
"""
loc = location.get_location_from_uri(uri, conf=CONF)
return loc.store_name
def check_location_metadata(val, key=''):
if isinstance(val, dict):
for key in val:
check_location_metadata(val[key], key=key)
elif isinstance(val, list):
ndx = 0
for v in val:
check_location_metadata(v, key='%s[%d]' % (key, ndx))
ndx = ndx + 1
elif not isinstance(val, unicode):
raise exceptions.BackendException(_("The image metadata key %(key)s "
"has an invalid type of %(type)s. "
"Only dict, list, and unicode are "
"supported.")
% dict(key=key, type=type(val)))
def store_add_to_backend(image_id, data, size, store, context=None):
"""
A wrapper around a call to each store's add() method. This gives glance
a common place to check the output.
:param image_id: The image ID to which the data is added
:param data: The data to be stored
:param size: The length of the data in bytes
:param store: The store to which the data is being added
:return: The url location of the file,
the size of the data,
the checksum of the data,
the storage system's metadata dictionary for the location
"""
(location, size, checksum, metadata) = store.add(image_id,
data,
size,
context=context)
if metadata is not None:
if not isinstance(metadata, dict):
msg = (_("The storage driver %(driver)s returned invalid "
" metadata %(metadata)s. This must be a dictionary type")
% dict(driver=str(store), metadata=str(metadata)))
LOG.error(msg)
raise exceptions.BackendException(msg)
try:
check_location_metadata(metadata)
except exceptions.BackendException as e:
e_msg = (_("A bad metadata structure was returned from the "
"%(driver)s storage driver: %(metadata)s. %(e)s.") %
dict(driver=utils.exception_to_str(store),
metadata=utils.exception_to_str(metadata),
e=utils.exception_to_str(e)))
LOG.error(e_msg)
raise exceptions.BackendException(e_msg)
return (location, size, checksum, metadata)
def add_to_backend(conf, image_id, data, size, scheme=None, context=None):
if scheme is None:
scheme = conf['glance_store']['default_store']
store = get_store_from_scheme(scheme)
return store_add_to_backend(image_id, data, size, store, context)
def set_acls(location_uri, public=False, read_tenants=[],
write_tenants=None, context=None):
if write_tenants is None:
write_tenants = []
loc = location.get_location_from_uri(location_uri, conf=CONF)
scheme = get_store_from_location(location_uri)
store = get_store_from_scheme(scheme)
try:
store.set_acls(loc, public=public,
read_tenants=read_tenants,
write_tenants=write_tenants,
context=context)
except NotImplementedError:
LOG.debug(_("Skipping store.set_acls... not implemented."))
|
|
import os.path
import sys
try:
this_file_name = __file__
except NameError:
# stupid jython. plain old __file__ isn't working for some reason
import test_runfiles #@UnresolvedImport - importing the module itself
this_file_name = test_runfiles.__file__
desired_runfiles_path = os.path.normpath(os.path.dirname(this_file_name) + "/..")
sys.path.insert(0, desired_runfiles_path)
import pydev_runfiles_unittest
import pydev_runfiles_xml_rpc
import pydevd_io
#remove existing pydev_runfiles from modules (if any), so that we can be sure we have the correct version
if 'pydev_runfiles' in sys.modules:
del sys.modules['pydev_runfiles']
import pydev_runfiles
import unittest
import tempfile
import re
try:
set
except:
from sets import Set as set
#this is an early test because it requires sys.path to be changed
orig_syspath = sys.path
a_file = pydev_runfiles.__file__
pydev_runfiles.PydevTestRunner(pydev_runfiles.Configuration(files_or_dirs=[a_file]))
file_dir = os.path.dirname(a_file)
assert file_dir in sys.path
sys.path = orig_syspath[:]
#remove it so that we leave it ok for other tests
sys.path.remove(desired_runfiles_path)
class RunfilesTest(unittest.TestCase):
def _setup_scenario(
self,
path,
include_tests=None,
tests=None,
files_to_tests=None,
exclude_files=None,
exclude_tests=None,
include_files=None,
):
self.MyTestRunner = pydev_runfiles.PydevTestRunner(
pydev_runfiles.Configuration(
files_or_dirs=path,
include_tests=include_tests,
verbosity=1,
tests=tests,
files_to_tests=files_to_tests,
exclude_files=exclude_files,
exclude_tests=exclude_tests,
include_files=include_files,
)
)
self.files = self.MyTestRunner.find_import_files()
self.modules = self.MyTestRunner.find_modules_from_files(self.files)
self.all_tests = self.MyTestRunner.find_tests_from_modules(self.modules)
self.filtered_tests = self.MyTestRunner.filter_tests(self.all_tests)
def setUp(self):
self.file_dir = [os.path.abspath(os.path.join(desired_runfiles_path, 'tests_runfiles/samples'))]
self._setup_scenario(self.file_dir, None)
def test_suite_used(self):
for suite in self.all_tests+self.filtered_tests:
self.assert_(isinstance(suite, pydev_runfiles_unittest.PydevTestSuite))
def test_parse_cmdline(self):
sys.argv = "pydev_runfiles.py ./".split()
configuration = pydev_runfiles.parse_cmdline()
self.assertEquals([sys.argv[1]], configuration.files_or_dirs)
self.assertEquals(2, configuration.verbosity) # default value
self.assertEquals(None, configuration.include_tests) # default value
sys.argv = "pydev_runfiles.py ../images c:/temp".split()
configuration = pydev_runfiles.parse_cmdline()
self.assertEquals(sys.argv[1:3], configuration.files_or_dirs)
self.assertEquals(2, configuration.verbosity)
sys.argv = "pydev_runfiles.py --verbosity 3 ../junk c:/asdf ".split()
configuration = pydev_runfiles.parse_cmdline()
self.assertEquals(sys.argv[3:], configuration.files_or_dirs)
self.assertEquals(int(sys.argv[2]), configuration.verbosity)
sys.argv = "pydev_runfiles.py --include_tests test_def ./".split()
configuration = pydev_runfiles.parse_cmdline()
self.assertEquals([sys.argv[-1]], configuration.files_or_dirs)
self.assertEquals([sys.argv[2]], configuration.include_tests)
sys.argv = "pydev_runfiles.py --include_tests Abc.test_def,Mod.test_abc c:/junk/".split()
configuration = pydev_runfiles.parse_cmdline()
self.assertEquals([sys.argv[-1]], configuration.files_or_dirs)
self.assertEquals(sys.argv[2].split(','), configuration.include_tests)
sys.argv = ('C:\\eclipse-SDK-3.2-win32\\eclipse\\plugins\\org.python.pydev.debug_1.2.2\\pysrc\\pydev_runfiles.py ' +
'--verbosity 1 ' +
'C:\\workspace_eclipse\\fronttpa\\tests\\gui_tests\\calendar_popup_control_test.py ').split()
configuration = pydev_runfiles.parse_cmdline()
self.assertEquals([sys.argv[-1]], configuration.files_or_dirs)
self.assertEquals(1, configuration.verbosity)
sys.argv = "pydev_runfiles.py --verbosity 1 --include_tests Mod.test_abc c:/junk/ ./".split()
configuration = pydev_runfiles.parse_cmdline()
self.assertEquals(sys.argv[5:], configuration.files_or_dirs)
self.assertEquals(int(sys.argv[2]), configuration.verbosity)
self.assertEquals([sys.argv[4]], configuration.include_tests)
sys.argv = "pydev_runfiles.py --exclude_files=*.txt,a*.py".split()
configuration = pydev_runfiles.parse_cmdline()
self.assertEquals(['*.txt', 'a*.py'], configuration.exclude_files)
sys.argv = "pydev_runfiles.py --exclude_tests=*__todo,test*bar".split()
configuration = pydev_runfiles.parse_cmdline()
self.assertEquals(['*__todo', 'test*bar'], configuration.exclude_tests)
def test___adjust_python_path_works_for_directories(self):
orig_syspath = sys.path
tempdir = tempfile.gettempdir()
pydev_runfiles.PydevTestRunner(pydev_runfiles.Configuration(files_or_dirs=[tempdir]))
self.assertEquals(1, tempdir in sys.path)
sys.path = orig_syspath[:]
def test___adjust_python_path_breaks_for_unknown_type(self):
self.assertRaises(RuntimeError, pydev_runfiles.PydevTestRunner, pydev_runfiles.Configuration(["./LIKE_THE_NINJA_YOU_WONT_FIND_ME.txt"]))
def test___is_valid_py_file(self):
isvalid = self.MyTestRunner._PydevTestRunner__is_valid_py_file
self.assertEquals(1, isvalid("test.py"))
self.assertEquals(0, isvalid("asdf.pyc"))
self.assertEquals(0, isvalid("__init__.py"))
self.assertEquals(0, isvalid("__init__.pyc"))
self.assertEquals(1, isvalid("asdf asdf.pyw"))
def test___unixify(self):
unixify = self.MyTestRunner._PydevTestRunner__unixify
self.assertEquals("c:/temp/junk/asdf.py", unixify("c:SEPtempSEPjunkSEPasdf.py".replace('SEP', os.sep)))
def test___importify(self):
importify = self.MyTestRunner._PydevTestRunner__importify
self.assertEquals("temp.junk.asdf", importify("temp/junk/asdf.py"))
self.assertEquals("asdf", importify("asdf.py"))
self.assertEquals("abc.def.hgi", importify("abc/def/hgi"))
def test_finding_a_file_from_file_system(self):
test_file = "simple_test.py"
self.MyTestRunner.files_or_dirs = [self.file_dir[0] + test_file]
files = self.MyTestRunner.find_import_files()
self.assertEquals(1, len(files))
self.assertEquals(files[0], self.file_dir[0] + test_file)
def test_finding_files_in_dir_from_file_system(self):
self.assertEquals(1, len(self.files) > 0)
for import_file in self.files:
self.assertEquals(-1, import_file.find(".pyc"))
self.assertEquals(-1, import_file.find("__init__.py"))
self.assertEquals(-1, import_file.find("\\"))
self.assertEquals(-1, import_file.find(".txt"))
def test___get_module_from_str(self):
my_importer = self.MyTestRunner._PydevTestRunner__get_module_from_str
my_os_path = my_importer("os.path", True, 'unused')
from os import path
import os.path as path2
self.assertEquals(path, my_os_path)
self.assertEquals(path2, my_os_path)
self.assertNotEquals(__import__("os.path"), my_os_path)
self.assertNotEquals(__import__("os"), my_os_path)
def test_finding_modules_from_import_strings(self):
self.assertEquals(1, len(self.modules) > 0)
def test_finding_tests_when_no_filter(self):
# unittest.py will create a TestCase with 0 tests in it
# since it just imports what is given
self.assertEquals(1, len(self.all_tests) > 0)
files_with_tests = [1 for t in self.all_tests if len(t._tests) > 0]
self.assertNotEquals(len(self.files), len(files_with_tests))
def count_tests(self, tests):
total = 0
for t in tests:
total += t.countTestCases()
return total
def test___match(self):
matcher = self.MyTestRunner._PydevTestRunner__match
self.assertEquals(1, matcher(None, "aname"))
self.assertEquals(1, matcher([".*"], "aname"))
self.assertEquals(0, matcher(["^x$"], "aname"))
self.assertEquals(0, matcher(["abc"], "aname"))
self.assertEquals(1, matcher(["abc", "123"], "123"))
def test_finding_tests_from_modules_with_bad_filter_returns_0_tests(self):
self._setup_scenario(self.file_dir, ["NO_TESTS_ARE_SURE_TO_HAVE_THIS_NAME"])
self.assertEquals(0, self.count_tests(self.all_tests))
def test_finding_test_with_unique_name_returns_1_test(self):
self._setup_scenario(self.file_dir, include_tests=["test_i_am_a_unique_test_name"])
filtered_tests = self.MyTestRunner.filter_tests(self.all_tests)
self.assertEquals(1, self.count_tests(filtered_tests))
def test_finding_test_with_non_unique_name(self):
self._setup_scenario(self.file_dir, include_tests=["test_non_unique_name"])
filtered_tests = self.MyTestRunner.filter_tests(self.all_tests)
self.assertEquals(1, self.count_tests(filtered_tests) > 2)
def test_finding_tests_with_regex_filters(self):
self._setup_scenario(self.file_dir, include_tests=["test_non*"])
filtered_tests = self.MyTestRunner.filter_tests(self.all_tests)
self.assertEquals(1, self.count_tests(filtered_tests) > 2)
self._setup_scenario(self.file_dir, ["^$"])
filtered_tests = self.MyTestRunner.filter_tests(self.all_tests)
self.assertEquals(0, self.count_tests(filtered_tests))
self._setup_scenario(self.file_dir, None, exclude_tests=["*"])
filtered_tests = self.MyTestRunner.filter_tests(self.all_tests)
self.assertEquals(0, self.count_tests(filtered_tests))
def test_matching_tests(self):
self._setup_scenario(self.file_dir, None, ['StillYetAnotherSampleTest'])
filtered_tests = self.MyTestRunner.filter_tests(self.all_tests)
self.assertEqual(1, self.count_tests(filtered_tests))
self._setup_scenario(self.file_dir, None, ['SampleTest.test_xxxxxx1'])
filtered_tests = self.MyTestRunner.filter_tests(self.all_tests)
self.assertEqual(1, self.count_tests(filtered_tests))
self._setup_scenario(self.file_dir, None, ['SampleTest'])
filtered_tests = self.MyTestRunner.filter_tests(self.all_tests)
self.assertEqual(8, self.count_tests(filtered_tests))
self._setup_scenario(self.file_dir, None, ['AnotherSampleTest.todo_not_tested'])
filtered_tests = self.MyTestRunner.filter_tests(self.all_tests)
self.assertEqual(1, self.count_tests(filtered_tests))
self._setup_scenario(self.file_dir, None, ['StillYetAnotherSampleTest', 'SampleTest.test_xxxxxx1'])
filtered_tests = self.MyTestRunner.filter_tests(self.all_tests)
self.assertEqual(2, self.count_tests(filtered_tests))
self._setup_scenario(self.file_dir, None, exclude_tests=['*'])
filtered_tests = self.MyTestRunner.filter_tests(self.all_tests)
self.assertEqual(self.count_tests(filtered_tests), 0)
self._setup_scenario(self.file_dir, None, exclude_tests=['*a*'])
filtered_tests = self.MyTestRunner.filter_tests(self.all_tests)
self.assertEqual(self.count_tests(filtered_tests), 6)
self.assertEqual(
set(self.MyTestRunner.list_test_names(filtered_tests)),
set(['test_1', 'test_2', 'test_xxxxxx1', 'test_xxxxxx2', 'test_xxxxxx3', 'test_xxxxxx4'])
)
self._setup_scenario(self.file_dir, None, exclude_tests=['*a*', '*x*'])
filtered_tests = self.MyTestRunner.filter_tests(self.all_tests)
self.assertEqual(self.count_tests(filtered_tests), 2)
self.assertEqual(
set(self.MyTestRunner.list_test_names(filtered_tests)),
set(['test_1', 'test_2'])
)
self._setup_scenario(self.file_dir, None, exclude_files=['simple_test.py'])
filtered_tests = self.MyTestRunner.filter_tests(self.all_tests)
names = self.MyTestRunner.list_test_names(filtered_tests)
self.assert_('test_xxxxxx1' not in names, 'Found: %s' % (names,))
self.assertEqual(
set(['test_abc', 'test_non_unique_name', 'test_asdf2', 'test_i_am_a_unique_test_name']),
set(names)
)
self._setup_scenario(self.file_dir, None, include_files=['simple3_test.py'])
filtered_tests = self.MyTestRunner.filter_tests(self.all_tests)
names = self.MyTestRunner.list_test_names(filtered_tests)
self.assert_('test_xxxxxx1' not in names, 'Found: %s' % (names,))
self.assertEqual(
set(['test_non_unique_name']),
set(names)
)
def test_xml_rpc_communication(self):
notifications = []
class Server:
def __init__(self, notifications):
self.notifications = notifications
def notifyConnected(self):
#This method is called at the very start (in runfiles.py), and we do not check this here
raise AssertionError('Should not be called during the test run.')
def notifyTestsCollected(self, number_of_tests):
self.notifications.append(('notifyTestsCollected', number_of_tests))
def notifyStartTest(self, file, test):
pass
def notifyTest(self, cond, captured_output, error_contents, file, test, time):
if error_contents:
error_contents = error_contents.splitlines()[-1].strip()
self.notifications.append(('notifyTest', cond, captured_output.strip(), error_contents, file, test))
def notifyTestRunFinished(self, total_time):
self.notifications.append(('notifyTestRunFinished',))
server = Server(notifications)
pydev_runfiles_xml_rpc.SetServer(server)
simple_test = os.path.join(self.file_dir[0], 'simple_test.py')
simple_test2 = os.path.join(self.file_dir[0], 'simple2_test.py')
files_to_tests = {}
files_to_tests.setdefault(simple_test , []).append('SampleTest.test_xxxxxx1' )
files_to_tests.setdefault(simple_test , []).append('SampleTest.test_xxxxxx2' )
files_to_tests.setdefault(simple_test , []).append('SampleTest.test_non_unique_name')
files_to_tests.setdefault(simple_test2, []).append('YetAnotherSampleTest.test_abc' )
self._setup_scenario(None, files_to_tests=files_to_tests)
self.MyTestRunner.verbosity = 2
buf = pydevd_io.StartRedirect(keep_original_redirection=False)
try:
self.MyTestRunner.run_tests()
self.assertEqual(6, len(notifications))
expected = [
('notifyTestsCollected', 4),
('notifyTest', 'ok', 'non unique name ran', '', simple_test, 'SampleTest.test_non_unique_name'),
('notifyTest', 'fail', '', 'AssertionError: Fail test 2', simple_test, 'SampleTest.test_xxxxxx1'),
('notifyTest', 'ok', '', '', simple_test, 'SampleTest.test_xxxxxx2'),
('notifyTest', 'ok', '', '', simple_test2, 'YetAnotherSampleTest.test_abc'),
('notifyTestRunFinished',),
]
expected.sort()
notifications.sort()
self.assertEqual(
expected,
notifications
)
finally:
pydevd_io.EndRedirect()
b = buf.getvalue()
self.assert_(b.find('Ran 4 tests in ') != -1, 'Found: '+b)
if __name__ == "__main__":
#this is so that we can run it from the jython tests -- because we don't actually have an __main__ module
#(so, it won't try importing the __main__ module)
unittest.TextTestRunner().run(unittest.makeSuite(RunfilesTest))
|
|
"""
Properties of the Point Spread Function
=======================================
This script can be used to plot some PSF properties such as ellipticity and size as a function of the focal plane position.
:requires: PyFITS
:requires: NumPy
:requires: SciPy
:requires: matplotlib
:requires: VISsim-Python
:author: Sami-Matias Niemi
:contact: smn2@mssl.ucl.ac.uk
"""
import matplotlib
matplotlib.rc('text', usetex=True)
matplotlib.rcParams['font.size'] = 17
matplotlib.rc('xtick', labelsize=14)
matplotlib.rc('axes', linewidth=1.1)
matplotlib.rcParams['legend.fontsize'] = 11
matplotlib.rcParams['legend.handlelength'] = 3
matplotlib.rcParams['xtick.major.size'] = 5
matplotlib.rcParams['ytick.major.size'] = 5
import matplotlib.pyplot as plt
from matplotlib.mlab import griddata
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
import pyfits as pf
import numpy as np
import math, datetime, cPickle, itertools, re, glob
from scipy import ndimage
from scipy import interpolate
from analysis import shape
from support import logger as lg
from support import files as fileIO
def readData(file):
"""
Reads in the data from a given FITS file.
"""
return pf.getdata(file)
def parseName(file):
"""
Parse information from the input file name.
Example name::
detector_jitter-1_TOL05_MC_T0074_50arcmin2_grid_Nim=16384x16384_pixsize=1.000um_lbda=800nm_fieldX=-0.306_fieldY=1.042.fits
"""
xpos = float(re.compile('fieldX=([-+]?[0-9]*\.?[0-9]*)').findall(file)[0])
ypos = float(re.compile('fieldY=([-+]?[0-9]*\.?[0-9]*)').findall(file)[0])
lbda = float(re.compile('lbda=([0-9]*\.?[0-9]*)').findall(file)[0])
pixsize = float(re.compile('pixsize=([0-9]*\.?[0-9]*)').findall(file)[0])
out = dict(xpos=xpos, ypos=ypos, lbda=lbda, pixsize=pixsize)
return out
def measureChars(data, info, log):
"""
Measure ellipticity, R2, FWHM etc.
"""
#settings = dict(pixelSize=info['pixsize'], sampling=info['pixsize']/12.)
settings = dict(sampling=info['pixsize']/12.)
sh = shape.shapeMeasurement(data.copy(), log, **settings)
results = sh.measureRefinedEllipticity()
out = dict(ellipticity=results['ellipticity'], e1=results['e1'], e2=results['e2'], R2=results['R2'])
return out
def generatePlots(filedata, interactive=False):
"""
Generate a simple plot showing some results.
"""
x = []
y = []
e = []
R2 = []
e1 = []
e2 = []
for key, value in filedata.iteritems():
x.append(value['info']['xpos'])
y.append(value['info']['ypos'])
e.append(value['values']['ellipticity'])
e1.append(value['values']['e1'])
e2.append(value['values']['e2'])
R2.append(value['values']['R2'])
print key, value['values']['ellipticity'], value['values']['e1'], value['values']['e2'], value['values']['R2']
x = np.asarray(x)
y = np.asarray(y)
e = np.asarray(e)
R2 = np.asarray(R2) / 1.44264123086 #denominator is R_ref
#coordinate vectors
xi = np.linspace(np.min(x), np.max(x))
yi = np.linspace(np.min(y), np.max(y))
#data grids
Z = griddata(x, y, e, xi, yi, interp='linear')
X, Y = np.meshgrid(xi, yi)
#ellipticity
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.view_init(30, 225)
plt.title('PSF ellipticity over full VIS FoV')
ax.plot_surface(X, Y, Z, alpha=0.5, rstride=3, cstride=3, cmap=cm.jet, vmin=0.02, vmax=0.07, shade=True)
ax.set_zlim(0.02, 0.07)
ax.set_xlabel('FoV X [deg]', linespacing=3.2)
ax.set_ylabel('FoV Y [deg]', linespacing=3.2)
ax.w_zaxis.set_label_text(r'Ellipticity $e$', fontdict={'rotation' : 50})
if interactive:
plt.show()
else:
plt.savefig('ellipticity.png')
plt.close()
#same with Mayavi
#s = mlab.surf(X, Y, Z, colormap='Spectral')
#mlab.savefig('FoVEllipticity.pdf')
#R2
Z = griddata(x, y, R2, xi, yi, interp='linear')
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
plt.title(r'PSF wings $\left ( \frac{R}{R_{ref}} \right )^{2}$ over full VIS FoV')
ax.plot_surface(X, Y, Z, rstride=3, cstride=3, alpha=0.5, cmap=cm.jet, vmin=3.4, vmax=3.7)
ax.set_zlim(3.4, 3.7)
ax.set_xlabel('FoV X [deg]', linespacing=3.2)
ax.set_ylabel('FoV Y [deg]', linespacing=3.2)
ax.w_zaxis.set_label_text(r'$\left ( \frac{R}{R_{ref}} \right )^{2}$', linespacing=3.2, rotation='vertical')
ax.azim = 225
if interactive:
plt.show()
else:
plt.savefig('R2.png')
plt.close()
#vector plot of e components
fig = plt.figure()
ax = fig.add_subplot(111)
plt.title('VIS System PSF $e_{1,2}$')
#Q = ax.quiver(x, y, -np.asarray(e1), e2, color='k', headwidth=1.5, headlength=3.5)
Q = ax.quiver(x, y, e1, e2, color='k', headwidth=1.5, headlength=3.5)
ax.quiverkey(Q, 0.9, 0.95, 0.1, r'$e_{i}$', labelpos='E', coordinates='figure', fontproperties={'weight': 'bold'})
ax.set_xlabel('FoV X [deg]')
ax.set_ylabel('FoV Y [deg]')
ax.set_xlim(ax.get_xlim()[0]*0.9, ax.get_xlim()[1]*1.1)
ax.set_ylim(ax.get_ylim()[0]*0.9, ax.get_ylim()[1]*1.1)
if interactive:
plt.show()
else:
plt.savefig('ecomponents.png')
plt.close()
def FoVanalysis(run=True, outfile='PSFdata.pk'):
#start the script
log = lg.setUpLogger('PSFproperties.log')
#derive results for each file
if run:
log.info('Deriving PSF properties...')
#find files
fls = glob.glob('/Volumes/disk_xray10/smn2/euclid/PSFs/detector_jitter-1_TOL05_MC_T0133_Nim=*.fits')
txt = 'Processing %i files...' % (len(fls))
print txt
log.info(txt)
filedata = {}
for file in fls:
data = readData(file)
info = parseName(file)
values = measureChars(data, info, log)
filedata[file] = dict(info=info, values=values)
txt = 'File %s processed...' % file
print txt
log.info(txt)
#save data
fileIO.cPickleDumpDictionary(filedata, outfile)
else:
filedata = cPickle.load(open(outfile))
#generate plots
generatePlots(filedata)
log.info('Run finished...\n\n\n')
def plotEncircledEnergy(radius, energy, scale=12):
"""
"""
txt = '%s' % datetime.datetime.isoformat(datetime.datetime.now())
fig = plt.figure()
ax = fig.add_subplot(111)
plt.title('VIS Nominal System PSF: Encircled Energy')
plt.text(0.83, 1.12, txt, ha='left', va='top', fontsize=9, transform=ax.transAxes, alpha=0.2)
ax.plot(radius, energy, 'bo-', label='Encircled Energy')
ax.set_ylabel('Encircled Energy / Total Energy')
ax.set_xlabel('Aperture Radius [microns] (12$\mu$m = 1 pixel = 0.1 arcsec)')
plt.legend(fancybox=True, shadow=True)
plt.savefig('EncircledEnergy.pdf')
plt.close()
fig = plt.figure()
ax = fig.add_subplot(111)
plt.title('VIS Nominal System PSF: Encircled Energy')
plt.text(0.83, 1.12, txt, ha='left', va='top', fontsize=9, transform=ax.transAxes, alpha=0.2)
#interpolate at r = 0.65 arcsec expressed in microns: 12 um = 1 pixel = 0.1 arcsec, so a 1.3 arcsec diameter gives 12*10*1.3/2 = 78 um
rd = (12*10*1.3/2.)
f = interpolate.interp1d(radius, energy, kind='cubic')
val = f(rd)
rds = np.linspace(np.min(radius), np.max(radius), 100)
vals = f(rds)
ax.plot(rds/scale/10., vals, 'r--', label='Cubic Spline Interpolation')
txt = 'Energy within r=0.65 arcsec aperture = %f' % val
plt.text(0.5, 0.2, txt, ha='left', va='top', fontsize=10, transform=ax.transAxes, alpha=0.8)
ax.plot(radius/scale/10., energy, 'bo', label='Encircled Energy')
ax.axvline(x=0.65, ls=':', c='k')
ax.set_ylabel('Encircled Energy / Total Energy')
ax.set_xlabel('Aperture Radius [arcseconds on the sky]')
plt.legend(fancybox=True, shadow=True, loc='lower right', numpoints=1)
plt.savefig('EncircledEnergy2.pdf')
plt.close()
def encircledEnergy(file='data/psf12x.fits'):
"""
Calculates the encircled energy from a PSF.
The default input PSF is 12 times over-sampled with 1 micron pixel.
"""
#start the script
log = lg.setUpLogger('PSFencircledEnergy.log')
log.info('Reading data from %s' % file)
data = readData(file)
total = np.sum(data)
#assume that centre is the same as the peak pixel (zero indexed)
y, x = np.indices(data.shape)
ycen, xcen = ndimage.measurements.maximum_position(data)
log.info('Centre assumed to be (x, y) = (%i, %i)' % (xcen, ycen))
#change the peak to be 0, 0 and calculate radius
x -= xcen
y -= ycen
radius = np.sqrt(x**2 + y**2)
#calculate flux in apertures from 12 to 588 um in 12 um (one native pixel) steps
rads = np.arange(12, 600, 12)
energy = []
for radlimit in rads:
mask = radius < radlimit
energy.append(data[np.where(mask)].sum() / total)
energy = np.asarray(energy)
plotEncircledEnergy(rads, energy)
log.info('Run finished...\n\n\n')
def peakFraction(file='data/psf12x.fits', radius=0.65, oversample=12):
"""
Calculates the fraction of energy in the peak pixel for a given PSF compared
to an aperture of a given radius.
"""
#start the script
log = lg.setUpLogger('PSFpeakFraction.log')
log.info('Reading data from %s' % file)
#read data
data = readData(file)
#assume that centre is the same as the peak pixel (zero indexed)
y, x = np.indices(data.shape)
ycen, xcen = ndimage.measurements.maximum_position(data)
log.info('Centre assumed to be (x, y) = (%i, %i)' % (xcen, ycen))
#change the peak to be 0, 0 and calculate radius
x -= xcen
y -= ycen
rad = np.sqrt(x**2 + y**2)
#calculate flux in the apertures
mask = rad < (radius * oversample * 10)
energy = data[np.where(mask)].sum()
#calculate the flux in the peak pixel
if oversample > 1:
shift = oversample / 2
peak = data[ycen-shift:ycen+shift+1, xcen-shift:xcen+shift+1].sum()
else:
peak = data[ycen, xcen]
print peak / energy
log.info('Run finished...\n\n\n')
def shapeComparisonToAST(oversample=3.):
"""
To calculate shapes from AST PSFs.
One of the actions from the PLM-SRR was 8941 (RID No: ENG-219), with the
following wording:
ASFT shall provide to the VIS team a PSF profile with associated R2
with the sampling set to 4 microns and the VIS team will check that when
applying the R2 processing the result is identical, to double check that
the process is correct.
"""
log = lg.setUpLogger('delete.log')
files = glob.glob('*.fits')
files = sorted(files)
for file in files:
data = pf.getdata(file)
settings = dict(sampling=1.0/oversample, iterations=20)
sh = shape.shapeMeasurement(data, log, **settings)
reference = sh.measureRefinedEllipticity()
R2 = reference['R2'] #in pixels
R2a = reference['R2arcsec']
print file, R2, R2a
if __name__ == '__main__':
#FoVanalysis()
#encircledEnergy()
#peakFraction()
shapeComparisonToAST()
|
|
# Copyright (c) 2019 Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import datetime
import errno
import json
import logging
import os
import re
import sys
import traceback
from log import logging
from keystoneauth1.identity import v3
from keystoneauth1.identity import v2
from keystoneauth1 import session
from keystoneclient.v3 import client
from keystoneclient.v2_0 import client as client_v2
from novaclient import client as nova_client
from neutronclient.neutron import client as neutron_client
from glanceclient import Client as glance_client
from cinderclient.client import Client as cinder_client
# from cinderclient import exceptions as c_exc
from neutronclient.common import exceptions as n_exc
from novaclient.client import exceptions as nova_exc
from glanceclient import exc as g_exc
from keystoneclient.openstack.common.apiclient import exceptions as k_exc
import requests.packages.urllib3
OPENRC_FROM = './from_auth'
OPENRC_TO = './to_auth'
class AuthStack(object):
def __init__(self):
logger = logging.getLogger('copystack.auth_stack.AuthStack')
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
logger.addHandler(ch)
from_auth = self.get_auth_details('from')
to_auth = self.get_auth_details('to')
self.from_auth_url = from_auth['OS_AUTH_URL']
self.from_auth_ip = from_auth['OS_AUTH_IP']
self.from_username = from_auth['OS_USERNAME']
self.from_password = from_auth['OS_PASSWORD']
self.from_tenant_name = from_auth['OS_TENANT_NAME']
self.from_cert = from_auth['OS_CACERT']
self.from_user_domain_id = from_auth['USER_DOMAIN_ID']
self.from_project_domain_id = from_auth['PROJECT_DOMAIN_ID']
self.from_cinder_version = from_auth['CINDER_VERSION']
self.from_keystone_version = from_auth['KEYSTONE_VERSION']
self.from_nova_version = from_auth['NOVA_VERSION']
self.solid_fire_ip = from_auth['SOLID_FIRE_IP']
self.solid_fire_user = from_auth['SOLID_FIRE_USER']
self.solid_fire_password = from_auth['SOLID_FIRE_PASSWORD']
self.solid_fire_host = from_auth['SOLID_FIRE_HOST']
self.nfs_host = from_auth['NFS_HOST']
self.nfs_dir = from_auth['NFS_DIR']
self.nfs_ip = from_auth['NFS_IP']
self.nfs_libvirt_location = from_auth['NFS_LIBVIRT_LOCATION']
self.from_nfs_glance_location = from_auth['NFS_GLANCE_LOCATION']
self.nfs_cinder_location = from_auth['NFS_CINDER_LOCATION']
self.from_nova_port = from_auth['NOVA_PORT']
self.from_cinder_port = from_auth['CINDER_PORT']
# self.from_keystone_port = from_auth['KEYSTONE_PORT']
self.from_neutron_port = from_auth['NEUTRON_PORT']
self.from_glance_port = from_auth['GLANCE_PORT']
self.from_domain_id = from_auth['DOMAIN_ID']
self.to_auth_url = to_auth['OS_AUTH_URL']
self.to_auth_ip = to_auth['OS_AUTH_IP']
self.to_username = to_auth['OS_USERNAME']
self.to_password = to_auth['OS_PASSWORD']
self.to_tenant_name = to_auth['OS_TENANT_NAME']
self.to_cert = to_auth['OS_CACERT']
self.to_user_domain_id = to_auth['USER_DOMAIN_ID']
self.to_project_domain_id = to_auth['PROJECT_DOMAIN_ID']
self.to_cinder_version = to_auth['CINDER_VERSION']
self.to_keystone_version = to_auth['KEYSTONE_VERSION']
self.to_nova_version = to_auth['NOVA_VERSION']
self.to_nova_port = to_auth['NOVA_PORT']
self.to_cinder_port = to_auth['CINDER_PORT']
# self.to_keystone_port = to_auth['KEYSTONE_PORT']
self.to_neutron_port = to_auth['NEUTRON_PORT']
self.to_glance_port = to_auth['GLANCE_PORT']
self.to_domain_id = to_auth['DOMAIN_ID']
self.to_nfs_glance_location = to_auth['NFS_GLANCE_LOCATION']
#to disable warnings on certs missing subjectAltName
#https://github.com/shazow/urllib3/issues/497#issuecomment-66942891
requests.packages.urllib3.disable_warnings()
def get_from_auth_ref(self):
if self.from_keystone_version == '2':
keystone = client_v2.Client(cacert=self.from_cert, username=self.from_username, password=self.from_password,
tenant_name=self.from_tenant_name, auth_url=self.from_auth_url)
# keystone.management_url = self.from_auth_url
# keystone.auth_url = self.from_auth_url
# print keystone.auth_ref
# print keystone
else:
auth = v3.Password(auth_url=self.from_auth_url, username=self.from_username, password=self.from_password,
project_name=self.from_tenant_name, user_domain_id=self.from_user_domain_id,
project_domain_id=self.from_project_domain_id)
sess = session.Session(auth=auth,
verify=self.from_cert)
keystone = client.Client(session=sess, endpoint_override=self.from_auth_url)
return keystone
def get_to_auth_ref(self):
auth = v3.Password(auth_url=self.to_auth_url, username=self.to_username, password=self.to_password,
project_name=self.to_tenant_name, user_domain_id=self.to_user_domain_id,
project_domain_id=self.to_project_domain_id)
sess = session.Session(auth=auth, verify=self.to_cert)
keystone = client.Client(session=sess, endpoint_override=self.to_auth_url)
return keystone
def get_from_keystone_client(self):
return self.get_from_auth_ref()
def get_to_keystone_client(self):
return self.get_to_auth_ref()
def get_keystone_client(self, destination):
if destination == 'to':
return self.get_to_keystone_client()
else:
return self.get_from_keystone_client()
def get_from_nova_client(self):
if self.from_nova_version == '2':
auth_ref = self.get_from_auth_ref().auth_ref
auth_token = auth_ref['token']['id']
tenant_id = auth_ref['token']['tenant']['id']
bypass_url = '{ip}:{port}/v2/{tenant_id}' \
.format(ip=self.from_auth_ip, port=self.from_nova_port, tenant_id=tenant_id)
nova = nova_client.Client('2', auth_token=auth_token, bypass_url=bypass_url, cacert=self.from_cert)
else:
auth_ref = self.get_from_auth_ref()
# todo: check that this works for releases before Newton; might have to change it to tenant_id
project_id = auth_ref.session.get_project_id()
bypass_url = '{ip}:{port}/v2.1/{tenant_id}' \
.format(ip=self.from_auth_ip, port=self.from_nova_port, tenant_id=project_id)
nova = nova_client.Client('2.1', session=auth_ref.session, endpoint_override=bypass_url)
return nova
def get_to_nova_client(self):
auth_ref = self.get_to_auth_ref()
# todo: check that this works for releases before Newton; might have to change it to tenant_id
project_id = auth_ref.session.get_project_id()
bypass_url = '{ip}:{port}/v2.1/{tenant_id}' \
.format(ip=self.to_auth_ip, port=self.to_nova_port, tenant_id=project_id)
nova = nova_client.Client('2.1', session=auth_ref.session, endpoint_override=bypass_url)
return nova
def get_nova_client(self, destination):
if destination == 'to':
return self.get_to_nova_client()
else:
return self.get_from_nova_client()
def get_from_neutron_client(self):
auth_ref = self.get_from_auth_ref()
endpoint_url = '{ip}:{port}'.format(ip=self.from_auth_ip, port=self.from_neutron_port)
neutron = neutron_client.Client('2.0', session=auth_ref.session, endpoint_override=endpoint_url)
return neutron
def get_to_neutron_client(self):
auth_ref = self.get_to_auth_ref()
endpoint_url = '{ip}:{port}'.format(ip=self.to_auth_ip, port=self.to_neutron_port)
neutron = neutron_client.Client('2.0', session=auth_ref.session, endpoint_override=endpoint_url)
return neutron
def get_neutron_client(self, destination):
if destination == 'to':
return self.get_to_neutron_client()
else:
return self.get_from_neutron_client()
def get_from_glance_client(self):
auth_ref = self.get_from_auth_ref()
endpoint_url = '{ip}:{port}'.format(ip=self.from_auth_ip, port=self.from_glance_port)
glance = glance_client('2', session=auth_ref.session, endpoint=endpoint_url)
return glance
def get_to_glance_client(self):
auth_ref = self.get_to_auth_ref()
endpoint_url = '{ip}:{port}'.format(ip=self.to_auth_ip, port=self.to_glance_port,)
glance = glance_client('2', endpoint=endpoint_url, session=auth_ref.session)
return glance
def get_glance_client(self, destination):
if destination == 'to':
return self.get_to_glance_client()
else:
return self.get_from_glance_client()
def get_from_cinder_client(self):
if self.from_keystone_version == '2':
return self.get_from_cinder_client_keystone2()
else:
return self.get_from_cinder_client_keystone3()
# The split below depends on which keystone version is running: with keystone v2
# use the token/tenant-based call, with keystone v3 the session-based one.
def get_from_cinder_client_keystone2(self):
auth_ref = self.get_from_auth_ref().auth_ref
# auth_ref = self.get_from_auth_ref()
token = auth_ref['token']['id']
tenant_id = auth_ref['token']['tenant']['id']
endpoint_url = ('{ip}:{port}/v1/{tenant}'.format
(ip=self.from_auth_ip, port=self.from_cinder_port, tenant=tenant_id))
# print endpoint_url
cinder = cinder_client('1', self.from_username, token,
project_id=self.from_tenant_name,
auth_url=self.from_auth_url, cacert=self.from_cert)
cinder.client.auth_token = token
cinder.client.management_url = endpoint_url
return cinder
def get_from_cinder_client_keystone3(self):
auth_ref = self.get_from_auth_ref()
project_id = auth_ref.session.get_project_id()
endpoint_url = ('{ip}:{port}/v{version}/{project_id}'.format
(ip=self.from_auth_ip, port=self.from_cinder_port, version=self.from_cinder_version, project_id=project_id))
cinder = cinder_client(self.from_cinder_version, session=auth_ref.session, bypass_url=endpoint_url)
cinder.client.management_url = endpoint_url
return cinder
def get_to_cinder_client(self):
auth_ref = self.get_to_auth_ref()
project_id = auth_ref.session.get_project_id()
endpoint_url = ('{ip}:{port}/v2/{project_id}'.format
(ip=self.to_auth_ip, port=self.to_cinder_port, project_id=project_id))
cinder = cinder_client('2', session=auth_ref.session, bypass_url=endpoint_url)
cinder.client.management_url = endpoint_url
return cinder
def get_cinder_client(self, destination):
if destination == 'to':
return self.get_to_cinder_client()
else:
return self.get_from_cinder_client()
def get_auth_details(self, destination):
AUTH_DETAILS = {'OS_USERNAME': None,
'OS_PASSWORD': None,
'OS_TENANT_NAME': None,
'OS_AUTH_URL': None,
'OS_AUTH_IP': None,
'OS_CACERT': None,
'USER_DOMAIN_ID': None,
'PROJECT_DOMAIN_ID': None,
'CINDER_VERSION': None,
'KEYSTONE_VERSION': None,
'NOVA_VERSION': None,
'NOVA_PORT': None,
'CINDER_PORT': None,
'GLANCE_PORT': None,
'NEUTRON_PORT': None,
'DOMAIN_ID': None,
'SOLID_FIRE_IP': None,
'SOLID_FIRE_USER': None,
'SOLID_FIRE_PASSWORD': None,
'SOLID_FIRE_HOST': None,
'NFS_HOST': None,
'NFS_DIR' : None,
'NFS_IP' : None,
'NFS_LIBVIRT_LOCATION': None,
'NFS_GLANCE_LOCATION': None,
'NFS_CINDER_LOCATION': None
}
auth_details = AUTH_DETAILS
pattern = re.compile(
r'^(?:export\s)?(?P<key>\w+)(?:\s+)?=(?:\s+)?(?P<value>.*)$'
)
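# Illustrative lines this pattern accepts (optional "export" prefix,
# optional spaces around "="):
#   export OS_USERNAME=admin
#   NOVA_PORT = 8774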
try:
if destination == 'to':
openrc_file = OPENRC_TO
else:
openrc_file = OPENRC_FROM
with open(openrc_file) as openrc:
for line in openrc:
match = pattern.match(line)
if match is None:
continue
k = match.group('key')
v = match.group('value')
if k in auth_details and auth_details[k] is None:
auth_details[k] = v
except IOError as e:
if e.errno != errno.ENOENT:
print str(e)
# no openrc file, so we try the environment
for key in auth_details.keys():
auth_details[key] = os.environ.get(key)
for key in auth_details.keys():
if auth_details[key] is None:
print '%s not set' % key
return auth_details
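# Minimal usage sketch (assumes ./from_auth and ./to_auth openrc files exist
# with the keys listed above):
#   auth = AuthStack()
#   nova = auth.get_nova_client('from')
#   cinder = auth.get_cinder_client('to')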
|
|
#------------------------------------------------------------------------------
# pycparser: c_parser.py
#
# CParser class: Parser and AST builder for the C language
#
# Copyright (C) 2008-2012, Eli Bendersky
# License: BSD
#------------------------------------------------------------------------------
import re
from ply import yacc
from . import c_ast
from .c_lexer import CLexer
from .plyparser import PLYParser, Coord, ParseError
from .ast_transforms import fix_switch_cases
class CParser(PLYParser):
def __init__(
self,
lex_optimize=True,
lextab='pycparser.lextab',
yacc_optimize=True,
yacctab='pycparser.yacctab',
yacc_debug=False):
""" Create a new CParser.
Some arguments for controlling the debug/optimization
level of the parser are provided. The defaults are
tuned for release/performance mode.
The simple rules for using them are:
*) When tweaking CParser/CLexer, set these to False
*) When releasing a stable parser, set to True
lex_optimize:
Set to False when you're modifying the lexer.
Otherwise, changes in the lexer won't be used, if
some lextab.py file exists.
When releasing with a stable lexer, set to True
to save the re-generation of the lexer table on
each run.
lextab:
Points to the lex table that's used for optimized
mode. Only if you're modifying the lexer and want
some tests to avoid re-generating the table, make
this point to a local lex table file (that's been
earlier generated with lex_optimize=True)
yacc_optimize:
Set to False when you're modifying the parser.
Otherwise, changes in the parser won't be used, if
some parsetab.py file exists.
When releasing with a stable parser, set to True
to save the re-generation of the parser table on
each run.
yacctab:
Points to the yacc table that's used for optimized
mode. Only if you're modifying the parser, make
this point to a local yacc table file
yacc_debug:
Generate a parser.out file that explains how yacc
built the parsing table from the grammar.
"""
self.clex = CLexer(
error_func=self._lex_error_func,
type_lookup_func=self._lex_type_lookup_func)
self.clex.build(
optimize=lex_optimize,
lextab=lextab)
self.tokens = self.clex.tokens
rules_with_opt = [
'abstract_declarator',
'assignment_expression',
'declaration_list',
'declaration_specifiers',
'designation',
'expression',
'identifier_list',
'init_declarator_list',
'parameter_type_list',
'specifier_qualifier_list',
'block_item_list',
'type_qualifier_list',
'struct_declarator_list'
]
for rule in rules_with_opt:
self._create_opt_rule(rule)
self.cparser = yacc.yacc(
module=self,
start='translation_unit_or_empty',
debug=yacc_debug,
optimize=yacc_optimize,
tabmodule=yacctab)
# Stack of scopes for keeping track of typedefs. _scope_stack[-1] is
# the current (topmost) scope.
#
self._scope_stack = [set()]
def parse(self, text, filename='', debuglevel=0):
""" Parses C code and returns an AST.
text:
A string containing the C source code
filename:
Name of the file being parsed (for meaningful
error messages)
debuglevel:
Debug level to yacc
"""
self.clex.filename = filename
self.clex.reset_lineno()
self._scope_stack = [set()]
return self.cparser.parse(text, lexer=self.clex, debug=debuglevel)
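# Usage sketch (illustrative, not part of the parser itself):
#   parser = CParser()
#   ast = parser.parse('int x = 5;', filename='<none>')
#   ast.show()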
######################-- PRIVATE --######################
def _push_scope(self):
self._scope_stack.append(set())
def _pop_scope(self):
assert len(self._scope_stack) > 1
self._scope_stack.pop()
def _add_typedef_type(self, name):
""" Add a new typedef-name to the current scope
"""
self._scope_stack[-1].add(name)
def _is_type_in_scope(self, name):
""" Is *name* a typedef-name in the current scope?
"""
return any(name in scope for scope in self._scope_stack)
def _lex_error_func(self, msg, line, column):
self._parse_error(msg, self._coord(line, column))
def _lex_type_lookup_func(self, name):
""" Looks up types that were previously defined with
typedef.
Passed to the lexer for recognizing identifiers that
are types.
"""
return self._is_type_in_scope(name)
# To understand what's going on here, read sections A.8.5 and
# A.8.6 of K&R2 very carefully.
#
# A C type consists of a basic type declaration, with a list
# of modifiers. For example:
#
# int *c[5];
#
# The basic declaration here is 'int c', and the pointer and
# the array are the modifiers.
#
# Basic declarations are represented by TypeDecl (from module
# c_ast) and the modifiers are FuncDecl, PtrDecl and
# ArrayDecl.
#
# The standard states that whenever a new modifier is parsed,
# it should be added to the end of the list of modifiers. For
# example:
#
# K&R2 A.8.6.2: Array Declarators
#
# In a declaration T D where D has the form
# D1 [constant-expression-opt]
# and the type of the identifier in the declaration T D1 is
# "type-modifier T", the type of the
# identifier of D is "type-modifier array of T"
#
# This is what this method does. The declarator it receives
# can be a list of declarators ending with TypeDecl. It
# tacks the modifier to the end of this list, just before
# the TypeDecl.
#
# Additionally, the modifier may be a list itself. This is
# useful for pointers, that can come as a chain from the rule
# p_pointer. In this case, the whole modifier list is spliced
# into the new location.
#
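# Worked example for the comments above: in "int *c[5]" the direct
# declarator first yields ArrayDecl -> TypeDecl('c'); tacking on the
# pointer modifier gives ArrayDecl -> PtrDecl -> TypeDecl('c'), i.e.
# "c is an array of 5 pointers to int".
#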
def _type_modify_decl(self, decl, modifier):
""" Tacks a type modifier on a declarator, and returns
the modified declarator.
Note: the declarator and modifier may be modified
"""
#~ print '****'
#~ decl.show(offset=3)
#~ modifier.show(offset=3)
#~ print '****'
modifier_head = modifier
modifier_tail = modifier
# The modifier may be a nested list. Reach its tail.
#
while modifier_tail.type:
modifier_tail = modifier_tail.type
# If the decl is a basic type, just tack the modifier onto
# it
#
if isinstance(decl, c_ast.TypeDecl):
modifier_tail.type = decl
return modifier
else:
# Otherwise, the decl is a list of modifiers. Reach
# its tail and splice the modifier onto the tail,
# pointing to the underlying basic type.
#
decl_tail = decl
while not isinstance(decl_tail.type, c_ast.TypeDecl):
decl_tail = decl_tail.type
modifier_tail.type = decl_tail.type
decl_tail.type = modifier_head
return decl
# Due to the order in which declarators are constructed,
# they have to be fixed in order to look like a normal AST.
#
# When a declaration arrives from syntax construction, it has
# these problems:
# * The innermost TypeDecl has no type (because the basic
# type is only known at the uppermost declaration level)
# * The declaration has no variable name, since that is saved
# in the innermost TypeDecl
# * The typename of the declaration is a list of type
# specifiers, and not a node. Here, basic identifier types
# should be separated from more complex types like enums
# and structs.
#
# This method fixes these problems.
#
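# Illustrative example: for "unsigned int x;" typename arrives as
# [IdentifierType(['unsigned']), IdentifierType(['int'])]; the names are
# merged below into IdentifierType(['unsigned', 'int']) and the variable
# name 'x' is recovered from the innermost TypeDecl.
#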
def _fix_decl_name_type(self, decl, typename):
""" Fixes a declaration. Modifies decl.
"""
# Reach the underlying basic type
#
type = decl
while not isinstance(type, c_ast.TypeDecl):
type = type.type
decl.name = type.declname
type.quals = decl.quals
# The typename is a list of types. If any type in this
# list isn't an IdentifierType, it must be the only
# type in the list (it's illegal to declare "int enum ...").
# If all the types are basic, they're collected in the
# IdentifierType holder.
#
for tn in typename:
if not isinstance(tn, c_ast.IdentifierType):
if len(typename) > 1:
self._parse_error(
"Invalid multiple types specified", tn.coord)
else:
type.type = tn
return decl
if not typename:
# Functions default to returning int
#
if not isinstance(decl.type, c_ast.FuncDecl):
self._parse_error(
"Missing type in declaration", decl.coord)
type.type = c_ast.IdentifierType(
['int'],
coord=decl.coord)
else:
# At this point, we know that typename is a list of IdentifierType
# nodes. Concatenate all the names into a single list.
#
type.type = c_ast.IdentifierType(
[name for id in typename for name in id.names],
coord=typename[0].coord)
return decl
def _add_declaration_specifier(self, declspec, newspec, kind):
""" Declaration specifiers are represented by a dictionary
with the entries:
* qual: a list of type qualifiers
* storage: a list of storage type qualifiers
* type: a list of type specifiers
* function: a list of function specifiers
This method is given a declaration specifier, and a
new specifier of a given kind.
Returns the declaration specifier, with the new
specifier incorporated.
"""
spec = declspec or dict(qual=[], storage=[], type=[], function=[])
spec[kind].insert(0, newspec)
return spec
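# Illustrative example: the specifiers of "static const int" build up
# right-to-left, each new one inserted at position 0, yielding
#   dict(qual=['const'], storage=['static'],
#        type=[IdentifierType(['int'])], function=[])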
def _build_function_definition(self, decl, spec, param_decls, body):
""" Builds a function definition.
"""
declaration = c_ast.Decl(
name=None,
quals=spec['qual'],
storage=spec['storage'],
funcspec=spec['function'],
type=decl,
init=None,
bitsize=None,
coord=decl.coord)
typename = spec['type']
declaration = self._fix_decl_name_type(declaration, typename)
return c_ast.FuncDef(
decl=declaration,
param_decls=param_decls,
body=body,
coord=decl.coord)
def _select_struct_union_class(self, token):
""" Given a token (either STRUCT or UNION), selects the
appropriate AST class.
"""
if token == 'struct':
return c_ast.Struct
else:
return c_ast.Union
##
## Precedence and associativity of operators
##
precedence = (
('left', 'LOR'),
('left', 'LAND'),
('left', 'OR'),
('left', 'XOR'),
('left', 'AND'),
('left', 'EQ', 'NE'),
('left', 'GT', 'GE', 'LT', 'LE'),
('left', 'RSHIFT', 'LSHIFT'),
('left', 'PLUS', 'MINUS'),
('left', 'TIMES', 'DIVIDE', 'MOD')
)
##
## Grammar productions
## Implementation of the BNF defined in K&R2 A.13
##
# Wrapper around a translation unit, to allow for empty input.
# Not strictly part of the C99 Grammar, but useful in practice.
#
def p_translation_unit_or_empty(self, p):
""" translation_unit_or_empty : translation_unit
| empty
"""
if p[1] is None:
p[0] = c_ast.FileAST([])
else:
p[0] = c_ast.FileAST(p[1])
def p_translation_unit_1(self, p):
""" translation_unit : external_declaration
"""
# Note: external_declaration is already a list
#
p[0] = p[1]
def p_translation_unit_2(self, p):
""" translation_unit : translation_unit external_declaration
"""
if p[2] is not None:
p[1].extend(p[2])
p[0] = p[1]
# Declarations always come as lists (because they can be
# several in one line), so we wrap the function definition
# into a list as well, to make the return value of
# external_declaration homogeneous.
#
def p_external_declaration_1(self, p):
""" external_declaration : function_definition
"""
p[0] = [p[1]]
def p_external_declaration_2(self, p):
""" external_declaration : declaration
"""
p[0] = p[1]
def p_external_declaration_3(self, p):
""" external_declaration : pp_directive
"""
p[0] = p[1]
def p_external_declaration_4(self, p):
""" external_declaration : SEMI
"""
p[0] = None
def p_pp_directive(self, p):
""" pp_directive : PPHASH
"""
self._parse_error('Directives not supported yet',
self._coord(p.lineno(1)))
# In function definitions, the declarator can be followed by
# a declaration list, for old "K&R style" function definitions.
#
def p_function_definition_1(self, p):
""" function_definition : declarator declaration_list_opt compound_statement
"""
# no declaration specifiers - 'int' becomes the default type
spec = dict(
qual=[],
storage=[],
type=[c_ast.IdentifierType(['int'],
coord=self._coord(p.lineno(1)))],
function=[])
p[0] = self._build_function_definition(
decl=p[1],
spec=spec,
param_decls=p[2],
body=p[3])
def p_function_definition_2(self, p):
""" function_definition : declaration_specifiers declarator declaration_list_opt compound_statement
"""
spec = p[1]
p[0] = self._build_function_definition(
decl=p[2],
spec=spec,
param_decls=p[3],
body=p[4])
def p_statement(self, p):
""" statement : labeled_statement
| expression_statement
| compound_statement
| selection_statement
| iteration_statement
| jump_statement
"""
p[0] = p[1]
# In C, declarations can come several in a line:
# int x, *px, romulo = 5;
#
# However, for the AST, we will split them to separate Decl
# nodes.
#
# This rule splits its declarations and always returns a list
# of Decl nodes, even if it's one element long.
#
def p_decl_body(self, p):
""" decl_body : declaration_specifiers init_declarator_list_opt
"""
spec = p[1]
is_typedef = 'typedef' in spec['storage']
decls = []
# p[2] (init_declarator_list_opt) is either a list or None
#
if p[2] is None:
# Then it's a declaration of a struct / enum tag,
# without an actual declarator.
#
ty = spec['type']
if len(ty) > 1:
coord = '?'
for t in ty:
if hasattr(t, 'coord'):
coord = t.coord
break
self._parse_error('Multiple type specifiers with a type tag',
coord)
decl = c_ast.Decl(
name=None,
quals=spec['qual'],
storage=spec['storage'],
funcspec=spec['function'],
type=ty[0],
init=None,
bitsize=None,
coord=ty[0].coord)
decls = [decl]
else:
for decl, init in p[2] or []:
if is_typedef:
decl = c_ast.Typedef(
name=None,
quals=spec['qual'],
storage=spec['storage'],
type=decl,
coord=decl.coord)
else:
decl = c_ast.Decl(
name=None,
quals=spec['qual'],
storage=spec['storage'],
funcspec=spec['function'],
type=decl,
init=init,
bitsize=None,
coord=decl.coord)
typename = spec['type']
fixed_decl = self._fix_decl_name_type(decl, typename)
# Add the type name defined by typedef to a
# symbol table (for usage in the lexer)
#
if is_typedef:
self._add_typedef_type(fixed_decl.name)
decls.append(fixed_decl)
p[0] = decls
# The declaration has been split to a decl_body sub-rule and
# SEMI, because having them in a single rule created a problem
# for defining typedefs.
#
# If a typedef line was directly followed by a line using the
# type defined with the typedef, the type would not be
# recognized. This is because to reduce the declaration rule,
# the parser's lookahead asked for the token after SEMI, which
# was the type from the next line, and the lexer had no chance
# to see the updated type symbol table.
#
# Splitting solves this problem, because after seeing SEMI,
# the parser reduces decl_body, which actually adds the new
# type into the table to be seen by the lexer before the next
# line is reached.
#
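# Illustrative C input exercising this case:
#   typedef int Node;
#   Node n;   /* 'Node' must already be in the type table when lexed */
#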
def p_declaration(self, p):
""" declaration : decl_body SEMI
"""
p[0] = p[1]
# Since each declaration is a list of declarations, this
# rule will combine all the declarations and return a single
# list
#
def p_declaration_list(self, p):
""" declaration_list : declaration
| declaration_list declaration
"""
p[0] = p[1] if len(p) == 2 else p[1] + p[2]
def p_declaration_specifiers_1(self, p):
""" declaration_specifiers : type_qualifier declaration_specifiers_opt
"""
p[0] = self._add_declaration_specifier(p[2], p[1], 'qual')
def p_declaration_specifiers_2(self, p):
""" declaration_specifiers : type_specifier declaration_specifiers_opt
"""
p[0] = self._add_declaration_specifier(p[2], p[1], 'type')
def p_declaration_specifiers_3(self, p):
""" declaration_specifiers : storage_class_specifier declaration_specifiers_opt
"""
p[0] = self._add_declaration_specifier(p[2], p[1], 'storage')
def p_declaration_specifiers_4(self, p):
""" declaration_specifiers : function_specifier declaration_specifiers_opt
"""
p[0] = self._add_declaration_specifier(p[2], p[1], 'function')
def p_storage_class_specifier(self, p):
""" storage_class_specifier : AUTO
| REGISTER
| STATIC
| EXTERN
| TYPEDEF
"""
p[0] = p[1]
def p_function_specifier(self, p):
""" function_specifier : INLINE
"""
p[0] = p[1]
def p_type_specifier_1(self, p):
""" type_specifier : VOID
| _BOOL
| CHAR
| SHORT
| INT
| LONG
| FLOAT
| DOUBLE
| _COMPLEX
| SIGNED
| UNSIGNED
"""
p[0] = c_ast.IdentifierType([p[1]], coord=self._coord(p.lineno(1)))
def p_type_specifier_2(self, p):
""" type_specifier : typedef_name
| enum_specifier
| struct_or_union_specifier
"""
p[0] = p[1]
def p_const_or_type(self, p):
""" const_or_type : constant
| unified_string_literal
| unified_wstring_literal
| type_name
| template
"""
if len(p) == 4:
p[0] = p[2]
else:
p[0] = p[1]
def p_declaration_specifiers_list(self, p):
""" declaration_specifiers_list : const_or_type
| declaration_specifiers_list COMMA
| declaration_specifiers_list COMMA const_or_type
"""
if len(p) == 2:
p[0] = [p[1]]
elif len(p) == 3:
p[0] = p[1]
else:
const_or_type = p[3]
if isinstance(const_or_type, dict):
types = const_or_type['type']
else:
types = [const_or_type]
p[0] = p[1] + types
def p_declaration_specifier_template(self, p):
""" declaration_specifiers : template """
p[0] = p[1]
def p_template(self, p):
""" template : type_specifier LT declaration_specifiers_list GT """
subtypes = []
for subtype in p[3]:
if isinstance(subtype, dict):
subtype, = subtype['type']
subtypes.append(subtype)
template = c_ast.Template(
left=p[1],
right=subtypes,
coord=p[1].coord)
p[0] = self._add_declaration_specifier(None, template, 'type')
def p_type_qualifier(self, p):
""" type_qualifier : CONST
| RESTRICT
| VOLATILE
"""
p[0] = p[1]
def p_init_declarator_list(self, p):
""" init_declarator_list : init_declarator
| init_declarator_list COMMA init_declarator
"""
p[0] = p[1] + [p[3]] if len(p) == 4 else [p[1]]
# Returns a (declarator, initializer) pair
# If there's no initializer, returns (declarator, None)
#
def p_init_declarator(self, p):
""" init_declarator : declarator
| declarator EQUALS initializer
"""
p[0] = (p[1], p[3] if len(p) > 2 else None)
def p_specifier_qualifier_list_1(self, p):
""" specifier_qualifier_list : type_qualifier specifier_qualifier_list_opt
"""
p[0] = self._add_declaration_specifier(p[2], p[1], 'qual')
def p_specifier_qualifier_list_2(self, p):
""" specifier_qualifier_list : type_specifier specifier_qualifier_list_opt
"""
p[0] = self._add_declaration_specifier(p[2], p[1], 'type')
# TYPEID is allowed here (and in other struct/enum related tag names), because
# struct/enum tags reside in their own namespace and can be named the same as types
#
def p_struct_or_union_specifier_1(self, p):
""" struct_or_union_specifier : struct_or_union ID
| struct_or_union TYPEID
"""
klass = self._select_struct_union_class(p[1])
p[0] = klass(
name=p[2],
decls=None,
coord=self._coord(p.lineno(2)))
def p_struct_or_union_specifier_2(self, p):
""" struct_or_union_specifier : struct_or_union brace_open struct_declaration_list brace_close
"""
klass = self._select_struct_union_class(p[1])
p[0] = klass(
name=None,
decls=p[3],
coord=self._coord(p.lineno(2)))
def p_struct_or_union_specifier_3(self, p):
""" struct_or_union_specifier : struct_or_union ID brace_open struct_declaration_list brace_close
| struct_or_union TYPEID brace_open struct_declaration_list brace_close
"""
klass = self._select_struct_union_class(p[1])
p[0] = klass(
name=p[2],
decls=p[4],
coord=self._coord(p.lineno(2)))
def p_struct_or_union(self, p):
""" struct_or_union : STRUCT
| UNION
"""
p[0] = p[1]
# Combine all declarations into a single list
#
def p_struct_declaration_list(self, p):
""" struct_declaration_list : struct_declaration
| struct_declaration_list struct_declaration
"""
p[0] = p[1] if len(p) == 2 else p[1] + p[2]
def p_struct_declaration_1(self, p):
""" struct_declaration : specifier_qualifier_list struct_declarator_list_opt SEMI
"""
spec = p[1]
decls = []
if p[2] is not None:
for struct_decl in p[2]:
if struct_decl['decl'] is not None:
decl_coord = struct_decl['decl'].coord
else:
decl_coord = struct_decl['bitsize'].coord
decl = c_ast.Decl(
name=None,
quals=spec['qual'],
funcspec=spec['function'],
storage=spec['storage'],
type=struct_decl['decl'],
init=None,
bitsize=struct_decl['bitsize'],
coord=decl_coord)
typename = spec['type']
decls.append(self._fix_decl_name_type(decl, typename))
else:
# Anonymous struct/union, gcc extension, C1x feature.
# Although the standard only allows structs/unions here, I see no
# reason to disallow other types since some compilers have typedefs
# here, and pycparser isn't about rejecting all invalid code.
#
node = spec['type'][0]
if isinstance(node, c_ast.Node):
decl_type = node
else:
decl_type = c_ast.IdentifierType(node)
decl = c_ast.Decl(
name=None,
quals=spec['qual'],
funcspec=spec['function'],
storage=spec['storage'],
type=decl_type,
init=None,
bitsize=None,
coord=self._coord(p.lineno(3)))
decls.append(decl)
p[0] = decls
def p_struct_declarator_list(self, p):
""" struct_declarator_list : struct_declarator
| struct_declarator_list COMMA struct_declarator
"""
p[0] = p[1] + [p[3]] if len(p) == 4 else [p[1]]
# struct_declarator passes up a dict with the keys: decl (for
# the underlying declarator) and bitsize (for the bitsize)
#
def p_struct_declarator_1(self, p):
""" struct_declarator : declarator
"""
p[0] = {'decl': p[1], 'bitsize': None}
def p_struct_declarator_2(self, p):
""" struct_declarator : declarator COLON constant_expression
| COLON constant_expression
"""
if len(p) > 3:
p[0] = {'decl': p[1], 'bitsize': p[3]}
else:
p[0] = {'decl': c_ast.TypeDecl(None, None, None), 'bitsize': p[2]}
def p_enum_specifier_1(self, p):
""" enum_specifier : ENUM ID
| ENUM TYPEID
"""
p[0] = c_ast.Enum(p[2], None, self._coord(p.lineno(1)))
def p_enum_specifier_2(self, p):
""" enum_specifier : ENUM brace_open enumerator_list brace_close
"""
p[0] = c_ast.Enum(None, p[3], self._coord(p.lineno(1)))
def p_enum_specifier_3(self, p):
""" enum_specifier : ENUM ID brace_open enumerator_list brace_close
| ENUM TYPEID brace_open enumerator_list brace_close
"""
p[0] = c_ast.Enum(p[2], p[4], self._coord(p.lineno(1)))
def p_enumerator_list(self, p):
""" enumerator_list : enumerator
| enumerator_list COMMA
| enumerator_list COMMA enumerator
"""
if len(p) == 2:
p[0] = c_ast.EnumeratorList([p[1]], p[1].coord)
elif len(p) == 3:
p[0] = p[1]
else:
p[1].enumerators.append(p[3])
p[0] = p[1]
def p_enumerator(self, p):
""" enumerator : ID
| ID EQUALS constant_expression
"""
if len(p) == 2:
p[0] = c_ast.Enumerator(
p[1], None,
self._coord(p.lineno(1)))
else:
p[0] = c_ast.Enumerator(
p[1], p[3],
self._coord(p.lineno(1)))
def p_declarator_1(self, p):
""" declarator : direct_declarator
"""
p[0] = p[1]
def p_declarator_2(self, p):
""" declarator : pointer direct_declarator
"""
p[0] = self._type_modify_decl(p[2], p[1])
def p_direct_declarator_1(self, p):
""" direct_declarator : ID
"""
p[0] = c_ast.TypeDecl(
declname=p[1],
type=None,
quals=None,
coord=self._coord(p.lineno(1)))
def p_direct_declarator_2(self, p):
""" direct_declarator : LPAREN declarator RPAREN
"""
p[0] = p[2]
def p_direct_declarator_3(self, p):
""" direct_declarator : direct_declarator LBRACKET assignment_expression_opt RBRACKET
"""
arr = c_ast.ArrayDecl(
type=None,
dim=p[3],
coord=p[1].coord)
p[0] = self._type_modify_decl(decl=p[1], modifier=arr)
# Special for VLAs
#
def p_direct_declarator_4(self, p):
""" direct_declarator : direct_declarator LBRACKET TIMES RBRACKET
"""
arr = c_ast.ArrayDecl(
type=None,
dim=c_ast.ID(p[3], self._coord(p.lineno(3))),
coord=p[1].coord)
p[0] = self._type_modify_decl(decl=p[1], modifier=arr)
def p_direct_declarator_5(self, p):
""" direct_declarator : direct_declarator LPAREN parameter_type_list RPAREN
| direct_declarator LPAREN identifier_list_opt RPAREN
"""
func = c_ast.FuncDecl(
args=p[3],
type=None,
coord=p[1].coord)
p[0] = self._type_modify_decl(decl=p[1], modifier=func)
def p_pointer(self, p):
""" pointer : TIMES type_qualifier_list_opt
| TIMES type_qualifier_list_opt pointer
"""
coord = self._coord(p.lineno(1))
p[0] = c_ast.PtrDecl(
quals=p[2] or [],
type=p[3] if len(p) > 3 else None,
coord=coord)
def p_type_qualifier_list(self, p):
""" type_qualifier_list : type_qualifier
| type_qualifier_list type_qualifier
"""
p[0] = [p[1]] if len(p) == 2 else p[1] + [p[2]]
def p_parameter_type_list(self, p):
""" parameter_type_list : parameter_list
| parameter_list COMMA ELLIPSIS
"""
if len(p) > 2:
p[1].params.append(c_ast.EllipsisParam(self._coord(p.lineno(3))))
p[0] = p[1]
def p_parameter_list(self, p):
""" parameter_list : parameter_declaration
| parameter_list COMMA parameter_declaration
"""
if len(p) == 2: # single parameter
p[0] = c_ast.ParamList([p[1]], p[1].coord)
else:
p[1].params.append(p[3])
p[0] = p[1]
def p_parameter_declaration_1(self, p):
""" parameter_declaration : declaration_specifiers declarator
"""
spec = p[1]
decl = p[2]
decl = c_ast.Decl(
name=None,
quals=spec['qual'],
storage=spec['storage'],
funcspec=spec['function'],
type=decl,
init=None,
bitsize=None,
coord=decl.coord)
typename = spec['type'] or ['int']
p[0] = self._fix_decl_name_type(decl, typename)
def p_parameter_declaration_2(self, p):
""" parameter_declaration : declaration_specifiers abstract_declarator_opt
"""
spec = p[1]
decl = c_ast.Typename(
quals=spec['qual'],
type=p[2] or c_ast.TypeDecl(None, None, None),
coord=self._coord(p.lineno(2)))
typename = spec['type'] or ['int']
p[0] = self._fix_decl_name_type(decl, typename)
def p_identifier_list(self, p):
""" identifier_list : identifier
| identifier_list COMMA identifier
"""
if len(p) == 2: # single parameter
p[0] = c_ast.ParamList([p[1]], p[1].coord)
else:
p[1].params.append(p[3])
p[0] = p[1]
def p_initializer_1(self, p):
""" initializer : assignment_expression
"""
p[0] = p[1]
def p_initializer_2(self, p):
""" initializer : brace_open initializer_list brace_close
| brace_open initializer_list COMMA brace_close
"""
p[0] = p[2]
def p_initializer_list(self, p):
""" initializer_list : designation_opt initializer
| initializer_list COMMA designation_opt initializer
"""
if len(p) == 3: # single initializer
init = p[2] if p[1] is None else c_ast.NamedInitializer(p[1], p[2])
p[0] = c_ast.InitList([init], p[2].coord)
else:
init = p[4] if p[3] is None else c_ast.NamedInitializer(p[3], p[4])
p[1].exprs.append(init)
p[0] = p[1]
def p_designation(self, p):
""" designation : designator_list EQUALS
"""
p[0] = p[1]
# Designators are represented as a list of nodes, in the order in which
# they're written in the code.
#
def p_designator_list(self, p):
""" designator_list : designator
| designator_list designator
"""
p[0] = [p[1]] if len(p) == 2 else p[1] + [p[2]]
def p_designator(self, p):
""" designator : LBRACKET constant_expression RBRACKET
| PERIOD identifier
"""
p[0] = p[2]
def p_type_name(self, p):
""" type_name : specifier_qualifier_list abstract_declarator_opt
"""
#~ print '=========='
#~ print p[1]
#~ print p[2]
#~ print p[2].children()
#~ print '=========='
typename = c_ast.Typename(
quals=p[1]['qual'],
type=p[2] or c_ast.TypeDecl(None, None, None),
coord=self._coord(p.lineno(2)))
p[0] = self._fix_decl_name_type(typename, p[1]['type'])
def p_abstract_declarator_1(self, p):
""" abstract_declarator : pointer
"""
dummytype = c_ast.TypeDecl(None, None, None)
p[0] = self._type_modify_decl(
decl=dummytype,
modifier=p[1])
def p_abstract_declarator_2(self, p):
""" abstract_declarator : pointer direct_abstract_declarator
"""
p[0] = self._type_modify_decl(p[2], p[1])
def p_abstract_declarator_3(self, p):
""" abstract_declarator : direct_abstract_declarator
"""
p[0] = p[1]
# Creating and using direct_abstract_declarator_opt here
# instead of listing both direct_abstract_declarator and the
# lack of it in the beginning of _1 and _2 caused two
# shift/reduce errors.
#
def p_direct_abstract_declarator_1(self, p):
""" direct_abstract_declarator : LPAREN abstract_declarator RPAREN """
p[0] = p[2]
def p_direct_abstract_declarator_2(self, p):
""" direct_abstract_declarator : direct_abstract_declarator LBRACKET assignment_expression_opt RBRACKET
"""
arr = c_ast.ArrayDecl(
type=None,
dim=p[3],
coord=p[1].coord)
p[0] = self._type_modify_decl(decl=p[1], modifier=arr)
def p_direct_abstract_declarator_3(self, p):
""" direct_abstract_declarator : LBRACKET assignment_expression_opt RBRACKET
"""
p[0] = c_ast.ArrayDecl(
type=c_ast.TypeDecl(None, None, None),
dim=p[2],
coord=self._coord(p.lineno(1)))
def p_direct_abstract_declarator_4(self, p):
""" direct_abstract_declarator : direct_abstract_declarator LBRACKET TIMES RBRACKET
"""
arr = c_ast.ArrayDecl(
type=None,
dim=c_ast.ID(p[3], self._coord(p.lineno(3))),
coord=p[1].coord)
p[0] = self._type_modify_decl(decl=p[1], modifier=arr)
def p_direct_abstract_declarator_5(self, p):
""" direct_abstract_declarator : LBRACKET TIMES RBRACKET
"""
p[0] = c_ast.ArrayDecl(
type=c_ast.TypeDecl(None, None, None),
dim=c_ast.ID(p[3], self._coord(p.lineno(3))),
coord=self._coord(p.lineno(1)))
def p_direct_abstract_declarator_6(self, p):
""" direct_abstract_declarator : direct_abstract_declarator LPAREN parameter_type_list_opt RPAREN
"""
func = c_ast.FuncDecl(
args=p[3],
type=None,
coord=p[1].coord)
p[0] = self._type_modify_decl(decl=p[1], modifier=func)
def p_direct_abstract_declarator_7(self, p):
""" direct_abstract_declarator : LPAREN parameter_type_list_opt RPAREN
"""
p[0] = c_ast.FuncDecl(
args=p[2],
type=c_ast.TypeDecl(None, None, None),
coord=self._coord(p.lineno(1)))
# declaration is a list, statement isn't. To make it consistent, block_item
# will always be a list
#
def p_block_item(self, p):
""" block_item : declaration
| statement
"""
p[0] = p[1] if isinstance(p[1], list) else [p[1]]
# Since we made block_item a list, this just combines lists
#
def p_block_item_list(self, p):
""" block_item_list : block_item
| block_item_list block_item
"""
# Empty block items (plain ';') produce [None], so ignore them
p[0] = p[1] if (len(p) == 2 or p[2] == [None]) else p[1] + p[2]
def p_compound_statement_1(self, p):
""" compound_statement : brace_open block_item_list_opt brace_close """
p[0] = c_ast.Compound(
block_items=p[2],
coord=self._coord(p.lineno(1)))
def p_labeled_statement_1(self, p):
""" labeled_statement : ID COLON statement """
p[0] = c_ast.Label(p[1], p[3], self._coord(p.lineno(1)))
def p_labeled_statement_2(self, p):
""" labeled_statement : CASE constant_expression COLON statement """
p[0] = c_ast.Case(p[2], [p[4]], self._coord(p.lineno(1)))
def p_labeled_statement_3(self, p):
""" labeled_statement : DEFAULT COLON statement """
p[0] = c_ast.Default([p[3]], self._coord(p.lineno(1)))
def p_selection_statement_1(self, p):
""" selection_statement : IF LPAREN expression RPAREN statement """
p[0] = c_ast.If(p[3], p[5], None, self._coord(p.lineno(1)))
def p_selection_statement_2(self, p):
""" selection_statement : IF LPAREN expression RPAREN statement ELSE statement """
p[0] = c_ast.If(p[3], p[5], p[7], self._coord(p.lineno(1)))
def p_selection_statement_3(self, p):
""" selection_statement : SWITCH LPAREN expression RPAREN statement """
p[0] = fix_switch_cases(
c_ast.Switch(p[3], p[5], self._coord(p.lineno(1))))
def p_iteration_statement_1(self, p):
""" iteration_statement : WHILE LPAREN expression RPAREN statement """
p[0] = c_ast.While(p[3], p[5], self._coord(p.lineno(1)))
def p_iteration_statement_2(self, p):
""" iteration_statement : DO statement WHILE LPAREN expression RPAREN SEMI """
p[0] = c_ast.DoWhile(p[5], p[2], self._coord(p.lineno(1)))
def p_iteration_statement_3(self, p):
""" iteration_statement : FOR LPAREN expression_opt SEMI expression_opt SEMI expression_opt RPAREN statement """
p[0] = c_ast.For(p[3], p[5], p[7], p[9], self._coord(p.lineno(1)))
def p_iteration_statement_4(self, p):
""" iteration_statement : FOR LPAREN declaration expression_opt SEMI expression_opt RPAREN statement """
p[0] = c_ast.For(c_ast.DeclList(p[3]), p[4], p[6], p[8], self._coord(p.lineno(1)))
def p_jump_statement_1(self, p):
""" jump_statement : GOTO ID SEMI """
p[0] = c_ast.Goto(p[2], self._coord(p.lineno(1)))
def p_jump_statement_2(self, p):
""" jump_statement : BREAK SEMI """
p[0] = c_ast.Break(self._coord(p.lineno(1)))
def p_jump_statement_3(self, p):
""" jump_statement : CONTINUE SEMI """
p[0] = c_ast.Continue(self._coord(p.lineno(1)))
def p_jump_statement_4(self, p):
""" jump_statement : RETURN expression SEMI
| RETURN SEMI
"""
p[0] = c_ast.Return(p[2] if len(p) == 4 else None, self._coord(p.lineno(1)))
def p_expression_statement(self, p):
""" expression_statement : expression_opt SEMI """
if p[1] is None:
p[0] = c_ast.EmptyStatement(self._coord(p.lineno(1)))
else:
p[0] = p[1]
def p_expression(self, p):
""" expression : assignment_expression
| expression COMMA assignment_expression
"""
if len(p) == 2:
p[0] = p[1]
else:
if not isinstance(p[1], c_ast.ExprList):
p[1] = c_ast.ExprList([p[1]], p[1].coord)
p[1].exprs.append(p[3])
p[0] = p[1]
def p_typedef_name(self, p):
""" typedef_name : TYPEID """
p[0] = c_ast.IdentifierType([p[1]], coord=self._coord(p.lineno(1)))
def p_assignment_expression(self, p):
""" assignment_expression : conditional_expression
| unary_expression assignment_operator assignment_expression
"""
if len(p) == 2:
p[0] = p[1]
else:
p[0] = c_ast.Assignment(p[2], p[1], p[3], p[1].coord)
# K&R2 defines these as many separate rules, to encode
    # precedence and associativity. Why work hard? I'll just use
    # the built-in precedence/associativity specification feature
# of PLY. (see precedence declaration above)
#
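    # A sketch of that declaration (lowest to highest binding strength;
    # the actual table lives earlier in this class):
    #     precedence = (
    #         ('left', 'LOR'), ('left', 'LAND'), ('left', 'OR'),
    #         ('left', 'XOR'), ('left', 'AND'), ('left', 'EQ', 'NE'),
    #         ('left', 'GT', 'GE', 'LT', 'LE'),
    #         ('left', 'RSHIFT', 'LSHIFT'), ('left', 'PLUS', 'MINUS'),
    #         ('left', 'TIMES', 'DIVIDE', 'MOD'),
    #     )
    #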
def p_assignment_operator(self, p):
""" assignment_operator : EQUALS
| XOREQUAL
| TIMESEQUAL
| DIVEQUAL
| MODEQUAL
| PLUSEQUAL
| MINUSEQUAL
| LSHIFTEQUAL
| RSHIFTEQUAL
| ANDEQUAL
| OREQUAL
"""
p[0] = p[1]
def p_constant_expression(self, p):
""" constant_expression : conditional_expression """
p[0] = p[1]
def p_conditional_expression(self, p):
""" conditional_expression : binary_expression
| binary_expression CONDOP expression COLON conditional_expression
"""
if len(p) == 2:
p[0] = p[1]
else:
p[0] = c_ast.TernaryOp(p[1], p[3], p[5], p[1].coord)
def p_binary_expression(self, p):
""" binary_expression : cast_expression
| binary_expression TIMES binary_expression
| binary_expression DIVIDE binary_expression
| binary_expression MOD binary_expression
| binary_expression PLUS binary_expression
| binary_expression MINUS binary_expression
| binary_expression RSHIFT binary_expression
| binary_expression LSHIFT binary_expression
| binary_expression LT binary_expression
| binary_expression LE binary_expression
| binary_expression GE binary_expression
| binary_expression GT binary_expression
| binary_expression EQ binary_expression
| binary_expression NE binary_expression
| binary_expression AND binary_expression
| binary_expression OR binary_expression
| binary_expression XOR binary_expression
| binary_expression LAND binary_expression
| binary_expression LOR binary_expression
"""
if len(p) == 2:
p[0] = p[1]
else:
p[0] = c_ast.BinaryOp(p[2], p[1], p[3], p[1].coord)
    def p_cast_type(self, p):
        """ cast_type : type_name
                      | template
        """
        # Pass the type through so p_cast_expression_2 below receives a
        # real node rather than None.
        p[0] = p[1]
def p_cast_expression_1(self, p):
""" cast_expression : unary_expression """
p[0] = p[1]
def p_cast_expression_2(self, p):
""" cast_expression : LPAREN cast_type RPAREN cast_expression """
p[0] = c_ast.Cast(p[2], p[4], self._coord(p.lineno(1)))
def p_unary_expression_1(self, p):
""" unary_expression : postfix_expression """
p[0] = p[1]
def p_unary_expression_2(self, p):
""" unary_expression : PLUSPLUS unary_expression
| MINUSMINUS unary_expression
| unary_operator cast_expression
"""
p[0] = c_ast.UnaryOp(p[1], p[2], p[2].coord)
def p_unary_expression_3(self, p):
""" unary_expression : SIZEOF unary_expression
| SIZEOF LPAREN type_name RPAREN
"""
p[0] = c_ast.UnaryOp(
p[1],
p[2] if len(p) == 3 else p[3],
self._coord(p.lineno(1)))
def p_unary_operator(self, p):
""" unary_operator : AND
| TIMES
| PLUS
| MINUS
| NOT
| LNOT
"""
p[0] = p[1]
def p_postfix_expression_1(self, p):
""" postfix_expression : primary_expression """
p[0] = p[1]
def p_postfix_expression_2(self, p):
""" postfix_expression : postfix_expression LBRACKET expression RBRACKET """
p[0] = c_ast.ArrayRef(p[1], p[3], p[1].coord)
def p_postfix_expression_3(self, p):
""" postfix_expression : postfix_expression LPAREN argument_expression_list RPAREN
| postfix_expression LPAREN RPAREN
"""
p[0] = c_ast.FuncCall(p[1], p[3] if len(p) == 5 else None, p[1].coord)
def p_postfix_expression_4(self, p):
""" postfix_expression : postfix_expression PERIOD identifier
| postfix_expression ARROW identifier
"""
p[0] = c_ast.StructRef(p[1], p[2], p[3], p[1].coord)
def p_postfix_expression_5(self, p):
""" postfix_expression : postfix_expression PLUSPLUS
| postfix_expression MINUSMINUS
"""
p[0] = c_ast.UnaryOp('p' + p[2], p[1], p[1].coord)
def p_postfix_expression_6(self, p):
""" postfix_expression : LPAREN type_name RPAREN brace_open initializer_list brace_close
| LPAREN type_name RPAREN brace_open initializer_list COMMA brace_close
"""
p[0] = c_ast.CompoundLiteral(p[2], p[5])
def p_primary_expression_1(self, p):
""" primary_expression : identifier """
p[0] = p[1]
def p_primary_expression_2(self, p):
""" primary_expression : constant """
p[0] = p[1]
def p_primary_expression_3(self, p):
""" primary_expression : unified_string_literal
| unified_wstring_literal
"""
p[0] = p[1]
def p_primary_expression_4(self, p):
""" primary_expression : LPAREN expression RPAREN """
p[0] = p[2]
def p_argument_expression_list(self, p):
""" argument_expression_list : assignment_expression
| argument_expression_list COMMA assignment_expression
"""
if len(p) == 2: # single expr
p[0] = c_ast.ExprList([p[1]], p[1].coord)
else:
p[1].exprs.append(p[3])
p[0] = p[1]
def p_identifier(self, p):
""" identifier : ID """
p[0] = c_ast.ID(p[1], self._coord(p.lineno(1)))
def p_constant_1(self, p):
""" constant : INT_CONST_DEC
| INT_CONST_OCT
| INT_CONST_HEX
"""
p[0] = c_ast.Constant(
'int', p[1], self._coord(p.lineno(1)))
def p_constant_2(self, p):
""" constant : FLOAT_CONST
| HEX_FLOAT_CONST
"""
p[0] = c_ast.Constant(
'float', p[1], self._coord(p.lineno(1)))
def p_constant_3(self, p):
""" constant : CHAR_CONST
| WCHAR_CONST
"""
p[0] = c_ast.Constant(
'char', p[1], self._coord(p.lineno(1)))
# The "unified" string and wstring literal rules are for supporting
# concatenation of adjacent string literals.
# I.e. "hello " "world" is seen by the C compiler as a single string literal
# with the value "hello world"
#
def p_unified_string_literal(self, p):
""" unified_string_literal : STRING_LITERAL
| unified_string_literal STRING_LITERAL
"""
if len(p) == 2: # single literal
p[0] = c_ast.Constant(
'string', p[1], self._coord(p.lineno(1)))
else:
p[1].value = p[1].value[:-1] + p[2][1:]
p[0] = p[1]
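    # Sketch: for the input '"hello " "world"', the first STRING_LITERAL
    # becomes a Constant with value '"hello "'; the merge above then drops
    # its closing quote and the second literal's opening quote, leaving a
    # single Constant with value '"hello world"'.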
def p_unified_wstring_literal(self, p):
""" unified_wstring_literal : WSTRING_LITERAL
| unified_wstring_literal WSTRING_LITERAL
"""
if len(p) == 2: # single literal
p[0] = c_ast.Constant(
'string', p[1], self._coord(p.lineno(1)))
else:
            p[1].value = p[1].value[:-1] + p[2][1:]
p[0] = p[1]
def p_brace_open(self, p):
""" brace_open : LBRACE
"""
self._push_scope()
p[0] = p[1]
def p_brace_close(self, p):
""" brace_close : RBRACE
"""
self._pop_scope()
p[0] = p[1]
def p_empty(self, p):
'empty : '
p[0] = None
def p_error(self, p):
if p:
self._parse_error(
'before: %s' % p.value,
self._coord(lineno=p.lineno,
column=self.clex.find_tok_column(p)))
else:
self._parse_error('At end of input', '')
#------------------------------------------------------------------------------
if __name__ == "__main__":
import pprint
import time, sys
#t1 = time.time()
#parser = CParser(lex_optimize=True, yacc_debug=True, yacc_optimize=False)
    #sys.stdout.write(str(time.time() - t1))
#buf = '''
#int (*k)(int);
#'''
## set debuglevel to 2 for debugging
#t = parser.parse(buf, 'x.c', debuglevel=0)
#t.show(showcoord=True)
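    #
    # A minimal usage sketch (assumed entry point mirroring the commented-out
    # demo above; 'x.c' is just a display filename):
    #
    #   parser = CParser()
    #   ast = parser.parse('int (*k)(int);', 'x.c')
    #   ast.show(showcoord=True)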
#
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import deque
import os
import sys
from PyQt5 import QtCore
from PyQt5 import QtWidgets
import constants
from image import Image
from view import MainWindow
class Controller(QtCore.QObject):
"""
Application controller. Coordinates between view objects and Image
state/model object.
"""
def __init__(self, app):
super().__init__()
self.app = app
self.src_dir = None
self.dest_file = None
self.images = None
self.current_image = None
self.main_window = None
        # These methods will launch their corresponding states. The states
        # determine the high-level application flow.
self._state_launchers = deque([
self._choose_src_dir_and_dest_file,
self._create_main_window,
self.app.quit,
])
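        # Sketch of the resulting flow: run() launches
        # _choose_src_dir_and_dest_file, which calls _next_state() on
        # success to launch _create_main_window; once the image queue is
        # exhausted, show_next_image() calls _next_state() a final time,
        # popping app.quit and ending the application.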
def run(self):
"""
Starts the application.
"""
self._next_state()
def show_next_image(self, write=False):
"""
Displays the next image in images. If there is an image open and write
is True, that image's data will be written to dest_file.
"""
if write and self.current_image is not None:
self._write_data(str(self.current_image))
try:
impath = self.images.pop()
except IndexError:
self._next_state()
return
self.current_image = Image(impath)
self.main_window.image_layout.show_image(self.current_image.resized_cv_img)
self.main_window.ctrl_layout.reset()
self.main_window._resize()
def _choose_src_dir_and_dest_file(self):
"""
        Application setup. Prompts for the src dir and dest file if they
        were not specified on the command line or the command-line args
        were invalid.
"""
if len(sys.argv) > 1 and os.path.isdir(sys.argv[1]):
self.src_dir = sys.argv[1]
else:
self.src_dir = QtWidgets.QFileDialog.getExistingDirectory(
caption='Select source directory')
if len(sys.argv) > 2 and \
(os.path.dirname(sys.argv[2]) == '' or
os.path.isdir(os.path.dirname(sys.argv[2]))):
self.dest_file = sys.argv[2]
else:
self.dest_file = QtWidgets.QFileDialog.getSaveFileName(
caption='Select save file')[0]
try:
# Create output file
open(self.dest_file, 'w').close()
self.images = self._get_image_files(self.src_dir)
except (IOError, OSError):
self.app.quit()
return
self._next_state()
def _connect_signals_and_slots(self):
"""
Connects view and model with signals and slots.
"""
        # Short aliases to keep line lengths down
ctrl = self.main_window.ctrl_layout
img = self.main_window.image_layout
# Wire up skip image button
ctrl.skip_button.clicked.connect(self.show_next_image)
# Wire up image type dropdown
ctrl.type_dropdown.currentIndexChanged[str].connect(
self._set_image_type)
# Wire up solar perimeter button
ctrl.solar_button.clicked.connect(
lambda _: self._set_image_circle(Image.SOLAR))
# Wire up lunar perimeter button
ctrl.lunar_button.clicked.connect(
lambda _: self._set_image_circle(Image.LUNAR))
# Wire up reset button
ctrl.reset_button.clicked.connect(self._reset)
# Wire up save button
ctrl.save_button.clicked.connect(
lambda _: self.show_next_image(write=True))
# Handle click events on the displayed image
img.mouse_pressed.connect(self._record_mouse_press)
def _create_main_window(self):
"""
        Creates the main window, initiating the main application state.
"""
self.main_window = MainWindow()
self._connect_signals_and_slots()
self.show_next_image()
self.main_window.show()
def _disable_circle_buttons(self):
"""
Disables [solar|lunar]_circle buttons if their corresponding circles
have been defined.
"""
if self.current_image.solar_circle is not None:
self.main_window.ctrl_layout.solar_button.setEnabled(False)
if self.current_image.lunar_circle is not None:
self.main_window.ctrl_layout.lunar_button.setEnabled(False)
def _enable_disable_save(self):
"""
Enables/disables the save button depending on whether or not the
current_image's complete flag is set.
"""
self.main_window.ctrl_layout.save_button.setEnabled(
self.current_image.complete)
def _next_state(self):
"""
Launches the next application state.
"""
state = self._state_launchers.popleft()
timer = QtCore.QTimer(self.app)
timer.setSingleShot(True)
timer.timeout.connect(state)
timer.start(0)
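        # NOTE: a zero-delay single-shot timer defers launching the state to
        # the Qt event loop rather than invoking it synchronously, so each
        # state runs from a fresh stack frame once control returns to the
        # event loop.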
def _record_mouse_press(self, point):
"""
Adds a point clicked by the user to current_image. Updates view
accordingly.
"""
self.current_image.add_point(point)
# If the active circle is fully defined
if self.current_image.active_circle_complete():
# Compute circle center/radius, visualize it on
# current_image.resized_cv_img
self.current_image.process_active_circle()
# Update the displayed image to have the newly added circle overlay
self.main_window.image_layout.show_image(
self.current_image.resized_cv_img)
# Disable circle buttons, enable/disable save button as necessary
self._disable_circle_buttons()
self._enable_disable_save()
def _reset(self):
"""
Reset state for current image. Resets the Image object and UI.
"""
self.main_window.ctrl_layout.reset()
self.current_image.reset()
self.main_window.image_layout.show_image(self.current_image.resized_cv_img)
def _set_image_circle(self, circle):
"""
Activates the circle chosen by the user.
"""
self.current_image.activate_circle(circle)
self._enable_disable_save()
def _set_image_type(self, img_type):
"""
Updates the Image.type flag based on user dropdown input.
"""
self.current_image.type = img_type
self.current_image.set_complete()
self._enable_disable_save()
def _write_data(self, data):
"""
        Appends data, followed by a newline, to dest_file.
"""
with open(self.dest_file, 'a') as f:
f.write(data + '\n')
@staticmethod
def _get_image_files(directory):
"""
Returns a list of image files (defined as files ending with extensions in
constants.ALLOWED_IMAGE_FORMATS) in directory.
"""
images = list()
for f in os.listdir(directory):
fname = f.lower()
for ext in constants.ALLOWED_IMAGE_FORMATS:
if fname.endswith('.' + ext):
images.append(os.path.join(directory, f))
return images
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A module for storing and getting objects from datastore.
This module provides Get, Set and Delete functions for storing pickleable
objects in datastore, with support for large objects greater than 1 MB.
Although this module contains ndb.Model classes, these are not intended
to be used directly by other modules.
App Engine datastore limits entity size to less than 1 MB; this module
supports storing larger objects by splitting the data and using multiple
datastore entities and multiple memcache keys. Using ndb.get and pickle, a
complex data structure can be retrieved more quickly than with a regular
datastore fetch.
Example:
john = Account()
john.username = 'John'
john.userid = 123
stored_object.Set(john.userid, john)
"""
import cPickle as pickle
import logging
from google.appengine.api import memcache
from google.appengine.ext import ndb
_MULTIPART_ENTITY_MEMCACHE_KEY = 'multipart_entity_'
# Maximum number of datastore entities and memcache values used to
# store a single value.
# The limit for data stored in one datastore entity is 1 MB,
# and the limit for memcache batch operations is 32 MB. See:
# https://cloud.google.com/appengine/docs/python/memcache/#Python_Limits
_MAX_NUM_PARTS = 16
# Max bytes per entity or value cached with memcache.
_CHUNK_SIZE = 1000 * 1000
@ndb.synctasklet
def Get(key):
"""Gets the value.
Args:
key: String key value.
    Returns:
        The value stored for key, or None if not found.
"""
result = yield GetAsync(key)
raise ndb.Return(result)
@ndb.tasklet
def GetAsync(key):
    results = yield MultipartCache.GetAsync(key)
    if not results:
        # Cache miss: fall back to datastore, then repopulate memcache with
        # the freshly fetched value instead of the empty miss result.
        results = yield _GetValueFromDatastore(key)
        yield MultipartCache.SetAsync(key, results)
    raise ndb.Return(results)
@ndb.synctasklet
def Set(key, value):
"""Sets the value in datastore and memcache with limit of '_MAX_NUM_PARTS' MB.
Args:
key: String key value.
value: A pickleable value to be stored limited at '_MAX_NUM_PARTS' MB.
"""
yield SetAsync(key, value)
@ndb.tasklet
def SetAsync(key, value):
entity = yield ndb.Key(MultipartEntity, key).get_async()
if not entity:
entity = MultipartEntity(id=key)
entity.SetData(value)
yield (entity.PutAsync(),
MultipartCache.SetAsync(key, value))
@ndb.synctasklet
def Delete(key):
"""Deletes the value in datastore and memcache."""
yield DeleteAsync(key)
@ndb.tasklet
def DeleteAsync(key):
multipart_entity_key = ndb.Key(MultipartEntity, key)
yield (multipart_entity_key.delete_async(),
MultipartEntity.DeleteAsync(multipart_entity_key),
MultipartCache.DeleteAsync(key))
class MultipartEntity(ndb.Model):
"""Container for PartEntity."""
    # Number of PartEntity children used to store the serialized data.
size = ndb.IntegerProperty(default=0, indexed=False)
@ndb.tasklet
def GetPartsAsync(self):
"""Deserializes data from multiple PartEntity."""
if not self.size:
raise ndb.Return(None)
string_id = self.key.string_id()
part_keys = [ndb.Key(MultipartEntity, string_id, PartEntity, i + 1)
for i in xrange(self.size)]
part_entities = yield ndb.get_multi_async(part_keys)
serialized = ''.join(p.value for p in part_entities if p is not None)
self.SetData(pickle.loads(serialized))
@classmethod
@ndb.tasklet
def DeleteAsync(cls, key):
part_keys = yield PartEntity.query(ancestor=key).fetch_async(keys_only=True)
yield ndb.delete_multi_async(part_keys)
@ndb.tasklet
def PutAsync(self):
"""Stores serialized data over multiple PartEntity."""
serialized_parts = _Serialize(self.GetData())
if len(serialized_parts) > _MAX_NUM_PARTS:
logging.error('Max number of parts reached.')
return
part_list = []
num_parts = len(serialized_parts)
for i in xrange(num_parts):
if serialized_parts[i] is not None:
part = PartEntity(id=i + 1, parent=self.key, value=serialized_parts[i])
part_list.append(part)
self.size = num_parts
yield ndb.put_multi_async(part_list + [self])
def GetData(self):
return getattr(self, '_data', None)
def SetData(self, data):
setattr(self, '_data', data)
class PartEntity(ndb.Model):
"""Holds a part of serialized data for MultipartEntity.
This entity key has the form:
ndb.Key('MultipartEntity', multipart_entity_id, 'PartEntity', part_index)
"""
value = ndb.BlobProperty()
class MultipartCache(object):
"""Contains operations for storing values over multiple memcache keys.
Values are serialized, split, and stored over multiple memcache keys. The
head cache stores the expected size.
"""
@classmethod
@ndb.tasklet
def GetAsync(cls, key):
"""Gets value in memcache."""
keys = cls._GetCacheKeyList(key)
head_key = cls._GetCacheKey(key)
client = memcache.Client()
cache_values = yield client.get_multi_async(keys)
# Whether we have all the memcache values.
if len(keys) != len(cache_values) or head_key not in cache_values:
raise ndb.Return(None)
serialized = ''
cache_size = cache_values[head_key]
keys.remove(head_key)
        for part_key in keys[:cache_size]:
            if part_key not in cache_values:
                raise ndb.Return(None)
            if cache_values[part_key] is not None:
                serialized += cache_values[part_key]
raise ndb.Return(pickle.loads(serialized))
@classmethod
@ndb.tasklet
def SetAsync(cls, key, value):
"""Sets a value in memcache."""
serialized_parts = _Serialize(value)
if len(serialized_parts) > _MAX_NUM_PARTS:
logging.error('Max number of parts reached.')
raise ndb.Return(None)
cached_values = {}
cached_values[cls._GetCacheKey(key)] = len(serialized_parts)
for i in xrange(len(serialized_parts)):
cached_values[cls._GetCacheKey(key, i)] = serialized_parts[i]
client = memcache.Client()
yield client.set_multi_async(cached_values)
@classmethod
@ndb.synctasklet
def Delete(cls, key):
"""Deletes all cached values for key."""
yield cls.DeleteAsync(key)
@classmethod
@ndb.tasklet
def DeleteAsync(cls, key):
client = memcache.Client()
yield client.delete_multi_async(cls._GetCacheKeyList(key))
@classmethod
def _GetCacheKeyList(cls, key):
"""Gets a list of head cache key and cache key parts."""
keys = [cls._GetCacheKey(key, i) for i in xrange(_MAX_NUM_PARTS)]
keys.append(cls._GetCacheKey(key))
return keys
@classmethod
def _GetCacheKey(cls, key, index=None):
"""Returns either head cache key or cache key part."""
if index is not None:
return _MULTIPART_ENTITY_MEMCACHE_KEY + '%s.%s' % (key, index)
return _MULTIPART_ENTITY_MEMCACHE_KEY + key
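    # Sketch: _GetCacheKey('john') -> 'multipart_entity_john' (the head key
    # holding the part count), while _GetCacheKey('john', 0) ->
    # 'multipart_entity_john.0' (the first data chunk).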
@ndb.tasklet
def _GetValueFromDatastore(key):
entity = yield ndb.Key(MultipartEntity, key).get_async()
if not entity:
raise ndb.Return(None)
yield entity.GetPartsAsync()
raise ndb.Return(entity.GetData())
def _Serialize(value):
"""Serializes value and returns a list of its parts.
Args:
value: A pickleable value.
Returns:
A list of string representation of the value that has been pickled and split
into _CHUNK_SIZE.
"""
serialized = pickle.dumps(value, 2)
length = len(serialized)
values = []
for i in xrange(0, length, _CHUNK_SIZE):
values.append(serialized[i:i + _CHUNK_SIZE])
for i in xrange(len(values), _MAX_NUM_PARTS):
values.append(None)
return values
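# A sketch of the chunking with illustrative sizes: given _CHUNK_SIZE of
# 1000 * 1000 bytes, a 2.5 MB pickle yields three real parts (1 MB, 1 MB,
# 0.5 MB) followed by None padding out to _MAX_NUM_PARTS entries, so caching
# a smaller value overwrites any stale trailing parts from a larger one.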
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Generic Node base class for all workers that run on hosts."""
import inspect
import os
import random
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_service import loopingcall
from oslo_service import service
from oslo_service import wsgi
from oslo_utils import importutils
osprofiler_initializer = importutils.try_import('osprofiler.initializer')
profiler = importutils.try_import('osprofiler.profiler')
profiler_opts = importutils.try_import('osprofiler.opts')
from cinder.backup import rpcapi as backup_rpcapi
from cinder.common import constants
from cinder import context
from cinder import coordination
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder import objects
from cinder.objects import base as objects_base
from cinder.objects import fields
from cinder import rpc
from cinder.scheduler import rpcapi as scheduler_rpcapi
from cinder import version
from cinder.volume import rpcapi as volume_rpcapi
from cinder.volume import utils as vol_utils
LOG = logging.getLogger(__name__)
service_opts = [
cfg.IntOpt('report_interval',
default=10,
help='Interval, in seconds, between nodes reporting state '
'to datastore'),
cfg.IntOpt('periodic_interval',
default=60,
help='Interval, in seconds, between running periodic tasks'),
cfg.IntOpt('periodic_fuzzy_delay',
default=60,
help='Range, in seconds, to randomly delay when starting the'
' periodic task scheduler to reduce stampeding.'
' (Disable by setting to 0)'),
cfg.StrOpt('osapi_volume_listen',
default="0.0.0.0",
help='IP address on which OpenStack Volume API listens'),
cfg.PortOpt('osapi_volume_listen_port',
default=8776,
help='Port on which OpenStack Volume API listens'),
cfg.IntOpt('osapi_volume_workers',
help='Number of workers for OpenStack Volume API service. '
'The default is equal to the number of CPUs available.'),
cfg.BoolOpt('osapi_volume_use_ssl',
default=False,
help='Wraps the socket in a SSL context if True is set. '
'A certificate file and key file must be specified.'), ]
CONF = cfg.CONF
CONF.register_opts(service_opts)
if profiler_opts:
profiler_opts.set_defaults(CONF)
def setup_profiler(binary, host):
if (osprofiler_initializer is None or
profiler is None or
profiler_opts is None):
LOG.debug('osprofiler is not present')
return
if CONF.profiler.enabled:
osprofiler_initializer.init_from_conf(
conf=CONF,
context=context.get_admin_context().to_dict(),
project="cinder",
service=binary,
host=host
)
LOG.warning(
_LW("OSProfiler is enabled.\nIt means that person who knows "
"any of hmac_keys that are specified in "
"/etc/cinder/cinder.conf can trace his requests. \n"
"In real life only operator can read this file so there "
"is no security issue. Note that even if person can "
"trigger profiler, only admin user can retrieve trace "
"information.\n"
"To disable OSProfiler set in cinder.conf:\n"
"[profiler]\nenabled=false"))
class Service(service.Service):
"""Service object for binaries running on hosts.
A service takes a manager and enables rpc by listening to queues based
on topic. It also periodically runs tasks on the manager and reports
    its state to the database services table.
"""
# Make service_id a class attribute so it can be used for clean up
service_id = None
def __init__(self, host, binary, topic, manager, report_interval=None,
periodic_interval=None, periodic_fuzzy_delay=None,
service_name=None, coordination=False, cluster=None, *args,
**kwargs):
super(Service, self).__init__()
if not rpc.initialized():
rpc.init(CONF)
self.cluster = cluster
self.host = host
self.binary = binary
self.topic = topic
self.manager_class_name = manager
self.coordination = coordination
manager_class = importutils.import_class(self.manager_class_name)
if CONF.profiler.enabled:
manager_class = profiler.trace_cls("rpc")(manager_class)
self.service = None
# NOTE(geguileo): We need to create the Service DB entry before we
# create the manager, otherwise capped versions for serializer and rpc
# client would use existing DB entries not including us, which could
# result in us using None (if it's the first time the service is run)
# or an old version (if this is a normal upgrade of a single service).
ctxt = context.get_admin_context()
self.is_upgrading_to_n = self.is_svc_upgrading_to_n(binary)
try:
service_ref = objects.Service.get_by_args(ctxt, host, binary)
service_ref.rpc_current_version = manager_class.RPC_API_VERSION
obj_version = objects_base.OBJ_VERSIONS.get_current()
service_ref.object_current_version = obj_version
# TODO(geguileo): In O we can remove the service upgrading part on
# the next equation, because by then all our services will be
# properly setting the cluster during volume migrations since
# they'll have the new Volume ORM model. But until then we can
# only set the cluster in the DB and pass added_to_cluster to
# init_host when we have completed the rolling upgrade from M to N.
# added_to_cluster attribute marks when we consider that we have
# just added a host to a cluster so we can include resources into
# that cluster. We consider that we have added the host when we
# didn't have data in the cluster DB field and our current
# configuration has a cluster value. We don't want to do anything
# automatic if the cluster is changed, in those cases we'll want
            # to use the cinder-manage command and do it manually.
self.added_to_cluster = (not service_ref.cluster_name and cluster
and not self.is_upgrading_to_n)
# TODO(geguileo): In O - Remove self.is_upgrading_to_n part
if (service_ref.cluster_name != cluster and
not self.is_upgrading_to_n):
LOG.info(_LI('This service has been moved from cluster '
'%(cluster_svc)s to %(cluster_cfg)s. Resources '
'will %(opt_no)sbe moved to the new cluster'),
{'cluster_svc': service_ref.cluster_name,
'cluster_cfg': cluster,
'opt_no': '' if self.added_to_cluster else 'NO '})
if self.added_to_cluster:
                # We copy the service's disabled status to the cluster if
                # we have to create it.
self._ensure_cluster_exists(ctxt, service_ref)
service_ref.cluster_name = cluster
service_ref.save()
Service.service_id = service_ref.id
except exception.NotFound:
# We don't want to include cluster information on the service or
# create the cluster entry if we are upgrading.
self._create_service_ref(ctxt, manager_class.RPC_API_VERSION)
# TODO(geguileo): In O set added_to_cluster to True
# We don't want to include resources in the cluster during the
# start while we are still doing the rolling upgrade.
self.added_to_cluster = not self.is_upgrading_to_n
self.manager = manager_class(host=self.host,
cluster=self.cluster,
service_name=service_name,
*args, **kwargs)
self.report_interval = report_interval
self.periodic_interval = periodic_interval
self.periodic_fuzzy_delay = periodic_fuzzy_delay
self.basic_config_check()
self.saved_args, self.saved_kwargs = args, kwargs
self.timers = []
setup_profiler(binary, host)
self.rpcserver = None
self.backend_rpcserver = None
self.cluster_rpcserver = None
# TODO(geguileo): Remove method in O since it will no longer be used.
@staticmethod
def is_svc_upgrading_to_n(binary):
"""Given an RPC API class determine if the service is upgrading."""
rpcapis = {'cinder-scheduler': scheduler_rpcapi.SchedulerAPI,
'cinder-volume': volume_rpcapi.VolumeAPI,
'cinder-backup': backup_rpcapi.BackupAPI}
rpc_api = rpcapis[binary]
# If we are pinned to 1.3, then we are upgrading from M to N
return rpc_api.determine_obj_version_cap() == '1.3'
def start(self):
version_string = version.version_string()
LOG.info(_LI('Starting %(topic)s node (version %(version_string)s)'),
{'topic': self.topic, 'version_string': version_string})
self.model_disconnected = False
if self.coordination:
coordination.COORDINATOR.start()
self.manager.init_host(added_to_cluster=self.added_to_cluster,
service_id=Service.service_id)
LOG.debug("Creating RPC server for service %s", self.topic)
ctxt = context.get_admin_context()
endpoints = [self.manager]
endpoints.extend(self.manager.additional_endpoints)
obj_version_cap = objects.Service.get_minimum_obj_version(ctxt)
LOG.debug("Pinning object versions for RPC server serializer to %s",
obj_version_cap)
serializer = objects_base.CinderObjectSerializer(obj_version_cap)
target = messaging.Target(topic=self.topic, server=self.host)
self.rpcserver = rpc.get_server(target, endpoints, serializer)
self.rpcserver.start()
# NOTE(dulek): Kids, don't do that at home. We're relying here on
# oslo.messaging implementation details to keep backward compatibility
# with pre-Ocata services. This will not matter once we drop
# compatibility with them.
if self.topic == constants.VOLUME_TOPIC:
target = messaging.Target(
topic='%(topic)s.%(host)s' % {'topic': self.topic,
'host': self.host},
server=vol_utils.extract_host(self.host, 'host'))
self.backend_rpcserver = rpc.get_server(target, endpoints,
serializer)
self.backend_rpcserver.start()
# TODO(geguileo): In O - Remove the is_svc_upgrading_to_n part
if self.cluster and not self.is_svc_upgrading_to_n(self.binary):
LOG.info(_LI('Starting %(topic)s cluster %(cluster)s (version '
'%(version)s)'),
{'topic': self.topic, 'version': version_string,
'cluster': self.cluster})
target = messaging.Target(
topic='%s.%s' % (self.topic, self.cluster),
server=vol_utils.extract_host(self.cluster, 'host'))
serializer = objects_base.CinderObjectSerializer(obj_version_cap)
self.cluster_rpcserver = rpc.get_server(target, endpoints,
serializer)
self.cluster_rpcserver.start()
self.manager.init_host_with_rpc()
if self.report_interval:
pulse = loopingcall.FixedIntervalLoopingCall(
self.report_state)
pulse.start(interval=self.report_interval,
initial_delay=self.report_interval)
self.timers.append(pulse)
if self.periodic_interval:
if self.periodic_fuzzy_delay:
initial_delay = random.randint(0, self.periodic_fuzzy_delay)
else:
initial_delay = None
periodic = loopingcall.FixedIntervalLoopingCall(
self.periodic_tasks)
periodic.start(interval=self.periodic_interval,
initial_delay=initial_delay)
self.timers.append(periodic)
def basic_config_check(self):
"""Perform basic config checks before starting service."""
# Make sure report interval is less than service down time
if self.report_interval:
if CONF.service_down_time <= self.report_interval:
new_down_time = int(self.report_interval * 2.5)
LOG.warning(
_LW("Report interval must be less than service down "
"time. Current config service_down_time: "
"%(service_down_time)s, report_interval for this: "
"service is: %(report_interval)s. Setting global "
"service_down_time to: %(new_down_time)s"),
{'service_down_time': CONF.service_down_time,
'report_interval': self.report_interval,
'new_down_time': new_down_time})
CONF.set_override('service_down_time', new_down_time)
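                # Sketch with illustrative numbers: report_interval = 10 and
                # service_down_time = 10 trips the check above, overriding
                # service_down_time to int(10 * 2.5) = 25 so a couple of
                # heartbeats can be missed before the service is marked down.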
def _ensure_cluster_exists(self, context, service):
if self.cluster:
try:
cluster = objects.Cluster.get_by_id(context, None,
name=self.cluster,
binary=self.binary)
# If the cluster already exists, then the service replication
# fields must match those of the cluster unless the service
# is in error status.
error_states = (fields.ReplicationStatus.ERROR,
fields.ReplicationStatus.FAILOVER_ERROR)
if service.replication_status not in error_states:
for attr in ('replication_status', 'active_backend_id',
'frozen'):
if getattr(service, attr) != getattr(cluster, attr):
setattr(service, attr, getattr(cluster, attr))
except exception.ClusterNotFound:
# Since the cluster didn't exist, we copy replication fields
# from the service.
cluster = objects.Cluster(
context=context,
name=self.cluster,
binary=self.binary,
disabled=service.disabled,
replication_status=service.replication_status,
active_backend_id=service.active_backend_id,
frozen=service.frozen)
try:
cluster.create()
# Race condition occurred and another service created the
# cluster, so we can continue as it already exists.
except exception.ClusterExists:
pass
def _create_service_ref(self, context, rpc_version=None):
zone = CONF.storage_availability_zone
kwargs = {
'host': self.host,
'binary': self.binary,
'topic': self.topic,
'report_count': 0,
'availability_zone': zone,
'rpc_current_version': rpc_version or self.manager.RPC_API_VERSION,
'object_current_version': objects_base.OBJ_VERSIONS.get_current(),
}
# TODO(geguileo): In O unconditionally set cluster_name like above
# If we are upgrading we have to ignore the cluster value
if not self.is_upgrading_to_n:
kwargs['cluster_name'] = self.cluster
service_ref = objects.Service(context=context, **kwargs)
service_ref.create()
Service.service_id = service_ref.id
# TODO(geguileo): In O unconditionally ensure that the cluster exists
if not self.is_upgrading_to_n:
self._ensure_cluster_exists(context, service_ref)
# If we have updated the service_ref with replication data from
# the cluster it will be saved.
service_ref.save()
def __getattr__(self, key):
manager = self.__dict__.get('manager', None)
return getattr(manager, key)
@classmethod
def create(cls, host=None, binary=None, topic=None, manager=None,
report_interval=None, periodic_interval=None,
periodic_fuzzy_delay=None, service_name=None,
coordination=False, cluster=None):
"""Instantiates class and passes back application object.
:param host: defaults to CONF.host
:param binary: defaults to basename of executable
:param topic: defaults to bin_name - 'cinder-' part
:param manager: defaults to CONF.<topic>_manager
:param report_interval: defaults to CONF.report_interval
:param periodic_interval: defaults to CONF.periodic_interval
:param periodic_fuzzy_delay: defaults to CONF.periodic_fuzzy_delay
:param cluster: Defaults to None, as only some services will have it
"""
if not host:
host = CONF.host
if not binary:
binary = os.path.basename(inspect.stack()[-1][1])
if not topic:
topic = binary
if not manager:
subtopic = topic.rpartition('cinder-')[2]
manager = CONF.get('%s_manager' % subtopic, None)
if report_interval is None:
report_interval = CONF.report_interval
if periodic_interval is None:
periodic_interval = CONF.periodic_interval
if periodic_fuzzy_delay is None:
periodic_fuzzy_delay = CONF.periodic_fuzzy_delay
service_obj = cls(host, binary, topic, manager,
report_interval=report_interval,
periodic_interval=periodic_interval,
periodic_fuzzy_delay=periodic_fuzzy_delay,
service_name=service_name,
coordination=coordination,
cluster=cluster)
return service_obj
def stop(self):
# Try to shut the connection down, but if we get any sort of
        # errors, go ahead and ignore them, as we're shutting down anyway.
try:
self.rpcserver.stop()
if self.backend_rpcserver:
self.backend_rpcserver.stop()
if self.cluster_rpcserver:
self.cluster_rpcserver.stop()
except Exception:
pass
self.timers_skip = []
for x in self.timers:
try:
x.stop()
except Exception:
self.timers_skip.append(x)
if self.coordination:
try:
coordination.COORDINATOR.stop()
except Exception:
pass
super(Service, self).stop(graceful=True)
def wait(self):
skip = getattr(self, 'timers_skip', [])
for x in self.timers:
if x not in skip:
try:
x.wait()
except Exception:
pass
if self.rpcserver:
self.rpcserver.wait()
if self.backend_rpcserver:
self.backend_rpcserver.wait()
if self.cluster_rpcserver:
self.cluster_rpcserver.wait()
super(Service, self).wait()
def periodic_tasks(self, raise_on_error=False):
"""Tasks to be run at a periodic interval."""
ctxt = context.get_admin_context()
self.manager.periodic_tasks(ctxt, raise_on_error=raise_on_error)
def report_state(self):
"""Update the state of this service in the datastore."""
if not self.manager.is_working():
# NOTE(dulek): If manager reports a problem we're not sending
# heartbeats - to indicate that service is actually down.
LOG.error(_LE('Manager for service %(binary)s %(host)s is '
'reporting problems, not sending heartbeat. '
'Service will appear "down".'),
{'binary': self.binary,
'host': self.host})
return
ctxt = context.get_admin_context()
zone = CONF.storage_availability_zone
try:
try:
service_ref = objects.Service.get_by_id(ctxt,
Service.service_id)
except exception.NotFound:
LOG.debug('The service database object disappeared, '
'recreating it.')
self._create_service_ref(ctxt)
service_ref = objects.Service.get_by_id(ctxt,
Service.service_id)
service_ref.report_count += 1
if zone != service_ref.availability_zone:
service_ref.availability_zone = zone
service_ref.save()
# TODO(termie): make this pattern be more elegant.
if getattr(self, 'model_disconnected', False):
self.model_disconnected = False
LOG.error(_LE('Recovered model server connection!'))
except db_exc.DBConnectionError:
if not getattr(self, 'model_disconnected', False):
self.model_disconnected = True
LOG.exception(_LE('model server went away'))
# NOTE(jsbryant) Other DB errors can happen in HA configurations.
        # Such errors shouldn't kill this thread, so we handle them here.
except db_exc.DBError:
if not getattr(self, 'model_disconnected', False):
self.model_disconnected = True
LOG.exception(_LE('DBError encountered: '))
except Exception:
if not getattr(self, 'model_disconnected', False):
self.model_disconnected = True
LOG.exception(_LE('Exception encountered: '))
def reset(self):
self.manager.reset()
super(Service, self).reset()
class WSGIService(service.ServiceBase):
"""Provides ability to launch API from a 'paste' configuration."""
def __init__(self, name, loader=None):
"""Initialize, but do not start the WSGI server.
:param name: The name of the WSGI server given to the loader.
:param loader: Loads the WSGI application using the given name.
:returns: None
"""
self.name = name
self.manager = self._get_manager()
self.loader = loader or wsgi.Loader(CONF)
self.app = self.loader.load_app(name)
self.host = getattr(CONF, '%s_listen' % name, "0.0.0.0")
self.port = getattr(CONF, '%s_listen_port' % name, 0)
self.use_ssl = getattr(CONF, '%s_use_ssl' % name, False)
self.workers = (getattr(CONF, '%s_workers' % name, None) or
processutils.get_worker_count())
if self.workers and self.workers < 1:
worker_name = '%s_workers' % name
msg = (_("%(worker_name)s value of %(workers)d is invalid, "
"must be greater than 0.") %
{'worker_name': worker_name,
'workers': self.workers})
raise exception.InvalidInput(msg)
setup_profiler(name, self.host)
self.server = wsgi.Server(CONF,
name,
self.app,
host=self.host,
port=self.port,
use_ssl=self.use_ssl)
def _get_manager(self):
"""Initialize a Manager object appropriate for this service.
Use the service name to look up a Manager subclass from the
configuration and initialize an instance. If no class name
is configured, just return None.
:returns: a Manager instance, or None.
"""
fl = '%s_manager' % self.name
if fl not in CONF:
return None
manager_class_name = CONF.get(fl, None)
if not manager_class_name:
return None
manager_class = importutils.import_class(manager_class_name)
return manager_class()
def start(self):
"""Start serving this service using loaded configuration.
Also, retrieve updated port number in case '0' was passed in, which
indicates a random port should be used.
:returns: None
"""
if self.manager:
self.manager.init_host()
self.server.start()
self.port = self.server.port
def stop(self):
"""Stop serving this API.
:returns: None
"""
self.server.stop()
def wait(self):
"""Wait for the service to stop serving this API.
:returns: None
"""
self.server.wait()
def reset(self):
"""Reset server greenpool size to default.
:returns: None
"""
self.server.reset()
def process_launcher():
return service.ProcessLauncher(CONF)
# NOTE(vish): the global launcher is to maintain the existing
# functionality of calling service.serve +
# service.wait
_launcher = None
def serve(server, workers=None):
global _launcher
if _launcher:
raise RuntimeError(_('serve() can only be called once'))
_launcher = service.launch(CONF, server, workers=workers)
def wait():
LOG.debug('Full set of CONF:')
for flag in CONF:
flag_get = CONF.get(flag, None)
# hide flag contents from log if contains a password
# should use secret flag when switch over to openstack-common
if ("_password" in flag or "_key" in flag or
(flag == "sql_connection" and
("mysql:" in flag_get or "postgresql:" in flag_get))):
LOG.debug('%s : FLAG SET ', flag)
else:
LOG.debug('%(flag)s : %(flag_get)s',
{'flag': flag, 'flag_get': flag_get})
try:
_launcher.wait()
except KeyboardInterrupt:
_launcher.stop()
rpc.cleanup()
class Launcher(object):
def __init__(self):
self.launch_service = serve
self.wait = wait
def get_launcher():
# Note(lpetrut): ProcessLauncher uses green pipes which fail on Windows
# due to missing support of non-blocking I/O pipes. For this reason, the
# service must be spawned differently on Windows, using the ServiceLauncher
# class instead.
if os.name == 'nt':
return Launcher()
else:
return process_launcher()
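# A minimal usage sketch (assumed invocation following the serve()/wait()
# pattern above; the binary and topic values are illustrative):
#
#   server = Service.create(binary='cinder-volume',
#                           topic=constants.VOLUME_TOPIC)
#   serve(server)
#   wait()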
import logging
import requests
import simplejson
from time import sleep
API_URL = "https://home.nest.com"
CAMERA_WEBAPI_BASE = "https://webapi.camera.home.nest.com"
USER_AGENT = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_5) " \
"AppleWebKit/537.36 (KHTML, like Gecko) " \
"Chrome/75.0.3770.100 Safari/537.36"
URL_JWT = "https://nestauthproxyservice-pa.googleapis.com/v1/issue_jwt"
# Nest website's (public) API key
NEST_API_KEY = "AIzaSyAdkSIMNc51XGNEAYWasX9UOWkS5P6sZE4"
KNOWN_BUCKET_TYPES = [
# Thermostats
"device",
"shared",
# Protect
"topaz",
# Temperature sensors
"kryptonite",
# Cameras
"quartz"
]
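# Bucket object keys take the form "<bucket_type>.<serial_number>", e.g.
# "device.<sn>" for a thermostat; _get_devices() and update() below split on
# the '.' to recover each device's serial number.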
_LOGGER = logging.getLogger(__name__)
class NestAPI():
def __init__(self, user_id, access_token, issue_token, cookie, region):
"""Badnest Google Nest API interface."""
self.device_data = {}
self._wheres = {}
self._user_id = user_id
self._access_token = access_token
self._session = requests.Session()
self._session.headers.update({
"Referer": "https://home.nest.com/",
"User-Agent": USER_AGENT,
})
self._issue_token = issue_token
self._cookie = cookie
self._czfe_url = None
self._camera_url = f'https://nexusapi-{region}1.camera.home.nest.com'
self.cameras = []
self.thermostats = []
self.temperature_sensors = []
self.hotwatercontrollers = []
self.switches = []
self.protects = []
self.login()
self._get_devices()
self.update()
def __getitem__(self, name):
"""Get attribute."""
return getattr(self, name)
def __setitem__(self, name, value):
"""Set attribute."""
return setattr(self, name, value)
def __delitem__(self, name):
"""Delete attribute."""
return delattr(self, name)
def __contains__(self, name):
"""Has attribute."""
return hasattr(self, name)
def login(self):
if self._issue_token and self._cookie:
self._login_google(self._issue_token, self._cookie)
self._login_dropcam()
def _login_google(self, issue_token, cookie):
headers = {
'User-Agent': USER_AGENT,
'Sec-Fetch-Mode': 'cors',
'X-Requested-With': 'XmlHttpRequest',
'Referer': 'https://accounts.google.com/o/oauth2/iframe',
'cookie': cookie
}
r = self._session.get(url=issue_token, headers=headers)
access_token = r.json()['access_token']
headers = {
'User-Agent': USER_AGENT,
'Authorization': 'Bearer ' + access_token,
'x-goog-api-key': NEST_API_KEY,
'Referer': 'https://home.nest.com'
}
params = {
"embed_google_oauth_access_token": True,
"expire_after": '3600s',
"google_oauth_access_token": access_token,
"policy_id": 'authproxy-oauth-policy'
}
r = self._session.post(url=URL_JWT, headers=headers, params=params)
self._user_id = r.json()['claims']['subject']['nestId']['id']
self._access_token = r.json()['jwt']
self._session.headers.update({
"Authorization": f"Basic {self._access_token}",
})
def _login_dropcam(self):
self._session.post(
f"{API_URL}/dropcam/api/login",
data={"access_token": self._access_token}
)
def _get_cameras_updates_pt2(self, sn):
try:
headers = {
'User-Agent': USER_AGENT,
'X-Requested-With': 'XmlHttpRequest',
'Referer': 'https://home.nest.com/',
'cookie': f"user_token={self._access_token}"
}
r = self._session.get(
f"{CAMERA_WEBAPI_BASE}/api/cameras.get_with_properties?uuid="+sn, headers=headers)
            if str(r.json()["status"]).startswith('5'):
                _LOGGER.debug('The Google proxy server sometimes gets a '
                              'bit unhappy; trying again')
sleep(4)
r = self._session.get(
f"{CAMERA_WEBAPI_BASE}/api/cameras.get_with_properties?uuid="+sn, headers=headers)
sensor_data = r.json()["items"][0]
self.device_data[sn]['chime_state'] = \
sensor_data["properties"]["doorbell.indoor_chime.enabled"]
except (requests.exceptions.RequestException, IndexError) as e:
_LOGGER.error(e)
_LOGGER.error('Failed to get camera update pt2, trying again')
self._get_cameras_updates_pt2(sn)
except KeyError:
_LOGGER.debug('Failed to get camera update pt2, trying to log in again')
self.login()
self._get_cameras_updates_pt2(sn)
def _get_devices(self):
try:
r = self._session.post(
f"{API_URL}/api/0.1/user/{self._user_id}/app_launch",
json={
"known_bucket_types": ["buckets"],
"known_bucket_versions": [],
},
headers={"Authorization": f"Basic {self._access_token}"},
)
self._czfe_url = r.json()["service_urls"]["urls"]["czfe_url"]
buckets = r.json()['updated_buckets'][0]['value']['buckets']
for bucket in buckets:
if bucket.startswith('topaz.'):
sn = bucket.replace('topaz.', '')
self.protects.append(sn)
self.device_data[sn] = {}
elif bucket.startswith('kryptonite.'):
sn = bucket.replace('kryptonite.', '')
self.temperature_sensors.append(sn)
self.device_data[sn] = {}
elif bucket.startswith('device.'):
sn = bucket.replace('device.', '')
self.thermostats.append(sn)
self.temperature_sensors.append(sn)
self.hotwatercontrollers.append(sn)
self.device_data[sn] = {}
elif bucket.startswith('quartz.'):
sn = bucket.replace('quartz.', '')
self.cameras.append(sn)
self.switches.append(sn)
self.device_data[sn] = {}
except requests.exceptions.RequestException as e:
_LOGGER.error(e)
_LOGGER.error('Failed to get devices, trying again')
            return self._get_devices()
except KeyError:
_LOGGER.debug('Failed to get devices, trying to log in again')
self.login()
            return self._get_devices()
@classmethod
def _map_nest_protect_state(cls, value):
if value == 0:
return "Ok"
elif value == 1 or value == 2:
return "Warning"
elif value == 3:
return "Emergency"
else:
return "Unknown"
def update(self):
try:
# To get friendly names
r = self._session.post(
f"{API_URL}/api/0.1/user/{self._user_id}/app_launch",
json={
"known_bucket_types": ["where"],
"known_bucket_versions": [],
},
headers={"Authorization": f"Basic {self._access_token}"},
)
for bucket in r.json()["updated_buckets"]:
sensor_data = bucket["value"]
sn = bucket["object_key"].split('.')[1]
if bucket["object_key"].startswith(
f"where.{sn}"):
wheres = sensor_data['wheres']
for where in wheres:
self._wheres[where['where_id']] = where['name']
r = self._session.post(
f"{API_URL}/api/0.1/user/{self._user_id}/app_launch",
json={
"known_bucket_types": KNOWN_BUCKET_TYPES,
"known_bucket_versions": [],
},
headers={"Authorization": f"Basic {self._access_token}"},
)
for bucket in r.json()["updated_buckets"]:
sensor_data = bucket["value"]
sn = bucket["object_key"].split('.')[1]
# Thermostats (thermostat and sensors system)
if bucket["object_key"].startswith(
f"shared.{sn}"):
self.device_data[sn]['current_temperature'] = \
sensor_data["current_temperature"]
self.device_data[sn]['target_temperature'] = \
sensor_data["target_temperature"]
self.device_data[sn]['hvac_ac_state'] = \
sensor_data["hvac_ac_state"]
self.device_data[sn]['hvac_heater_state'] = \
sensor_data["hvac_heater_state"]
self.device_data[sn]['target_temperature_high'] = \
sensor_data["target_temperature_high"]
self.device_data[sn]['target_temperature_low'] = \
sensor_data["target_temperature_low"]
self.device_data[sn]['can_heat'] = \
sensor_data["can_heat"]
self.device_data[sn]['can_cool'] = \
sensor_data["can_cool"]
self.device_data[sn]['mode'] = \
sensor_data["target_temperature_type"]
if self.device_data[sn]['hvac_ac_state']:
self.device_data[sn]['action'] = "cooling"
elif self.device_data[sn]['hvac_heater_state']:
self.device_data[sn]['action'] = "heating"
else:
self.device_data[sn]['action'] = "off"
# Thermostats, pt 2
elif bucket["object_key"].startswith(
f"device.{sn}"):
self.device_data[sn]['name'] = self._wheres[
sensor_data['where_id']
]
                # When it acts as a sensor
if 'backplate_temperature' in sensor_data:
self.device_data[sn]['temperature'] = \
sensor_data['backplate_temperature']
if 'battery_level' in sensor_data:
self.device_data[sn]['battery_level'] = \
sensor_data['battery_level']
if sensor_data.get('description', None):
self.device_data[sn]['name'] += \
f' ({sensor_data["description"]})'
self.device_data[sn]['name'] += ' Thermostat'
self.device_data[sn]['has_fan'] = \
sensor_data["has_fan"]
self.device_data[sn]['fan'] = \
sensor_data["fan_timer_timeout"]
self.device_data[sn]['current_humidity'] = \
sensor_data["current_humidity"]
self.device_data[sn]['target_humidity'] = \
sensor_data["target_humidity"]
self.device_data[sn]['target_humidity_enabled'] = \
sensor_data["target_humidity_enabled"]
if sensor_data["eco"]["mode"] == 'manual-eco' or \
sensor_data["eco"]["mode"] == 'auto-eco':
self.device_data[sn]['eco'] = True
else:
self.device_data[sn]['eco'] = False
# Hot water
# - Status
self.device_data[sn]['has_hot_water_control'] = \
sensor_data["has_hot_water_control"]
self.device_data[sn]['hot_water_status'] = \
sensor_data["hot_water_active"]
self.device_data[sn]['hot_water_actively_heating'] = \
sensor_data["hot_water_boiling_state"]
self.device_data[sn]['hot_water_away_active'] = \
sensor_data["hot_water_away_active"]
# - Status/Settings
self.device_data[sn]['hot_water_timer_mode'] = \
sensor_data["hot_water_mode"]
self.device_data[sn]['hot_water_away_setting'] = \
sensor_data["hot_water_away_enabled"]
self.device_data[sn]['hot_water_boost_setting'] = \
sensor_data["hot_water_boost_time_to_end"]
# Protect
elif bucket["object_key"].startswith(
f"topaz.{sn}"):
self.device_data[sn]['name'] = self._wheres[
sensor_data['where_id']
]
if sensor_data.get('description', None):
self.device_data[sn]['name'] += \
f' ({sensor_data["description"]})'
self.device_data[sn]['name'] += ' Protect'
self.device_data[sn]['co_status'] = \
self._map_nest_protect_state(sensor_data['co_status'])
self.device_data[sn]['smoke_status'] = \
self._map_nest_protect_state(sensor_data['smoke_status'])
self.device_data[sn]['battery_health_state'] = \
self._map_nest_protect_state(sensor_data['battery_health_state'])
# Temperature sensors
elif bucket["object_key"].startswith(
f"kryptonite.{sn}"):
self.device_data[sn]['name'] = self._wheres[
sensor_data['where_id']
]
if sensor_data.get('description', None):
self.device_data[sn]['name'] += \
f' ({sensor_data["description"]})'
self.device_data[sn]['name'] += ' Temperature'
self.device_data[sn]['temperature'] = \
sensor_data['current_temperature']
self.device_data[sn]['battery_level'] = \
sensor_data['battery_level']
# Cameras
elif bucket["object_key"].startswith(
f"quartz.{sn}"):
self.device_data[sn]['name'] = self._wheres[sensor_data['where_id']]
self.device_data[sn]['model'] = \
sensor_data["model"]
self.device_data[sn]['streaming_state'] = \
sensor_data["streaming_state"]
if 'indoor_chime' in sensor_data["capabilities"]:
self.device_data[sn]['indoor_chime'] = True
self._get_cameras_updates_pt2(sn)
else:
self.device_data[sn]['indoor_chime'] = False
except simplejson.errors.JSONDecodeError as e:
_LOGGER.error(e)
if r.status_code != 200 and r.status_code != 502:
_LOGGER.error('Information for further debugging: ' +
'return code {} '.format(r.status_code) +
'and returned text {}'.format(r.text))
if r.status_code == 502:
_LOGGER.error('Error 502, Failed to update, retrying in 30s')
sleep(30)
self.update()
except requests.exceptions.RequestException as e:
_LOGGER.error(e)
_LOGGER.error('Failed to update, trying again')
self.update()
except KeyError:
_LOGGER.debug('Failed to update, trying to log in again')
self.login()
self.update()
def thermostat_set_temperature(self, device_id, temp, temp_high=None):
if device_id not in self.thermostats:
return
try:
if temp_high is None:
self._session.post(
f"{self._czfe_url}/v5/put",
json={
"objects": [
{
"object_key": f'shared.{device_id}',
"op": "MERGE",
"value": {"target_temperature": temp},
}
]
},
headers={"Authorization": f"Basic {self._access_token}"},
)
else:
self._session.post(
f"{self._czfe_url}/v5/put",
json={
"objects": [
{
"object_key": f'shared.{device_id}',
"op": "MERGE",
"value": {
"target_temperature_low": temp,
"target_temperature_high": temp_high,
},
}
]
},
headers={"Authorization": f"Basic {self._access_token}"},
)
except requests.exceptions.RequestException as e:
_LOGGER.error(e)
_LOGGER.error('Failed to set temperature, trying again')
self.thermostat_set_temperature(device_id, temp, temp_high)
except KeyError:
_LOGGER.debug('Failed to set temperature, trying to log in again')
self.login()
self.thermostat_set_temperature(device_id, temp, temp_high)
def thermostat_set_target_humidity(self, device_id, humidity):
if device_id not in self.thermostats:
return
try:
self._session.post(
f"{self._czfe_url}/v5/put",
json={
"objects": [
{
"object_key": f'device.{device_id}',
"op": "MERGE",
"value": {"target_humidity": humidity},
}
]
},
headers={"Authorization": f"Basic {self._access_token}"},
)
except requests.exceptions.RequestException as e:
_LOGGER.error(e)
_LOGGER.error('Failed to set humidity, trying again')
self.thermostat_set_target_humidity(device_id, humidity)
except KeyError:
_LOGGER.debug('Failed to set humidity, trying to log in again')
self.login()
self.thermostat_set_target_humidity(device_id, humidity)
def thermostat_set_mode(self, device_id, mode):
if device_id not in self.thermostats:
return
try:
self._session.post(
f"{self._czfe_url}/v5/put",
json={
"objects": [
{
"object_key": f'shared.{device_id}',
"op": "MERGE",
"value": {"target_temperature_type": mode},
}
]
},
headers={"Authorization": f"Basic {self._access_token}"},
)
except requests.exceptions.RequestException as e:
_LOGGER.error(e)
_LOGGER.error('Failed to set mode, trying again')
self.thermostat_set_mode(device_id, mode)
except KeyError:
_LOGGER.debug('Failed to set mode, trying to log in again')
self.login()
self.thermostat_set_mode(device_id, mode)
def thermostat_set_fan(self, device_id, date):
if device_id not in self.thermostats:
return
try:
self._session.post(
f"{self._czfe_url}/v5/put",
json={
"objects": [
{
"object_key": f'device.{device_id}',
"op": "MERGE",
"value": {"fan_timer_timeout": date},
}
]
},
headers={"Authorization": f"Basic {self._access_token}"},
)
except requests.exceptions.RequestException as e:
_LOGGER.error(e)
_LOGGER.error('Failed to set fan, trying again')
self.thermostat_set_fan(device_id, date)
except KeyError:
_LOGGER.debug('Failed to set fan, trying to log in again')
self.login()
self.thermostat_set_fan(device_id, date)
def thermostat_set_eco_mode(self, device_id, state):
if device_id not in self.thermostats:
return
try:
mode = 'manual-eco' if state else 'schedule'
self._session.post(
f"{self._czfe_url}/v5/put",
json={
"objects": [
{
"object_key": f'device.{device_id}',
"op": "MERGE",
"value": {"eco": {"mode": mode}},
}
]
},
headers={"Authorization": f"Basic {self._access_token}"},
)
except requests.exceptions.RequestException as e:
_LOGGER.error(e)
_LOGGER.error('Failed to set eco, trying again')
self.thermostat_set_eco_mode(device_id, state)
except KeyError:
_LOGGER.debug('Failed to set eco, trying to log in again')
self.login()
self.thermostat_set_eco_mode(device_id, state)
def hotwater_set_boost(self, device_id, time):
if device_id not in self.hotwatercontrollers:
return
try:
self._session.post(
f"{self._czfe_url}/v5/put",
json={
"objects": [
{
"object_key": f'device.{device_id}',
"op": "MERGE",
"value": {"hot_water_boost_time_to_end": time},
}
]
},
headers={"Authorization": f"Basic {self._access_token}"},
)
except requests.exceptions.RequestException as e:
_LOGGER.error(e)
_LOGGER.error('Failed to boost hot water, trying again')
self.hotwater_set_boost(device_id, time)
except KeyError:
_LOGGER.debug('Failed to boost hot water, trying to log in again')
self.login()
self.hotwater_set_boost(device_id, time)
def hotwater_set_away_mode(self, device_id, away_mode):
if device_id not in self.hotwatercontrollers:
return
try:
self._session.post(
f"{self._czfe_url}/v5/put",
json={
"objects": [
{
"object_key": f'device.{device_id}',
"op": "MERGE",
"value": {"hot_water_away_enabled": away_mode},
}
]
},
headers={"Authorization": f"Basic {self._access_token}"},
)
except requests.exceptions.RequestException as e:
_LOGGER.error(e)
_LOGGER.error('Failed to set hot water away mode, trying again')
self.hotwater_set_away_mode(device_id, away_mode)
except KeyError:
_LOGGER.debug('Failed to set hot water away mode, '
'trying to log in again')
self.login()
self.hotwater_set_away_mode(device_id, away_mode)
def hotwater_set_mode(self, device_id, mode):
if device_id not in self.hotwatercontrollers:
return
try:
self._session.post(
f"{self._czfe_url}/v5/put",
json={
"objects": [
{
"object_key": f'device.{device_id}',
"op": "MERGE",
"value": {"hot_water_mode": mode},
}
]
},
headers={"Authorization": f"Basic {self._access_token}"},
)
except requests.exceptions.RequestException as e:
_LOGGER.error(e)
_LOGGER.error('Failed to set hot water mode, trying again')
            self.hotwater_set_mode(device_id, mode)
except KeyError:
_LOGGER.debug('Failed to set hot water mode, '
'trying to log in again')
self.login()
            self.hotwater_set_mode(device_id, mode)
def _camera_set_properties(self, device_id, property, value):
if device_id not in self.cameras:
return
try:
headers = {
'User-Agent': USER_AGENT,
'X-Requested-With': 'XmlHttpRequest',
'Referer': 'https://home.nest.com/',
'cookie': f"user_token={self._access_token}"
}
r = self._session.post(
f"{CAMERA_WEBAPI_BASE}/api/dropcams.set_properties",
data={property: value, "uuid": device_id}, headers=headers
)
return r.json()["items"]
except requests.exceptions.RequestException as e:
_LOGGER.error(e)
_LOGGER.error('Failed to set camera property, trying again')
return self._camera_set_properties(device_id, property, value)
except KeyError:
            _LOGGER.debug('Failed to set camera property, '
                          'trying to log in again')
self.login()
return self._camera_set_properties(device_id, property, value)
def camera_turn_off(self, device_id):
if device_id not in self.cameras:
return
return self._camera_set_properties(device_id, "streaming.enabled", "false")
def camera_turn_on(self, device_id):
if device_id not in self.cameras:
return
return self._camera_set_properties(device_id, "streaming.enabled", "true")
def camera_get_image(self, device_id, now):
if device_id not in self.cameras:
return
try:
headers = {
'User-Agent': USER_AGENT,
'X-Requested-With': 'XmlHttpRequest',
'Referer': 'https://home.nest.com/',
'cookie': f"user_token={self._access_token}"
}
r = self._session.get(
f'{self._camera_url}/get_image?uuid={device_id}' +
f'&cachebuster={now}',
headers=headers
# headers={"cookie": f'user_token={self._access_token}'},
)
return r.content
except requests.exceptions.RequestException as e:
_LOGGER.error(e)
_LOGGER.error('Failed to get camera image, trying again')
return self.camera_get_image(device_id, now)
except KeyError:
_LOGGER.debug('Failed to get camera image, trying to log in again')
self.login()
return self.camera_get_image(device_id, now)
def camera_turn_chime_off(self, device_id):
if device_id not in self.switches:
return
return self._camera_set_properties(device_id, "doorbell.indoor_chime.enabled", "false")
def camera_turn_chime_on(self, device_id):
if device_id not in self.switches:
return
return self._camera_set_properties(device_id, "doorbell.indoor_chime.enabled", "true")
|
|
#!/usr/bin/env python
from __future__ import division
# NOTE: optparse deprecated in python3, but still supported
from optparse import OptionParser
import math
import sys
import numpy
import random
import time
import os
def read_cmd():
"""Function for reading command line options. Returns tuple options, args."""
usage = "usage: %prog [options] input_file"
parser = OptionParser(usage)
parser.add_option('-n','--nsample',dest='nsample', type='int', default=1, help='Number of samples.')
parser.add_option('-S','--subset',dest='subset', type='int', default=0, help='Number of representative molecules.')
parser.add_option('-c','--cycles',dest='cycles', type='int', default=100, help='Number of cycles for geometries reduction.')
parser.add_option('-j','--ncores',dest='ncores', type='int', default=1, help='Number of cores for parallel execution of geometries reduction.')
parser.add_option('-J','--jobspercore',dest='jobs', type='int', default=1, help='Number of reduction jobs per one core.')
parser.add_option('-d','--de',dest='de', type='float', default=0.02, help='Bin step in eV. Default = 0.02 ')
parser.add_option('-s','--sigma',dest='sigma', type='float', help='Parameter for Gaussian broadening.')
parser.add_option('-t','--tau',dest='tau', type='float', default=0.0, help='Parameter for Lorentzian broadening.')
# --smooth (perform running average?)
   parser.add_option('--notrans', dest='notrans', action="store_true", default=False,
                     help='No transition dipole moments. Spectrum will be normalized to unity. Useful for ionizations.')
return parser.parse_args(sys.argv[1:])
# Some constants
EVtoJ = 1.602177e-19 # Joule
EPS = 8.854188e-12
PI = math.pi
HPRIME = 6.626070e-34/(2*PI)
C = 299792458
AUtoCm = 8.478354e-30
COEFF = PI * AUtoCm**2 * 1e4 /(3 * HPRIME * EPS * C)
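# A hedged reading of COEFF: it appears to convert |mu|^2 (transition dipole
# in atomic units, hence AUtoCm**2) times the excitation energy into an
# absorption cross section, with the 1e4 factor converting m^2 to cm^2.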
test = 1 # 1 for Kolmogorov-Smirnov test, 2 for Kuiper test, 3 for differences sum, 4 for integral differences sum
# not needed anymore with numpy package
#def weightedMean(values, weights):
# totalWeight = sum(weights)
# totalValue = 0
# for i in range(len(values)):
# totalValue += values[i]*weights[i]
# weightedMean = totalValue/totalWeight
# return weightedMean
def weightedDev(values, weights):
    # Accept plain lists (as the callers pass) by converting to numpy arrays;
    # list - float would otherwise raise a TypeError below.
    values = numpy.asarray(values)
    weights = numpy.asarray(weights)
    mean = numpy.average(values, weights=weights)
    variance = numpy.average((values - mean) ** 2, weights=weights)
    return math.sqrt(variance)
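# Illustrative check for weightedDev (values assumed, not from any input):
# weightedDev([1.0, 2.0, 3.0], [1.0, 1.0, 2.0]) -> sqrt(0.6875) ~= 0.829,
# since the weighted mean is 2.25.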
class Spectrum(object):
"""Base class spectrum for reflection principle without broadening"""
def __init__(self, nsample, deltaE, notrans, subset, cycles, ncores, jobs):
self.trans = []
self.intensity = []
self.exc = []
self.energies = []
self.samples = []
self.subsamples = []
self.restsamples = []
self.subsamplesact = []
self.restsamplesact = []
self.maxe = 0.0
self.nsample = nsample
self.notrans = False
self.de = deltaE # in eV
self.subset = subset
self.cycles = cycles
self.ncores = ncores
self.jobs = jobs
self.pid = os.getpid()
if notrans == True:
self.notrans = True
def trans2intensity(self):
self.intensity = [ 0.0 for i in range(int(self.maxe/self.de))]
for i in range(len(self.trans)):
index = int (round( (self.maxe-self.exc[i]) / self.de ) )
trans2 = self.trans[i][0]**2 + self.trans[i][1]**2 + self.trans[i][2]**2
trans2 *= COEFF * self.exc[i] / self.de / self.nsample
self.intensity[index] += trans2
def normalize(self):
# self.intensity = [ 0.0 for i in range(int( self.maxe/self.de )) ]
# self.intensity = [0] * int( self.maxe/self.de )
for j in range(int( self.maxe/self.de )):
self.intensity.append(0.0)
for i in range(len(self.exc)):
index = int( (self.maxe-self.exc[i]) / self.de )
self.intensity[index] += 1.0 / self.nsample / self.de
def cross2eps(self):
"""Conversion to molar exctinction coefficient"""
for i in range(len(self.intensity)):
self.intensity[i] *= 6.022140e20 / math.log(10)
def KStest(self):
forig = 0.0
fact = 0.0
d = 0.0
for i in range(len(self.origintensity)):
forig += self.origintensity[i]
fact += self.intensity[i]
dact = abs(forig-fact) # KS test
if dact > d:
d = dact
return d
def kuiper(self):
forig = 0.0
fact = 0.0
dminus = 0.0
dplus = 0.0
for i in range(len(self.origintensity)):
forig += self.origintensity[i]
fact += self.intensity[i]
            dminusact = forig-fact
            dplusact = -dminusact
if dminusact > dminus:
dminus = dminusact
if dplusact > dplus:
dplus = dplusact
d = dplus+dminus
return d
def diffsum(self):
d = 0.0
for i in range(len(self.origintensity)):
d += abs(self.origintensity[i] - self.intensity[i])
return d
def intdiffsum(self):
forig = 0.0
fact = 0.0
d = 0.0
for i in range(len(self.origintensity)):
forig += self.origintensity[i]
fact += self.intensity[i]
d += abs(forig-fact)
return d
def calc_diff(self):
if test == 1:
d = self.KStest()
elif test == 2:
d = self.kuiper()
elif test == 3:
d = self.diffsum()
else:
d = self.intdiffsum()
return d
def select_subset(self):
self.subsamples = random.sample(range(1, len(self.exc_orig), 1),self.subset)
self.restsamples = list(set(range(1, len(self.exc_orig), 1)) - set(self.subsamples))
def select_subsetact(self):
self.subsamplesact = random.sample(range(1, len(self.exc_orig), 1),self.subset)
self.restsamplesact = list(set(range(1, len(self.exc_orig), 1)) - set(self.subsamplesact))
def swap_samples(self):
random_subindex = random.randrange(len(self.subsamplesact))
random_restindex = random.randrange(len(self.restsamplesact))
self.subsamplesact[random_subindex], self.restsamplesact[random_restindex] = self.restsamplesact[random_restindex], self.subsamplesact[random_subindex]
def SA(self,test=0):
pi = 0.70
pf = 0.30
if test == 1:
it = 1
d = self.calc_diff()
diffmax = 0
diffmin = d
else:
self.select_subset()
self.exc = list( self.exc_orig[i] for i in self.subsamples )
self.trans = list( self.trans_orig[i] for i in self.subsamples )
if self.notrans == True:
self.normalize()
else:
self.trans2intensity()
self.finish_spectrum()
d = self.calc_diff()
            if self.subset >= 8:
                itmax = self.subset/4
            elif self.subset >= 4:
                itmax = self.subset/2
            elif self.subset >= 2:
                itmax = self.subset
            else:
                itmax = 2*self.subset
            itmin = itmax/2
            itc = math.exp((math.log(itmax)-math.log(itmin))/self.cycles)
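            # Geometric schedules: the temperature decays from ti to tf by a
            # constant factor tc per cycle, while the Markov chain length
            # grows from itmin to itmax by a factor itc per cycle.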
            it = itmin
            start = time.time()
            ti, tf = self.SA(1)
            end = time.time()
            m, s = divmod(int(round((itmax+itmin)*(end-start)/2)), 60)
            h, m = divmod(m, 60)
            tc = math.exp((math.log(tf)-math.log(ti))/self.cycles)
            temp = ti
            toprint = str(self.pid)+":\tInitial temperature = "+str(ti)+", Final temperature = "+str(tf)+", Temperature coefficient = "+str(tc)
            toprint += "\n\tMarkov Chain Length coefficient = "+str(itc)+", Initial D-min = "+str(d)
            toprint += "\n\tEstimated run time: "+str(h)+" hours "+str(m)+" minutes "+str(s)+" seconds"
            print(toprint)
            sys.stdout.flush()
dact = d
for j in range(self.cycles):
for k in range(int(round(it))):
self.subsamplesact = self.subsamples[:]
self.restsamplesact = self.restsamples[:]
self.swap_samples()
self.exc = list( self.exc_orig[i] for i in self.subsamplesact )
self.trans = list( self.trans_orig[i] for i in self.subsamplesact )
if self.notrans == True:
self.normalize()
else:
self.trans2intensity()
self.finish_spectrum()
dact = self.calc_diff()
if test == 1:
prob = 1
if dact > d:
if (dact-d) > diffmax:
diffmax = dact-d
elif (dact-d) < diffmin:
diffmin = dact-d
else:
if dact < d:
prob = 1.0
else:
prob = math.exp((d - dact)/ temp)
# print("d - dact"+str(d - dact)+"temp"+str(temp)+"prob"+str(prob))
if prob >= random.random():
self.subsamples = self.subsamplesact
self.restsamples = self.restsamplesact
d = dact
# print("Sample"+str(j)+"Round"+str(k)+": D-min ="+str(d)+"Temperature : T = "+str(temp))
if test == 0:
temp *= tc
it *= itc
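        # The calibration run converts the largest/smallest uphill moves seen
        # into initial/final temperatures via p = exp(-diff / T), i.e.
        # T = -diff / log(p) with p_i = 0.70 and p_f = 0.30.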
if test == 1:
return -diffmax/math.log(pi), -diffmin/math.log(pf)
else:
return d
def random_search(self):
d = self.calc_diff()
print("Initial sample : D-min = "+str(d))
dact = d
for i in range(self.cycles):
self.select_subsetact()
self.exc = list( self.exc_orig[i] for i in self.subsamplesact )
self.trans = list( self.trans_orig[i] for i in self.subsamplesact )
if self.notrans == True:
self.normalize()
else:
self.trans2intensity()
self.finish_spectrum()
dact = self.calc_diff()
if dact <= d:
self.subsamples = self.subsamplesact
self.restsamples = self.restsamplesact
d = dact
print("Sample"+str(i)+": D-min ="+str(d))
def reduce_geoms(self,infile):
if self.notrans == True:
self.normalize()
else:
self.trans2intensity()
self.finish_spectrum()
toprint = "Original spectrum sigma: "+str(self.sigma)
toprint += "\nPrinting original spectra:"
self.writeoutall(infile,toprint)
sys.stdout.flush()
self.origintensity = self.intensity[:]
self.exc_orig = self.exc
self.trans_orig = self.trans
self.nsample = self.subset
jobs = []
for i in range(self.ncores):
pid = os.fork()
if pid == 0:
for j in range(self.jobs):
                    self.pid = str(os.getpid())+"_"+str(j)
random.seed()
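                    # random.jumpahead exists only on Python 2; a Python 3
                    # port would seed per process instead, e.g.
                    # random.seed(os.getpid() + j).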
random.jumpahead(os.getpid())
d = self.SA()
toprint = str(self.pid)+":\tFinal D-min = "+str(d)
toprint += "\n\tReduced spectrum sigma: "+str(self.sigma)
toprint += "\n\tPrinting reduced spectra:"
self.writeoutall(infile,toprint)
self.writegeoms(infile)
sys.stdout.flush()
os._exit(0)
jobs.append(pid)
for job in jobs:
os.waitpid(job,0)
def read_data(self, infile):
i = 0
with open(infile, "r") as f:
for line in f:
#if (self.notrans == True and self.subset == 0 and i >= self.nsample) or (self.notrans == True and self.subset > 0 and i >= 2*self.nsample) or (self.notrans == False and self.subset == 0 and i >= 2*self.nsample) or (self.notrans == False and self.subset > 0 and i >= 3*self.nsample):
# break
if (i % 3 == 1 and self.subset > 0 and self.notrans == False) or (i % 2 == 1 and self.subset == 0 and self.notrans == False):
temp = line.split()
try:
# assigning transition dipole moments as a tuple
self.trans.append( ( float(temp[0]), float(temp[1]), float(temp[2]) ) )
                    except (ValueError, IndexError):
print("Error: Corrupted line "+str(i+1)+" in file "+infile)
print("I expected 3 columns of transition dipole moments, got:")
print(line)
#raise
sys.exit(1)
elif (i % 3 == 2 and self.subset > 0 and self.notrans == False) or (i % 2 == 1 and self.subset > 0 and self.notrans ==True):
try:
self.samples.append( line )
except:
print("Error when reading file "+infile+" on line: "+str(i+1))
print("I expected name of the file, but got:"+line)
# raise
sys.exit(1)
else:
try:
self.exc.append(float( line ))
                    except ValueError:
print("Error when reading file "+infile+" on line: "+str(i+1))
print("I expected excitation energy, but got:"+line)
# raise
sys.exit(1)
i += 1
# assert(len(self.exc)==len(self.trans))
if len(self.exc) != len(self.trans) and not self.notrans:
print("Error: Number of excitations does not match number of transition dipole moments.")
sys.exit(1)
if len(self.exc) != len(self.samples) and self.subset > 0:
print("Error: Number of excitations does not match number of samples.")
sys.exit(1)
self.maxe = max(self.exc)+0.5
self.minE = min(self.exc)-0.5
def finish_spectrum(self):
self.energies = [ 0.0 for i in range(len(self.intensity)-1, -1, -1)]
for i in range(len(self.intensity)-1, -1, -1):
self.energies[i] = (self.maxe-i*self.de)
def writeout(self, xunit, fileout):
units = {}
units['nm'] = 1239.8
units['ev'] = 1.
units['cm'] = 8065.7
f = open(fileout, "w")
for i in range(len(self.intensity)-1, -1, -1):
if self.energies[i] < self.minE:
continue
if xunit == "nm":
if units[xunit]/self.energies[i] < 1000:
f.write('%f %e \n' % (units[xunit]/self.energies[i], self.intensity[i]))
else:
f.write('%f %e \n' % (self.energies[i]*units[xunit], self.intensity[i]))
f.close()
def writeoutall(self,infile,message=""):
name = infile.split(".")[0] # take the first part of the input file, before first dot
yunits="cm^2*molecule^-1"
xunits = [ "ev", "nm", "cm"]
toprint = message
for un in xunits:
outfile="absspec."+name+"."+un+"."+str(self.nsample)+".cross."+str(self.pid)+".dat"
toprint += "\n\tPrinting spectrum in units [ "+un+", "+yunits+"] to "+outfile
self.writeout(un, outfile)
        # Now convert to molar extinction coefficient
helpints = self.intensity[:]
self.cross2eps()
yunits="dm^3*mol^-1*cm^-1"
for un in xunits:
outfile="absspec."+name+"."+un+"."+str(self.nsample)+".molar."+str(self.pid)+".dat"
toprint += "\n\tPrinting spectrum in units [ "+un+", "+yunits+"] to "+outfile
self.writeout(un, outfile)
print(toprint)
self.intensity = helpints
def writegeoms(self,infile):
name = infile.split(".")[0]
outfile = name+"."+str(self.nsample)+"."+str(self.pid)+".geoms"
print(str(self.pid)+":\tPrinting geometries of reduced spectrum to "+outfile)
f = open(outfile, "w")
for i in self.subsamples:
f.write('%s' % (self.samples[i]))
f.close()
class SpectrumBroad(Spectrum):
"""Derived class for spectra with empirical gaussian and/or lorentzian broadening"""
def __init__(self, nsample, deltaE, sigma, tau, notrans, subset, cycles, ncores, jobs):
self.trans = []
self.intensity = []
self.exc = []
self.energies = []
self.samples = []
self.subsamples = []
self.restsamples = []
self.subsamplesact = []
self.restsamplesact = []
self.int_tau = []
self.int_sigma = []
self.acs = []
self.sigma = sigma
self.tau = tau
self.nsample = nsample
self.notrans = False
self.de = deltaE # in eV
self.subset = subset
self.cycles = cycles
self.ncores = ncores
self.jobs = jobs
self.pid = os.getpid()
if notrans == True:
self.notrans = True
def setSigma(self):
if self.notrans == True:
dev = numpy.std(self.exc)
else:
dev = weightedDev(self.exc,self.acs)
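        # This is Silverman's rule-of-thumb bandwidth for a Gaussian kernel:
        # sigma = (4 * dev**5 / (3 * n)) ** (1/5).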
self.sigma = (4. * dev ** 5. / 3. / self.nsample) ** (1./5.)
def trans2intensity(self):
self.int_sigma = [ 0.0 for i in range(len(self.exc))]
self.int_tau = [ 0.0 for i in range(len(self.exc))]
self.acs = [ 0.0 for i in range(len(self.exc))]
for i in range(len(self.exc)):
trans2 = self.trans[i][0]**2 + self.trans[i][1]**2 + self.trans[i][2]**2
self.acs[i] = trans2 * COEFF * self.exc[i] / self.nsample
if(self.sigma == 0) or (self.subset > 0):
self.setSigma()
for i in range(len(self.exc)):
if self.sigma > 0:
self.int_sigma[i] = self.acs[i] / math.sqrt(2*PI) / self.sigma
if self.tau > 0:
self.int_tau[i] += self.acs[i] * self.tau / 2 / PI
if self.sigma > 0 and self.tau > 0.0:
self.int_sigma[i] /= 2
self.int_tau[i] /= 2
def normalize(self):
if(self.sigma == 0):
self.setSigma()
if self.sigma > 0:
self.int_sigma = [ 1.0 / self.sigma / math.sqrt(2*PI) / self.nsample for i in range(len(self.exc))]
if self.tau > 0:
self.int_tau = [ self.tau / (2*PI) / self.nsample for i in range(len(self.exc))]
if self.sigma > 0.0 and self.tau > 0.0:
for i in range(len(self.int_tau)):
self.int_sigma[i] /= 2.0
self.int_tau[i] /= 2.0
def finish_spectrum(self):
self.energies = [ 0.0 for i in range(int( self.maxe/self.de )-1, -1, -1)]
self.intensity = [ 0.0 for i in range(int( self.maxe/self.de )-1, -1, -1)]
for i in range(int( self.maxe/self.de )-1, -1, -1):
self.energies[i] = (self.maxe-i*self.de)
for j in range(len(self.exc)):
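                # Accumulate Gaussian (standard deviation sigma) and
                # Lorentzian (FWHM tau) line shapes centered at exc[j].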
if self.sigma > 0.0:
self.intensity[i] += self.int_sigma[j]*math.exp(-( (self.energies[i]-self.exc[j])**2 )/2/self.sigma**2 )
if self.tau > 0.0:
self.intensity[i] += self.int_tau[j] / ( (self.energies[i]-self.exc[j])**2 +(self.tau**2)/4 )
def writeout(self, xunit, fileout):
units = {}
units['nm'] = 1239.8
units['ev'] = 1.
units['cm'] = 8065.7
f = open(fileout, "w")
for i in range(int( self.maxe/self.de )-1, -1, -1):
if self.energies[i] < self.minE:
continue
if xunit == "nm":
if units[xunit]/self.energies[i] < 1000:
f.write('%f %e \n' % (units[xunit]/self.energies[i], self.intensity[i] ))
else:
f.write('%f %e \n' % (self.energies[i]*units[xunit], self.intensity[i] ))
f.close()
options, args = read_cmd()
try:
infile = args[0]
except IndexError:
    print("You did not specify an input file. Type -h for help.")
    sys.exit(1)
if options.tau > 0.0 or options.sigma is not None:
spectrum = SpectrumBroad(options.nsample, options.de, options.sigma, options.tau, options.notrans, options.subset, options.cycles, options.ncores, options.jobs)
else:
spectrum = Spectrum(options.nsample, options.de, options.notrans,options.subset, options.cycles, options.ncores, options.jobs)
spectrum.read_data(infile)
if spectrum.subset > 0:
spectrum.reduce_geoms(infile)
else:
if spectrum.notrans == True:
spectrum.normalize()
else:
spectrum.trans2intensity()
spectrum.finish_spectrum()
spectrum.writeoutall(infile)
|
|
# Hidden Markov Models
#
# Author: Ron Weiss <ronweiss@gmail.com>
# and Shiqiao Du <lucidfrontier.45@gmail.com>
# API changes: Jaques Grobler <jaquesgrobler@gmail.com>
#         Modifications to create the HMMLearn module: Gael Varoquaux
"""
The :mod:`hmmlearn.hmm` module implements hidden Markov models.
"""
import string
import numpy as np
from sklearn.utils import check_random_state
from sklearn.utils.extmath import logsumexp
from sklearn.base import BaseEstimator
from sklearn.mixture import (
GMM, sample_gaussian,
distribute_covar_matrix_to_match_covariance_type, _validate_covars)
from sklearn import cluster
from .utils.fixes import log_multivariate_normal_density
from . import _hmmc
__all__ = ['GMMHMM',
'GaussianHMM',
'MultinomialHMM',
'decoder_algorithms',
'normalize']
ZEROLOGPROB = -1e200
EPS = np.finfo(float).eps
NEGINF = -np.inf
decoder_algorithms = ("viterbi", "map")
def normalize(A, axis=None):
""" Normalize the input array so that it sums to 1.
WARNING: The HMM module and its functions will be removed in 0.17
as it no longer falls within the project's scope and API.
Parameters
----------
A: array, shape (n_samples, n_features)
Non-normalized input data
axis: int
dimension along which normalization is performed
Returns
-------
normalized_A: array, shape (n_samples, n_features)
A with values normalized (summing to 1) along the prescribed axis
WARNING: Modifies inplace the array
"""
A += EPS
Asum = A.sum(axis)
if axis and A.ndim > 1:
# Make sure we don't divide by zero.
Asum[Asum == 0] = 1
shape = list(A.shape)
shape[axis] = 1
Asum.shape = shape
return A / Asum
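# An illustrative sketch (not from the original docs): row-normalizing a
# transition-like matrix, assuming `import numpy as np` as above.
#
#   A = np.array([[1.0, 3.0], [2.0, 2.0]])
#   normalize(A, axis=1)  # rows now sum to 1: approx [[0.25, 0.75], [0.5, 0.5]]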
class _BaseHMM(BaseEstimator):
"""Hidden Markov Model base class.
Representation of a hidden Markov model probability distribution.
This class allows for easy evaluation of, sampling from, and
maximum-likelihood estimation of the parameters of a HMM.
See the instance documentation for details specific to a
particular object.
Attributes
----------
n_components : int
Number of states in the model.
transmat : array, shape (`n_components`, `n_components`)
Matrix of transition probabilities between states.
    startprob : array, shape (`n_components`,)
Initial state occupation distribution.
transmat_prior : array, shape (`n_components`, `n_components`)
Matrix of prior transition probabilities between states.
    startprob_prior : array, shape (`n_components`,)
Initial state occupation prior distribution.
algorithm : string, one of the decoder_algorithms
decoder algorithm
random_state: RandomState or an int seed (0 by default)
A random number generator instance
n_iter : int, optional
Number of iterations to perform.
thresh : float, optional
Convergence threshold.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 's' for startprob,
't' for transmat, and other characters for subclass-specific
        emission parameters. Defaults to all parameters.
init_params : string, optional
Controls which parameters are initialized prior to
training. Can contain any combination of 's' for
startprob, 't' for transmat, and other characters for
        subclass-specific emission parameters. Defaults to all
parameters.
See Also
--------
GMM : Gaussian mixture model
"""
# This class implements the public interface to all HMMs that
# derive from it, including all of the machinery for the
# forward-backward and Viterbi algorithms. Subclasses need only
# implement _generate_sample_from_state(), _compute_log_likelihood(),
# _init(), _initialize_sufficient_statistics(),
# _accumulate_sufficient_statistics(), and _do_mstep(), all of
# which depend on the specific emission distribution.
#
# Subclasses will probably also want to implement properties for
# the emission distribution parameters to expose them publicly.
def __init__(self, n_components=1, startprob=None, transmat=None,
startprob_prior=None, transmat_prior=None,
algorithm="viterbi", random_state=None,
n_iter=10, thresh=1e-2, params=string.ascii_letters,
init_params=string.ascii_letters):
self.n_components = n_components
self.n_iter = n_iter
self.thresh = thresh
self.params = params
self.init_params = init_params
self.startprob_ = startprob
self.startprob_prior = startprob_prior
self.transmat_ = transmat
self.transmat_prior = transmat_prior
self._algorithm = algorithm
self.random_state = random_state
def eval(self, X):
return self.score_samples(X)
def score_samples(self, obs):
"""Compute the log probability under the model and compute posteriors.
Parameters
----------
obs : array_like, shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single point in the sequence.
Returns
-------
logprob : float
Log likelihood of the sequence ``obs``.
posteriors : array_like, shape (n, n_components)
Posterior probabilities of each state for each
observation
See Also
--------
score : Compute the log probability under the model
        decode : Find most likely state sequence corresponding to ``obs``
"""
obs = np.asarray(obs)
framelogprob = self._compute_log_likelihood(obs)
logprob, fwdlattice = self._do_forward_pass(framelogprob)
bwdlattice = self._do_backward_pass(framelogprob)
gamma = fwdlattice + bwdlattice
# gamma is guaranteed to be correctly normalized by logprob at
# all frames, unless we do approximate inference using pruning.
# So, we will normalize each frame explicitly in case we
# pruned too aggressively.
posteriors = np.exp(gamma.T - logsumexp(gamma, axis=1)).T
posteriors += np.finfo(np.float32).eps
posteriors /= np.sum(posteriors, axis=1).reshape((-1, 1))
return logprob, posteriors
def score(self, obs):
"""Compute the log probability under the model.
Parameters
----------
obs : array_like, shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : float
Log likelihood of the ``obs``.
See Also
--------
score_samples : Compute the log probability under the model and
posteriors
        decode : Find most likely state sequence corresponding to ``obs``
"""
obs = np.asarray(obs)
framelogprob = self._compute_log_likelihood(obs)
logprob, _ = self._do_forward_pass(framelogprob)
return logprob
def _decode_viterbi(self, obs):
"""Find most likely state sequence corresponding to ``obs``.
Uses the Viterbi algorithm.
Parameters
----------
obs : array_like, shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single point in the sequence.
Returns
-------
viterbi_logprob : float
Log probability of the maximum likelihood path through the HMM.
state_sequence : array_like, shape (n,)
Index of the most likely states for each observation.
See Also
--------
score_samples : Compute the log probability under the model and
posteriors.
score : Compute the log probability under the model
"""
obs = np.asarray(obs)
framelogprob = self._compute_log_likelihood(obs)
viterbi_logprob, state_sequence = self._do_viterbi_pass(framelogprob)
return viterbi_logprob, state_sequence
def _decode_map(self, obs):
"""Find most likely state sequence corresponding to `obs`.
Uses the maximum a posteriori estimation.
Parameters
----------
obs : array_like, shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single point in the sequence.
Returns
-------
map_logprob : float
Log probability of the maximum likelihood path through the HMM
state_sequence : array_like, shape (n,)
Index of the most likely states for each observation
See Also
--------
score_samples : Compute the log probability under the model and
posteriors.
score : Compute the log probability under the model.
"""
_, posteriors = self.score_samples(obs)
state_sequence = np.argmax(posteriors, axis=1)
map_logprob = np.max(posteriors, axis=1).sum()
return map_logprob, state_sequence
def decode(self, obs, algorithm="viterbi"):
"""Find most likely state sequence corresponding to ``obs``.
Uses the selected algorithm for decoding.
Parameters
----------
obs : array_like, shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single point in the sequence.
algorithm : string, one of the `decoder_algorithms`
decoder algorithm to be used
Returns
-------
logprob : float
Log probability of the maximum likelihood path through the HMM
state_sequence : array_like, shape (n,)
Index of the most likely states for each observation
See Also
--------
score_samples : Compute the log probability under the model and
posteriors.
score : Compute the log probability under the model.
"""
if self._algorithm in decoder_algorithms:
algorithm = self._algorithm
elif algorithm in decoder_algorithms:
algorithm = algorithm
decoder = {"viterbi": self._decode_viterbi,
"map": self._decode_map}
logprob, state_sequence = decoder[algorithm](obs)
return logprob, state_sequence
def predict(self, obs, algorithm="viterbi"):
"""Find most likely state sequence corresponding to `obs`.
Parameters
----------
obs : array_like, shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single point in the sequence.
Returns
-------
state_sequence : array_like, shape (n,)
Index of the most likely states for each observation
"""
_, state_sequence = self.decode(obs, algorithm)
return state_sequence
def predict_proba(self, obs):
"""Compute the posterior probability for each state in the model
Parameters
----------
obs : array_like, shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single point in the sequence.
Returns
-------
T : array-like, shape (n, n_components)
Returns the probability of the sample for each state in the model.
"""
_, posteriors = self.score_samples(obs)
return posteriors
def sample(self, n=1, random_state=None):
"""Generate random samples from the model.
Parameters
----------
n : int
Number of samples to generate.
random_state: RandomState or an int seed (0 by default)
A random number generator instance. If None is given, the
object's random_state is used
Returns
-------
(obs, hidden_states)
        obs : array_like, length `n`
            List of samples.
        hidden_states : array_like, length `n`
            List of hidden states.
"""
if random_state is None:
random_state = self.random_state
random_state = check_random_state(random_state)
startprob_pdf = self.startprob_
startprob_cdf = np.cumsum(startprob_pdf)
transmat_pdf = self.transmat_
transmat_cdf = np.cumsum(transmat_pdf, 1)
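        # Inverse-CDF sampling below: draw u ~ U[0, 1) and take the first
        # state whose cumulative probability exceeds u via (cdf > u).argmax().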
# Initial state.
rand = random_state.rand()
currstate = (startprob_cdf > rand).argmax()
hidden_states = [currstate]
obs = [self._generate_sample_from_state(
currstate, random_state=random_state)]
for _ in range(n - 1):
rand = random_state.rand()
currstate = (transmat_cdf[currstate] > rand).argmax()
hidden_states.append(currstate)
obs.append(self._generate_sample_from_state(
currstate, random_state=random_state))
return np.array(obs), np.array(hidden_states, dtype=int)
def fit(self, obs):
"""Estimate model parameters.
An initialization step is performed before entering the EM
algorithm. If you want to avoid this step, pass proper
``init_params`` keyword argument to estimator's constructor.
Parameters
----------
obs : list
List of array-like observation sequences, each of which
has shape (n_i, n_features), where n_i is the length of
the i_th observation.
Notes
-----
In general, `logprob` should be non-decreasing unless
aggressive pruning is used. Decreasing `logprob` is generally
a sign of overfitting (e.g. a covariance parameter getting too
small). You can fix this by getting more training data,
or strengthening the appropriate subclass-specific regularization
parameter.
"""
if self.algorithm not in decoder_algorithms:
self._algorithm = "viterbi"
self._init(obs, self.init_params)
logprob = []
for i in range(self.n_iter):
# Expectation step
stats = self._initialize_sufficient_statistics()
curr_logprob = 0
for seq in obs:
framelogprob = self._compute_log_likelihood(seq)
lpr, fwdlattice = self._do_forward_pass(framelogprob)
bwdlattice = self._do_backward_pass(framelogprob)
gamma = fwdlattice + bwdlattice
posteriors = np.exp(gamma.T - logsumexp(gamma, axis=1)).T
curr_logprob += lpr
self._accumulate_sufficient_statistics(
stats, seq, framelogprob, posteriors, fwdlattice,
bwdlattice, self.params)
logprob.append(curr_logprob)
# Check for convergence.
if i > 0 and abs(logprob[-1] - logprob[-2]) < self.thresh:
break
# Maximization step
self._do_mstep(stats, self.params)
return self
def _get_algorithm(self):
"decoder algorithm"
return self._algorithm
def _set_algorithm(self, algorithm):
if algorithm not in decoder_algorithms:
raise ValueError("algorithm must be one of the decoder_algorithms")
self._algorithm = algorithm
algorithm = property(_get_algorithm, _set_algorithm)
def _get_startprob(self):
"""Mixing startprob for each state."""
return np.exp(self._log_startprob)
def _set_startprob(self, startprob):
if startprob is None:
startprob = np.tile(1.0 / self.n_components, self.n_components)
else:
startprob = np.asarray(startprob, dtype=np.float)
# check if there exists a component whose value is exactly zero
# if so, add a small number and re-normalize
if not np.alltrue(startprob):
normalize(startprob)
if len(startprob) != self.n_components:
raise ValueError('startprob must have length n_components')
if not np.allclose(np.sum(startprob), 1.0):
raise ValueError('startprob must sum to 1.0')
self._log_startprob = np.log(np.asarray(startprob).copy())
startprob_ = property(_get_startprob, _set_startprob)
def _get_transmat(self):
"""Matrix of transition probabilities."""
return np.exp(self._log_transmat)
def _set_transmat(self, transmat):
if transmat is None:
transmat = np.tile(1.0 / self.n_components,
(self.n_components, self.n_components))
# check if there exists a component whose value is exactly zero
# if so, add a small number and re-normalize
if not np.alltrue(transmat):
normalize(transmat, axis=1)
if (np.asarray(transmat).shape
!= (self.n_components, self.n_components)):
raise ValueError('transmat must have shape '
'(n_components, n_components)')
if not np.all(np.allclose(np.sum(transmat, axis=1), 1.0)):
raise ValueError('Rows of transmat must sum to 1.0')
self._log_transmat = np.log(np.asarray(transmat).copy())
underflow_idx = np.isnan(self._log_transmat)
self._log_transmat[underflow_idx] = NEGINF
transmat_ = property(_get_transmat, _set_transmat)
def _do_viterbi_pass(self, framelogprob):
n_observations, n_components = framelogprob.shape
state_sequence, logprob = _hmmc._viterbi(
n_observations, n_components, self._log_startprob,
self._log_transmat, framelogprob)
return logprob, state_sequence
def _do_forward_pass(self, framelogprob):
n_observations, n_components = framelogprob.shape
fwdlattice = np.zeros((n_observations, n_components))
_hmmc._forward(n_observations, n_components, self._log_startprob,
self._log_transmat, framelogprob, fwdlattice)
fwdlattice[fwdlattice <= ZEROLOGPROB] = NEGINF
return logsumexp(fwdlattice[-1]), fwdlattice
def _do_backward_pass(self, framelogprob):
n_observations, n_components = framelogprob.shape
bwdlattice = np.zeros((n_observations, n_components))
_hmmc._backward(n_observations, n_components, self._log_startprob,
self._log_transmat, framelogprob, bwdlattice)
bwdlattice[bwdlattice <= ZEROLOGPROB] = NEGINF
return bwdlattice
def _compute_log_likelihood(self, obs):
pass
def _generate_sample_from_state(self, state, random_state=None):
pass
def _init(self, obs, params):
if 's' in params:
self.startprob_.fill(1.0 / self.n_components)
if 't' in params:
self.transmat_.fill(1.0 / self.n_components)
# Methods used by self.fit()
def _initialize_sufficient_statistics(self):
stats = {'nobs': 0,
'start': np.zeros(self.n_components),
'trans': np.zeros((self.n_components, self.n_components))}
return stats
def _accumulate_sufficient_statistics(self, stats, seq, framelogprob,
posteriors, fwdlattice, bwdlattice,
params):
stats['nobs'] += 1
if 's' in params:
stats['start'] += posteriors[0]
if 't' in params:
n_observations, n_components = framelogprob.shape
# when the sample is of length 1, it contains no transitions
# so there is no reason to update our trans. matrix estimate
if n_observations > 1:
lneta = np.zeros((n_observations - 1, n_components, n_components))
lnP = logsumexp(fwdlattice[-1])
_hmmc._compute_lneta(n_observations, n_components, fwdlattice,
self._log_transmat, bwdlattice, framelogprob,
lnP, lneta)
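                # Clip the log-sum at 700 (just under log(float64 max)) so
                # the exp cannot overflow when accumulating transition counts.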
stats['trans'] += np.exp(np.minimum(logsumexp(lneta, 0), 700))
def _do_mstep(self, stats, params):
# Based on Huang, Acero, Hon, "Spoken Language Processing",
# p. 443 - 445
if self.startprob_prior is None:
self.startprob_prior = 1.0
if self.transmat_prior is None:
self.transmat_prior = 1.0
if 's' in params:
self.startprob_ = normalize(
np.maximum(self.startprob_prior - 1.0 + stats['start'], 1e-20))
if 't' in params:
transmat_ = normalize(
np.maximum(self.transmat_prior - 1.0 + stats['trans'], 1e-20),
axis=1)
self.transmat_ = transmat_
class GaussianHMM(_BaseHMM):
"""Hidden Markov Model with Gaussian emissions
Representation of a hidden Markov model probability distribution.
This class allows for easy evaluation of, sampling from, and
maximum-likelihood estimation of the parameters of a HMM.
Parameters
----------
n_components : int
Number of states.
``_covariance_type`` : string
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
Defaults to 'diag'.
Attributes
----------
``_covariance_type`` : string
String describing the type of covariance parameters used by
the model. Must be one of 'spherical', 'tied', 'diag', 'full'.
n_features : int
Dimensionality of the Gaussian emissions.
n_components : int
Number of states in the model.
transmat : array, shape (`n_components`, `n_components`)
Matrix of transition probabilities between states.
    startprob : array, shape (`n_components`,)
Initial state occupation distribution.
means : array, shape (`n_components`, `n_features`)
Mean parameters for each state.
covars : array
Covariance parameters for each state. The shape depends on
``_covariance_type``::
(`n_components`,) if 'spherical',
(`n_features`, `n_features`) if 'tied',
(`n_components`, `n_features`) if 'diag',
(`n_components`, `n_features`, `n_features`) if 'full'
random_state: RandomState or an int seed (0 by default)
A random number generator instance
n_iter : int, optional
Number of iterations to perform.
thresh : float, optional
Convergence threshold.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 's' for startprob,
't' for transmat, 'm' for means, and 'c' for covars.
Defaults to all parameters.
init_params : string, optional
Controls which parameters are initialized prior to
training. Can contain any combination of 's' for
startprob, 't' for transmat, 'm' for means, and 'c' for
covars. Defaults to all parameters.
Examples
--------
>>> from hmmlearn.hmm import GaussianHMM
>>> GaussianHMM(n_components=2)
... #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
GaussianHMM(algorithm='viterbi',...
See Also
--------
GMM : Gaussian mixture model
"""
def __init__(self, n_components=1, covariance_type='diag', startprob=None,
transmat=None, startprob_prior=None, transmat_prior=None,
algorithm="viterbi", means_prior=None, means_weight=0,
covars_prior=1e-2, covars_weight=1,
random_state=None, n_iter=10, thresh=1e-2,
params=string.ascii_letters,
init_params=string.ascii_letters):
_BaseHMM.__init__(self, n_components, startprob, transmat,
startprob_prior=startprob_prior,
transmat_prior=transmat_prior, algorithm=algorithm,
random_state=random_state, n_iter=n_iter,
thresh=thresh, params=params,
init_params=init_params)
self._covariance_type = covariance_type
        if covariance_type not in ['spherical', 'tied', 'diag', 'full']:
raise ValueError('bad covariance_type')
self.means_prior = means_prior
self.means_weight = means_weight
self.covars_prior = covars_prior
self.covars_weight = covars_weight
@property
def covariance_type(self):
"""Covariance type of the model.
Must be one of 'spherical', 'tied', 'diag', 'full'.
"""
return self._covariance_type
def _get_means(self):
"""Mean parameters for each state."""
return self._means_
def _set_means(self, means):
means = np.asarray(means)
if (hasattr(self, 'n_features')
and means.shape != (self.n_components, self.n_features)):
raise ValueError('means must have shape '
'(n_components, n_features)')
self._means_ = means.copy()
self.n_features = self._means_.shape[1]
means_ = property(_get_means, _set_means)
def _get_covars(self):
"""Return covars as a full matrix."""
if self._covariance_type == 'full':
return self._covars_
elif self._covariance_type == 'diag':
return [np.diag(cov) for cov in self._covars_]
elif self._covariance_type == 'tied':
return [self._covars_] * self.n_components
elif self._covariance_type == 'spherical':
return [np.eye(self.n_features) * f for f in self._covars_]
def _set_covars(self, covars):
covars = np.asarray(covars)
_validate_covars(covars, self._covariance_type, self.n_components)
self._covars_ = covars.copy()
covars_ = property(_get_covars, _set_covars)
def _compute_log_likelihood(self, obs):
return log_multivariate_normal_density(
obs, self._means_, self._covars_, self._covariance_type)
def _generate_sample_from_state(self, state, random_state=None):
if self._covariance_type == 'tied':
cv = self._covars_
else:
cv = self._covars_[state]
return sample_gaussian(self._means_[state], cv, self._covariance_type,
random_state=random_state)
def _init(self, obs, params='stmc'):
super(GaussianHMM, self)._init(obs, params=params)
if (hasattr(self, 'n_features')
and self.n_features != obs[0].shape[1]):
raise ValueError('Unexpected number of dimensions, got %s but '
'expected %s' % (obs[0].shape[1],
self.n_features))
self.n_features = obs[0].shape[1]
if 'm' in params:
self._means_ = cluster.KMeans(
n_clusters=self.n_components).fit(obs[0]).cluster_centers_
if 'c' in params:
cv = np.cov(obs[0].T)
if not cv.shape:
cv.shape = (1, 1)
self._covars_ = distribute_covar_matrix_to_match_covariance_type(
cv, self._covariance_type, self.n_components)
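            # Floor zero variances at a small positive value to avoid
            # singular (zero-width) Gaussian components.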
self._covars_[self._covars_==0] = 1e-5
def _initialize_sufficient_statistics(self):
stats = super(GaussianHMM, self)._initialize_sufficient_statistics()
stats['post'] = np.zeros(self.n_components)
stats['obs'] = np.zeros((self.n_components, self.n_features))
stats['obs**2'] = np.zeros((self.n_components, self.n_features))
stats['obs*obs.T'] = np.zeros((self.n_components, self.n_features,
self.n_features))
return stats
def _accumulate_sufficient_statistics(self, stats, obs, framelogprob,
posteriors, fwdlattice, bwdlattice,
params):
super(GaussianHMM, self)._accumulate_sufficient_statistics(
stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice,
params)
if 'm' in params or 'c' in params:
stats['post'] += posteriors.sum(axis=0)
stats['obs'] += np.dot(posteriors.T, obs)
if 'c' in params:
if self._covariance_type in ('spherical', 'diag'):
stats['obs**2'] += np.dot(posteriors.T, obs ** 2)
elif self._covariance_type in ('tied', 'full'):
for t, o in enumerate(obs):
obsobsT = np.outer(o, o)
for c in range(self.n_components):
stats['obs*obs.T'][c] += posteriors[t, c] * obsobsT
def _do_mstep(self, stats, params):
super(GaussianHMM, self)._do_mstep(stats, params)
# Based on Huang, Acero, Hon, "Spoken Language Processing",
# p. 443 - 445
denom = stats['post'][:, np.newaxis]
if 'm' in params:
prior = self.means_prior
weight = self.means_weight
if prior is None:
weight = 0
prior = 0
self._means_ = (weight * prior + stats['obs']) / (weight + denom)
if 'c' in params:
covars_prior = self.covars_prior
covars_weight = self.covars_weight
if covars_prior is None:
covars_weight = 0
covars_prior = 0
means_prior = self.means_prior
means_weight = self.means_weight
if means_prior is None:
means_weight = 0
means_prior = 0
meandiff = self._means_ - means_prior
if self._covariance_type in ('spherical', 'diag'):
cv_num = (means_weight * (meandiff) ** 2
+ stats['obs**2']
- 2 * self._means_ * stats['obs']
+ self._means_ ** 2 * denom)
cv_den = max(covars_weight - 1, 0) + denom
self._covars_ = (covars_prior + cv_num) / np.maximum(cv_den, 1e-5)
if self._covariance_type == 'spherical':
self._covars_ = np.tile(
self._covars_.mean(1)[:, np.newaxis],
(1, self._covars_.shape[1]))
elif self._covariance_type in ('tied', 'full'):
cvnum = np.empty((self.n_components, self.n_features,
self.n_features))
for c in range(self.n_components):
obsmean = np.outer(stats['obs'][c], self._means_[c])
cvnum[c] = (means_weight * np.outer(meandiff[c],
meandiff[c])
+ stats['obs*obs.T'][c]
- obsmean - obsmean.T
+ np.outer(self._means_[c], self._means_[c])
* stats['post'][c])
cvweight = max(covars_weight - self.n_features, 0)
if self._covariance_type == 'tied':
self._covars_ = ((covars_prior + cvnum.sum(axis=0)) /
(cvweight + stats['post'].sum()))
elif self._covariance_type == 'full':
self._covars_ = ((covars_prior + cvnum) /
(cvweight + stats['post'][:, None, None]))
def fit(self, obs):
"""Estimate model parameters.
An initialization step is performed before entering the EM
algorithm. If you want to avoid this step, pass proper
``init_params`` keyword argument to estimator's constructor.
Parameters
----------
obs : list
List of array-like observation sequences, each of which
has shape (n_i, n_features), where n_i is the length of
the i_th observation.
Notes
-----
In general, `logprob` should be non-decreasing unless
aggressive pruning is used. Decreasing `logprob` is generally
a sign of overfitting (e.g. the covariance parameter on one or
        more components becoming too small). You can fix this by getting
more training data, or increasing covars_prior.
"""
return super(GaussianHMM, self).fit(obs)
class MultinomialHMM(_BaseHMM):
"""Hidden Markov Model with multinomial (discrete) emissions
Attributes
----------
n_components : int
Number of states in the model.
n_symbols : int
Number of possible symbols emitted by the model (in the observations).
transmat : array, shape (`n_components`, `n_components`)
Matrix of transition probabilities between states.
    startprob : array, shape (`n_components`,)
Initial state occupation distribution.
    emissionprob : array, shape (`n_components`, `n_symbols`)
Probability of emitting a given symbol when in each state.
random_state: RandomState or an int seed (0 by default)
A random number generator instance
n_iter : int, optional
Number of iterations to perform.
thresh : float, optional
Convergence threshold.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 's' for startprob,
        't' for transmat, 'e' for emissionprob.
Defaults to all parameters.
init_params : string, optional
Controls which parameters are initialized prior to
training. Can contain any combination of 's' for
        startprob, 't' for transmat, 'e' for emissionprob.
Defaults to all parameters.
Examples
--------
>>> from hmmlearn.hmm import MultinomialHMM
>>> MultinomialHMM(n_components=2)
... #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
MultinomialHMM(algorithm='viterbi',...
See Also
--------
GaussianHMM : HMM with Gaussian emissions
"""
def __init__(self, n_components=1, startprob=None, transmat=None,
startprob_prior=None, transmat_prior=None,
algorithm="viterbi", random_state=None,
n_iter=10, thresh=1e-2, params=string.ascii_letters,
init_params=string.ascii_letters):
"""Create a hidden Markov model with multinomial emissions.
Parameters
----------
n_components : int
Number of states.
"""
_BaseHMM.__init__(self, n_components, startprob, transmat,
startprob_prior=startprob_prior,
transmat_prior=transmat_prior,
algorithm=algorithm,
random_state=random_state,
n_iter=n_iter,
thresh=thresh,
params=params,
init_params=init_params)
def _get_emissionprob(self):
"""Emission probability distribution for each state."""
return np.exp(self._log_emissionprob)
def _set_emissionprob(self, emissionprob):
emissionprob = np.asarray(emissionprob)
if hasattr(self, 'n_symbols') and \
emissionprob.shape != (self.n_components, self.n_symbols):
raise ValueError('emissionprob must have shape '
'(n_components, n_symbols)')
# check if there exists a component whose value is exactly zero
# if so, add a small number and re-normalize
if not np.alltrue(emissionprob):
normalize(emissionprob)
self._log_emissionprob = np.log(emissionprob)
underflow_idx = np.isnan(self._log_emissionprob)
self._log_emissionprob[underflow_idx] = NEGINF
self.n_symbols = self._log_emissionprob.shape[1]
emissionprob_ = property(_get_emissionprob, _set_emissionprob)
def _compute_log_likelihood(self, obs):
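        # Fancy indexing: select each observed symbol's log emission
        # probability for every state; result has shape (n, n_components).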
return self._log_emissionprob[:, obs].T
def _generate_sample_from_state(self, state, random_state=None):
cdf = np.cumsum(self.emissionprob_[state, :])
random_state = check_random_state(random_state)
rand = random_state.rand()
symbol = (cdf > rand).argmax()
return symbol
def _init(self, obs, params='ste'):
super(MultinomialHMM, self)._init(obs, params=params)
self.random_state = check_random_state(self.random_state)
if 'e' in params:
if not hasattr(self, 'n_symbols'):
symbols = set()
for o in obs:
symbols = symbols.union(set(o))
self.n_symbols = len(symbols)
emissionprob = normalize(self.random_state.rand(self.n_components,
self.n_symbols), 1)
self.emissionprob_ = emissionprob
def _initialize_sufficient_statistics(self):
stats = super(MultinomialHMM, self)._initialize_sufficient_statistics()
stats['obs'] = np.zeros((self.n_components, self.n_symbols))
return stats
def _accumulate_sufficient_statistics(self, stats, obs, framelogprob,
posteriors, fwdlattice, bwdlattice,
params):
super(MultinomialHMM, self)._accumulate_sufficient_statistics(
stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice,
params)
if 'e' in params:
for t, symbol in enumerate(obs):
stats['obs'][:, symbol] += posteriors[t]
def _do_mstep(self, stats, params):
super(MultinomialHMM, self)._do_mstep(stats, params)
if 'e' in params:
self.emissionprob_ = (stats['obs']
/ stats['obs'].sum(1)[:, np.newaxis])
def _check_input_symbols(self, obs):
"""check if input can be used for Multinomial.fit input must be both
positive integer array and every element must be continuous.
e.g. x = [0, 0, 2, 1, 3, 1, 1] is OK and y = [0, 0, 3, 5, 10] not
"""
symbols = np.asarray(obs).flatten()
if symbols.dtype.kind != 'i':
# input symbols must be integer
return False
if len(symbols) == 1:
# input too short
return False
if np.any(symbols < 0):
            # input contains negative integer
return False
symbols.sort()
if np.any(np.diff(symbols) > 1):
            # input symbols are not contiguous
return False
return True
def fit(self, obs, **kwargs):
"""Estimate model parameters.
An initialization step is performed before entering the EM
algorithm. If you want to avoid this step, pass proper
``init_params`` keyword argument to estimator's constructor.
Parameters
----------
obs : list
List of array-like observation sequences, each of which
has shape (n_i, n_features), where n_i is the length of
the i_th observation.
"""
err_msg = ("Input must be both positive integer array and "
"every element must be continuous, but %s was given.")
if not self._check_input_symbols(obs):
raise ValueError(err_msg % obs)
return _BaseHMM.fit(self, obs, **kwargs)
class GMMHMM(_BaseHMM):
"""Hidden Markov Model with Gaussin mixture emissions
Attributes
----------
init_params : string, optional
Controls which parameters are initialized prior to training. Can
contain any combination of 's' for startprob, 't' for transmat, 'm'
for means, 'c' for covars, and 'w' for GMM mixing weights.
Defaults to all parameters.
params : string, optional
Controls which parameters are updated in the training process. Can
contain any combination of 's' for startprob, 't' for transmat, 'm' for
means, and 'c' for covars, and 'w' for GMM mixing weights.
Defaults to all parameters.
n_components : int
Number of states in the model.
transmat : array, shape (`n_components`, `n_components`)
Matrix of transition probabilities between states.
    startprob : array, shape (`n_components`,)
Initial state occupation distribution.
gmms : array of GMM objects, length `n_components`
GMM emission distributions for each state.
random_state : RandomState or an int seed (0 by default)
A random number generator instance
n_iter : int, optional
Number of iterations to perform.
thresh : float, optional
Convergence threshold.
Examples
--------
>>> from hmmlearn.hmm import GMMHMM
>>> GMMHMM(n_components=2, n_mix=10, covariance_type='diag')
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
GMMHMM(algorithm='viterbi', covariance_type='diag',...
See Also
--------
GaussianHMM : HMM with Gaussian emissions
"""
def __init__(self, n_components=1, n_mix=1, startprob=None, transmat=None,
startprob_prior=None, transmat_prior=None,
algorithm="viterbi", gmms=None, covariance_type='diag',
covars_prior=1e-2, random_state=None, n_iter=10, thresh=1e-2,
params=string.ascii_letters,
init_params=string.ascii_letters):
"""Create a hidden Markov model with GMM emissions.
Parameters
----------
n_components : int
Number of states.
"""
_BaseHMM.__init__(self, n_components, startprob, transmat,
startprob_prior=startprob_prior,
transmat_prior=transmat_prior,
algorithm=algorithm,
random_state=random_state,
n_iter=n_iter,
thresh=thresh,
params=params,
init_params=init_params)
        # XXX: Hotfix for n_mix, which is incompatible with scikit-learn's
        # BaseEstimator API
self.n_mix = n_mix
self._covariance_type = covariance_type
self.covars_prior = covars_prior
self.gmms = gmms
if gmms is None:
gmms = []
for x in range(self.n_components):
if covariance_type is None:
g = GMM(n_mix)
else:
g = GMM(n_mix, covariance_type=covariance_type)
gmms.append(g)
self.gmms_ = gmms
# Read-only properties.
@property
def covariance_type(self):
"""Covariance type of the model.
Must be one of 'spherical', 'tied', 'diag', 'full'.
"""
return self._covariance_type
def _compute_log_likelihood(self, obs):
return np.array([g.score(obs) for g in self.gmms_]).T
def _generate_sample_from_state(self, state, random_state=None):
return self.gmms_[state].sample(1, random_state=random_state).flatten()
def _init(self, obs, params='stwmc'):
super(GMMHMM, self)._init(obs, params=params)
allobs = np.concatenate(obs, 0)
for g in self.gmms_:
g.set_params(init_params=params, n_iter=0)
g.fit(allobs)
def _initialize_sufficient_statistics(self):
stats = super(GMMHMM, self)._initialize_sufficient_statistics()
stats['norm'] = [np.zeros(g.weights_.shape) for g in self.gmms_]
stats['means'] = [np.zeros(np.shape(g.means_)) for g in self.gmms_]
stats['covars'] = [np.zeros(np.shape(g.covars_)) for g in self.gmms_]
return stats
def _accumulate_sufficient_statistics(self, stats, obs, framelogprob,
posteriors, fwdlattice, bwdlattice,
params):
super(GMMHMM, self)._accumulate_sufficient_statistics(
stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice,
params)
for state, g in enumerate(self.gmms_):
_, lgmm_posteriors = g.score_samples(obs)
lgmm_posteriors += np.log(posteriors[:, state][:, np.newaxis]
+ np.finfo(np.float).eps)
gmm_posteriors = np.exp(lgmm_posteriors)
tmp_gmm = GMM(g.n_components, covariance_type=g.covariance_type)
n_features = g.means_.shape[1]
tmp_gmm._set_covars(
distribute_covar_matrix_to_match_covariance_type(
np.eye(n_features), g.covariance_type,
g.n_components))
norm = tmp_gmm._do_mstep(obs, gmm_posteriors, params)
if np.any(np.isnan(tmp_gmm.covars_)):
raise ValueError
stats['norm'][state] += norm
if 'm' in params:
stats['means'][state] += tmp_gmm.means_ * norm[:, np.newaxis]
if 'c' in params:
if tmp_gmm.covariance_type == 'tied':
stats['covars'][state] += tmp_gmm.covars_ * norm.sum()
else:
cvnorm = np.copy(norm)
shape = np.ones(tmp_gmm.covars_.ndim)
shape[0] = np.shape(tmp_gmm.covars_)[0]
cvnorm.shape = shape
stats['covars'][state] += tmp_gmm.covars_ * cvnorm
def _do_mstep(self, stats, params):
super(GMMHMM, self)._do_mstep(stats, params)
# All that is left to do is to apply covars_prior to the
# parameters updated in _accumulate_sufficient_statistics.
for state, g in enumerate(self.gmms_):
n_features = g.means_.shape[1]
norm = stats['norm'][state]
if 'w' in params:
g.weights_ = normalize(norm)
if 'm' in params:
g.means_ = stats['means'][state] / norm[:, np.newaxis]
if 'c' in params:
if g.covariance_type == 'tied':
g.covars_ = ((stats['covars'][state]
+ self.covars_prior * np.eye(n_features))
/ norm.sum())
else:
cvnorm = np.copy(norm)
shape = np.ones(g.covars_.ndim)
shape[0] = np.shape(g.covars_)[0]
cvnorm.shape = shape
if (g.covariance_type in ['spherical', 'diag']):
g.covars_ = (stats['covars'][state] +
self.covars_prior) / cvnorm
elif g.covariance_type == 'full':
eye = np.eye(n_features)
g.covars_ = ((stats['covars'][state]
+ self.covars_prior * eye[np.newaxis])
/ cvnorm)
|
|
"""Ginkgo service core
This module implements the core service model and several convenience
decorators to use with your services. The primary export of this module is
`Service`, but much of the implementation is in `BasicService`. `BasicService`
uses a simple state machine defined by `ServiceStateMachine` and implements the
core service interface.
`BasicService` assumes no async model, whereas `Service` creates an
`AsyncManager` from a driver in the `async` module. It's assumed the common
case is to build async applications, but sometimes you need a service with
no async; for example, `AsyncManager` classes inherit from `BasicService`
because inheriting from `Service` would create a circular dependency.
"""
import functools
import runpy
from .util import AbstractStateMachine
from .util import defaultproperty
from . import Setting
def require_ready(func):
""" Decorator that blocks the call until the service is ready """
@functools.wraps(func)
def wrapped(self, *args, **kwargs):
        try:
            self.state.wait("ready", self.ready_timeout)
        except Exception:
            # Timeouts and wait errors fall through; readiness is
            # re-checked below.
            pass
if not self.ready:
raise RuntimeWarning("Service must be ready to call this method.")
return func(self, *args, **kwargs)
return wrapped
def autospawn(func):
""" Decorator that will spawn the call in a local greenlet """
@functools.wraps(func)
def wrapped(self, *args, **kwargs):
self.spawn(func, self, *args, **kwargs)
return wrapped
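# A minimal usage sketch (hypothetical names, not from the original module)
# of how the two decorators above compose with a service class defined later
# in this file. The wrapped methods only touch `self.state`, `self.ready`,
# and `self.spawn` at call time, so any service subclass works:
#
#     class Worker(Service):
#         @require_ready
#         def submit(self, job):   # blocks until the service is ready
#             self.jobs.append(job)
#
#         @autospawn
#         def run(self):           # runs via the service's spawn()
#             while self.ready:
#                 self.tick()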
class ServiceStateMachine(AbstractStateMachine):
""" +------+
| init |
+--+---+
|
v
+-------------------+
+--->| start() |
| |-------------------| +-------------------+
| | starting +---+--->| stop() |
| +-------------------+ | |-------------------|
| | | | stopping |
| v | +-------------------+
| +-----------+ | |
| | ready() | | |
| |-----------| | v
| | ready +-------+ +-------------+
| +-----------+ | stopped() |
| |-------------|
+------------------------------------+ stopped |
+-------------+
http://www.asciiflow.com/#7278337222084818599/1920677602
"""
initial_state = "init"
allow_wait = ["ready", "stopped"]
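    # Each event_* tuple below appears to read as: (states the event may
    # fire from, destination state, name of the pre/post hook invoked on
    # the service; see pre_start/post_start/pre_stop/post_stop in
    # BasicService below).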
event_start = \
["init", "stopped"], "starting", "pre_start"
event_ready = \
["starting"], "ready", "post_start"
event_stop = \
["ready", "starting"], "stopping", "pre_stop"
event_stopped = \
["stopping"], "stopped", "post_stop"
class BasicService(object):
_statemachine_class = ServiceStateMachine
_children = defaultproperty(list)
    start_timeout = defaultproperty(int, 2)
    # Timeout used by require_ready() and do_start() when waiting for "ready".
    ready_timeout = defaultproperty(int, 2)
    start_before = defaultproperty(bool, False)
def pre_init(self):
pass
def __new__(cls, *args, **kwargs):
s = super(BasicService, cls).__new__(cls, *args, **kwargs)
s.pre_init()
s.state = cls._statemachine_class(s)
return s
@property
def service_name(self):
return self.__class__.__name__
@property
def ready(self):
return self.state.current == 'ready'
def add_service(self, service):
"""Add a child service to this service
The service added will be started when this service starts, before
its :meth:`_start` method is called. It will also be stopped when this
service stops, before its :meth:`_stop` method is called.
"""
self._children.append(service)
def remove_service(self, service):
"""Remove a child service from this service"""
self._children.remove(service)
def start(self, block_until_ready=True):
"""Starts children and then this service. By default it blocks until ready."""
self.state("start")
if self.start_before:
self.do_start()
for child in self._children:
if child.state.current not in ["ready", "starting"]:
child.start(block_until_ready)
if not self.start_before:
ready = not self.do_start()
if not ready and block_until_ready is True:
self.state.wait("ready", self.start_timeout)
elif ready:
self.state("ready")
else:
self.state("ready")
def pre_start(self):
pass
def do_start(self):
"""Empty implementation of service start. Implement me!
Return `service.NOT_READY` to block until :meth:`set_ready` is
called (or `ready_timeout` is reached).
"""
return
def post_start(self):
pass
def stop(self):
"""Stop child services in reverse order and then this service"""
if self.state.current in ["init", "stopped"]:
return
ready_before_stop = self.ready
self.state("stop")
for child in reversed(self._children):
child.stop()
if ready_before_stop:
self.do_stop()
self.state("stopped")
def pre_stop(self):
pass
def post_stop(self):
pass
def do_stop(self):
"""Empty implementation of service stop. Implement me!"""
return
def reload(self):
def _reload_children():
for child in self._children:
child.reload()
if self.start_before:
self.do_reload()
_reload_children()
else:
_reload_children()
self.do_reload()
def do_reload(self):
"""Empty implementation of service reload. Implement me!"""
pass
def serve_forever(self):
"""Start the service if it hasn't been already started and wait until it's stopped."""
try:
self.start()
        except RuntimeWarning:
# If it can't start because it's
# already started, just move on
pass
self.state.wait("stopped")
def __enter__(self):
self.start()
return self
def __exit__(self, type, value, traceback):
self.stop()
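# A minimal usage sketch, not part of the original module; `_EchoExample`
# and its attributes are hypothetical. It shows the do_start/do_stop hooks
# and the context-manager protocol implemented above.
class _EchoExample(BasicService):
    def do_start(self):
        self.buffer = []   # stand-in for real startup work
        return             # returning None means "ready immediately"
    def do_stop(self):
        self.buffer = None
# Typical use (commented out so importing has no side effects):
#   with _EchoExample() as svc:
#       assert svc.ready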
class Service(BasicService):
async_available = ["ginkgo.async." + m for m in ("gevent", "threading",
"eventlet")]
async = Setting("async", default="ginkgo.async.threading", help="""\
The async reactor to use. Available choices:
ginkgo.async.gevent
ginkgo.async.threading
ginkgo.async.eventlet
""")
def pre_init(self):
try:
mod = runpy.run_module(self.async)
self.async = mod['AsyncManager']()
self.add_service(self.async)
except (NotImplementedError, ImportError) as e:
if self.async not in self.async_available:
helptext = ("Please select a valid async module: \n\t"
+ "\n\t".join(self.async_available))
elif self.async.endswith("gevent"):
helptext = ("Please make sure gevent is installed or use "
"a different async manager.")
else:
helptext = ""
raise RuntimeError(
"Unable to load async manager from {}.\n{}".format(self.async,
helptext))
def spawn(self, *args, **kwargs):
return self.async.spawn(*args, **kwargs)
def spawn_later(self, *args, **kwargs):
return self.async.spawn_later(*args, **kwargs)
|
|
# stdlib
from collections import defaultdict, namedtuple
import time
import urlparse
# 3p
import requests
# project
from checks import AgentCheck
from config import _is_affirmative
from util import headers
class NodeNotFound(Exception):
pass
ESInstanceConfig = namedtuple(
'ESInstanceConfig', [
'pshard_stats',
'cluster_stats',
'password',
'service_check_tags',
'tags',
'timeout',
'url',
'username',
'pending_task_stats',
'ssl_verify',
'ssl_cert',
'ssl_key',
])
class ESCheck(AgentCheck):
SERVICE_CHECK_CONNECT_NAME = 'elasticsearch.can_connect'
SERVICE_CHECK_CLUSTER_STATUS = 'elasticsearch.cluster_health'
DEFAULT_TIMEOUT = 5
    # Cluster-wide metrics, pre-aggregated by ES, compatible with all ES versions
PRIMARY_SHARD_METRICS = {
"elasticsearch.primaries.docs.count": ("gauge", "_all.primaries.docs.count"),
"elasticsearch.primaries.docs.deleted": ("gauge", "_all.primaries.docs.deleted"),
"elasticsearch.primaries.store.size": ("gauge", "_all.primaries.store.size_in_bytes"),
"elasticsearch.primaries.indexing.index.total": ("gauge", "_all.primaries.indexing.index_total"),
"elasticsearch.primaries.indexing.index.time": ("gauge", "_all.primaries.indexing.index_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.primaries.indexing.index.current": ("gauge", "_all.primaries.indexing.index_current"),
"elasticsearch.primaries.indexing.delete.total": ("gauge", "_all.primaries.indexing.delete_total"),
"elasticsearch.primaries.indexing.delete.time": ("gauge", "_all.primaries.indexing.delete_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.primaries.indexing.delete.current": ("gauge", "_all.primaries.indexing.delete_current"),
"elasticsearch.primaries.get.total": ("gauge", "_all.primaries.get.total"),
"elasticsearch.primaries.get.time": ("gauge", "_all.primaries.get.time_in_millis", lambda v: float(v)/1000),
"elasticsearch.primaries.get.current": ("gauge", "_all.primaries.get.current"),
"elasticsearch.primaries.get.exists.total": ("gauge", "_all.primaries.get.exists_total"),
"elasticsearch.primaries.get.exists.time": ("gauge", "_all.primaries.get.exists_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.primaries.get.missing.total": ("gauge", "_all.primaries.get.missing_total"),
"elasticsearch.primaries.get.missing.time": ("gauge", "_all.primaries.get.missing_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.primaries.search.query.total": ("gauge", "_all.primaries.search.query_total"),
"elasticsearch.primaries.search.query.time": ("gauge", "_all.primaries.search.query_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.primaries.search.query.current": ("gauge", "_all.primaries.search.query_current"),
"elasticsearch.primaries.search.fetch.total": ("gauge", "_all.primaries.search.fetch_total"),
"elasticsearch.primaries.search.fetch.time": ("gauge", "_all.primaries.search.fetch_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.primaries.search.fetch.current": ("gauge", "_all.primaries.search.fetch_current"),
"elasticsearch.indices.count": ("gauge", "indices", lambda indices: len(indices))
}
PRIMARY_SHARD_METRICS_POST_1_0 = {
"elasticsearch.primaries.merges.current": ("gauge", "_all.primaries.merges.current"),
"elasticsearch.primaries.merges.current.docs": ("gauge", "_all.primaries.merges.current_docs"),
"elasticsearch.primaries.merges.current.size": ("gauge", "_all.primaries.merges.current_size_in_bytes"),
"elasticsearch.primaries.merges.total": ("gauge", "_all.primaries.merges.total"),
"elasticsearch.primaries.merges.total.time": ("gauge", "_all.primaries.merges.total_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.primaries.merges.total.docs": ("gauge", "_all.primaries.merges.total_docs"),
"elasticsearch.primaries.merges.total.size": ("gauge", "_all.primaries.merges.total_size_in_bytes"),
"elasticsearch.primaries.refresh.total": ("gauge", "_all.primaries.refresh.total"),
"elasticsearch.primaries.refresh.total.time": ("gauge", "_all.primaries.refresh.total_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.primaries.flush.total": ("gauge", "_all.primaries.flush.total"),
"elasticsearch.primaries.flush.total.time": ("gauge", "_all.primaries.flush.total_time_in_millis", lambda v: float(v)/1000)
}
STATS_METRICS = { # Metrics that are common to all Elasticsearch versions
"elasticsearch.docs.count": ("gauge", "indices.docs.count"),
"elasticsearch.docs.deleted": ("gauge", "indices.docs.deleted"),
"elasticsearch.store.size": ("gauge", "indices.store.size_in_bytes"),
"elasticsearch.indexing.index.total": ("gauge", "indices.indexing.index_total"),
"elasticsearch.indexing.index.time": ("gauge", "indices.indexing.index_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.indexing.index.current": ("gauge", "indices.indexing.index_current"),
"elasticsearch.indexing.delete.total": ("gauge", "indices.indexing.delete_total"),
"elasticsearch.indexing.delete.time": ("gauge", "indices.indexing.delete_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.indexing.delete.current": ("gauge", "indices.indexing.delete_current"),
"elasticsearch.get.total": ("gauge", "indices.get.total"),
"elasticsearch.get.time": ("gauge", "indices.get.time_in_millis", lambda v: float(v)/1000),
"elasticsearch.get.current": ("gauge", "indices.get.current"),
"elasticsearch.get.exists.total": ("gauge", "indices.get.exists_total"),
"elasticsearch.get.exists.time": ("gauge", "indices.get.exists_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.get.missing.total": ("gauge", "indices.get.missing_total"),
"elasticsearch.get.missing.time": ("gauge", "indices.get.missing_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.search.query.total": ("gauge", "indices.search.query_total"),
"elasticsearch.search.query.time": ("gauge", "indices.search.query_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.search.query.current": ("gauge", "indices.search.query_current"),
"elasticsearch.search.fetch.total": ("gauge", "indices.search.fetch_total"),
"elasticsearch.search.fetch.time": ("gauge", "indices.search.fetch_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.search.fetch.current": ("gauge", "indices.search.fetch_current"),
"elasticsearch.indices.segments.count": ("gauge", "indices.segments.count"),
"elasticsearch.indices.segments.memory_in_bytes": ("gauge", "indices.segments.memory_in_bytes"),
"elasticsearch.merges.current": ("gauge", "indices.merges.current"),
"elasticsearch.merges.current.docs": ("gauge", "indices.merges.current_docs"),
"elasticsearch.merges.current.size": ("gauge", "indices.merges.current_size_in_bytes"),
"elasticsearch.merges.total": ("gauge", "indices.merges.total"),
"elasticsearch.merges.total.time": ("gauge", "indices.merges.total_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.merges.total.docs": ("gauge", "indices.merges.total_docs"),
"elasticsearch.merges.total.size": ("gauge", "indices.merges.total_size_in_bytes"),
"elasticsearch.refresh.total": ("gauge", "indices.refresh.total"),
"elasticsearch.refresh.total.time": ("gauge", "indices.refresh.total_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.flush.total": ("gauge", "indices.flush.total"),
"elasticsearch.flush.total.time": ("gauge", "indices.flush.total_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.process.open_fd": ("gauge", "process.open_file_descriptors"),
"elasticsearch.transport.rx_count": ("gauge", "transport.rx_count"),
"elasticsearch.transport.tx_count": ("gauge", "transport.tx_count"),
"elasticsearch.transport.rx_size": ("gauge", "transport.rx_size_in_bytes"),
"elasticsearch.transport.tx_size": ("gauge", "transport.tx_size_in_bytes"),
"elasticsearch.transport.server_open": ("gauge", "transport.server_open"),
"elasticsearch.thread_pool.bulk.active": ("gauge", "thread_pool.bulk.active"),
"elasticsearch.thread_pool.bulk.threads": ("gauge", "thread_pool.bulk.threads"),
"elasticsearch.thread_pool.bulk.queue": ("gauge", "thread_pool.bulk.queue"),
"elasticsearch.thread_pool.bulk.rejected": ("rate", "thread_pool.bulk.rejected"),
"elasticsearch.thread_pool.flush.active": ("gauge", "thread_pool.flush.active"),
"elasticsearch.thread_pool.flush.threads": ("gauge", "thread_pool.flush.threads"),
"elasticsearch.thread_pool.flush.queue": ("gauge", "thread_pool.flush.queue"),
"elasticsearch.thread_pool.flush.rejected": ("rate", "thread_pool.flush.rejected"),
"elasticsearch.thread_pool.generic.active": ("gauge", "thread_pool.generic.active"),
"elasticsearch.thread_pool.generic.threads": ("gauge", "thread_pool.generic.threads"),
"elasticsearch.thread_pool.generic.queue": ("gauge", "thread_pool.generic.queue"),
"elasticsearch.thread_pool.generic.rejected": ("rate", "thread_pool.generic.rejected"),
"elasticsearch.thread_pool.get.active": ("gauge", "thread_pool.get.active"),
"elasticsearch.thread_pool.get.threads": ("gauge", "thread_pool.get.threads"),
"elasticsearch.thread_pool.get.queue": ("gauge", "thread_pool.get.queue"),
"elasticsearch.thread_pool.get.rejected": ("rate", "thread_pool.get.rejected"),
"elasticsearch.thread_pool.index.active": ("gauge", "thread_pool.index.active"),
"elasticsearch.thread_pool.index.threads": ("gauge", "thread_pool.index.threads"),
"elasticsearch.thread_pool.index.queue": ("gauge", "thread_pool.index.queue"),
"elasticsearch.thread_pool.index.rejected": ("rate", "thread_pool.index.rejected"),
"elasticsearch.thread_pool.management.active": ("gauge", "thread_pool.management.active"),
"elasticsearch.thread_pool.management.threads": ("gauge", "thread_pool.management.threads"),
"elasticsearch.thread_pool.management.queue": ("gauge", "thread_pool.management.queue"),
"elasticsearch.thread_pool.management.rejected": ("rate", "thread_pool.management.rejected"),
"elasticsearch.thread_pool.percolate.active": ("gauge", "thread_pool.percolate.active"),
"elasticsearch.thread_pool.percolate.threads": ("gauge", "thread_pool.percolate.threads"),
"elasticsearch.thread_pool.percolate.queue": ("gauge", "thread_pool.percolate.queue"),
"elasticsearch.thread_pool.percolate.rejected": ("rate", "thread_pool.percolate.rejected"),
"elasticsearch.thread_pool.refresh.active": ("gauge", "thread_pool.refresh.active"),
"elasticsearch.thread_pool.refresh.threads": ("gauge", "thread_pool.refresh.threads"),
"elasticsearch.thread_pool.refresh.queue": ("gauge", "thread_pool.refresh.queue"),
"elasticsearch.thread_pool.refresh.rejected": ("rate", "thread_pool.refresh.rejected"),
"elasticsearch.thread_pool.search.active": ("gauge", "thread_pool.search.active"),
"elasticsearch.thread_pool.search.threads": ("gauge", "thread_pool.search.threads"),
"elasticsearch.thread_pool.search.queue": ("gauge", "thread_pool.search.queue"),
"elasticsearch.thread_pool.search.rejected": ("rate", "thread_pool.search.rejected"),
"elasticsearch.thread_pool.snapshot.active": ("gauge", "thread_pool.snapshot.active"),
"elasticsearch.thread_pool.snapshot.threads": ("gauge", "thread_pool.snapshot.threads"),
"elasticsearch.thread_pool.snapshot.queue": ("gauge", "thread_pool.snapshot.queue"),
"elasticsearch.thread_pool.snapshot.rejected": ("rate", "thread_pool.snapshot.rejected"),
"elasticsearch.thread_pool.suggest.active": ("gauge", "thread_pool.suggest.active"),
"elasticsearch.thread_pool.suggest.threads": ("gauge", "thread_pool.suggest.threads"),
"elasticsearch.thread_pool.suggest.queue": ("gauge", "thread_pool.suggest.queue"),
"elasticsearch.thread_pool.suggest.rejected": ("rate", "thread_pool.suggest.rejected"),
"elasticsearch.thread_pool.warmer.active": ("gauge", "thread_pool.warmer.active"),
"elasticsearch.thread_pool.warmer.threads": ("gauge", "thread_pool.warmer.threads"),
"elasticsearch.thread_pool.warmer.queue": ("gauge", "thread_pool.warmer.queue"),
"elasticsearch.thread_pool.warmer.rejected": ("rate", "thread_pool.warmer.rejected"),
"elasticsearch.http.current_open": ("gauge", "http.current_open"),
"elasticsearch.http.total_opened": ("gauge", "http.total_opened"),
"jvm.mem.heap_committed": ("gauge", "jvm.mem.heap_committed_in_bytes"),
"jvm.mem.heap_used": ("gauge", "jvm.mem.heap_used_in_bytes"),
"jvm.mem.heap_in_use": ("gauge", "jvm.mem.heap_used_percent"),
"jvm.mem.heap_max": ("gauge", "jvm.mem.heap_max_in_bytes"),
"jvm.mem.non_heap_committed": ("gauge", "jvm.mem.non_heap_committed_in_bytes"),
"jvm.mem.non_heap_used": ("gauge", "jvm.mem.non_heap_used_in_bytes"),
"jvm.threads.count": ("gauge", "jvm.threads.count"),
"jvm.threads.peak_count": ("gauge", "jvm.threads.peak_count"),
"elasticsearch.fs.total.total_in_bytes": ("gauge", "fs.total.total_in_bytes"),
"elasticsearch.fs.total.free_in_bytes": ("gauge", "fs.total.free_in_bytes"),
"elasticsearch.fs.total.available_in_bytes": ("gauge", "fs.total.available_in_bytes"),
}
JVM_METRICS_POST_0_90_10 = {
"jvm.gc.collectors.young.count": ("gauge", "jvm.gc.collectors.young.collection_count"),
"jvm.gc.collectors.young.collection_time": ("gauge", "jvm.gc.collectors.young.collection_time_in_millis", lambda v: float(v)/1000),
"jvm.gc.collectors.old.count": ("gauge", "jvm.gc.collectors.old.collection_count"),
"jvm.gc.collectors.old.collection_time": ("gauge", "jvm.gc.collectors.old.collection_time_in_millis", lambda v: float(v)/1000)
}
JVM_METRICS_PRE_0_90_10 = {
"jvm.gc.concurrent_mark_sweep.count": ("gauge", "jvm.gc.collectors.ConcurrentMarkSweep.collection_count"),
"jvm.gc.concurrent_mark_sweep.collection_time": ("gauge", "jvm.gc.collectors.ConcurrentMarkSweep.collection_time_in_millis", lambda v: float(v)/1000),
"jvm.gc.par_new.count": ("gauge", "jvm.gc.collectors.ParNew.collection_count"),
"jvm.gc.par_new.collection_time": ("gauge", "jvm.gc.collectors.ParNew.collection_time_in_millis", lambda v: float(v)/1000),
"jvm.gc.collection_count": ("gauge", "jvm.gc.collection_count"),
"jvm.gc.collection_time": ("gauge", "jvm.gc.collection_time_in_millis", lambda v: float(v)/1000),
}
ADDITIONAL_METRICS_POST_0_90_5 = {
"elasticsearch.search.fetch.open_contexts": ("gauge", "indices.search.open_contexts"),
"elasticsearch.fielddata.size": ("gauge", "indices.fielddata.memory_size_in_bytes"),
"elasticsearch.fielddata.evictions": ("gauge", "indices.fielddata.evictions"),
}
ADDITIONAL_METRICS_POST_0_90_5_PRE_2_0 = {
"elasticsearch.cache.filter.evictions": ("gauge", "indices.filter_cache.evictions"),
"elasticsearch.cache.filter.size": ("gauge", "indices.filter_cache.memory_size_in_bytes"),
"elasticsearch.id_cache.size": ("gauge", "indices.id_cache.memory_size_in_bytes"),
}
ADDITIONAL_METRICS_PRE_0_90_5 = {
"elasticsearch.cache.field.evictions": ("gauge", "indices.cache.field_evictions"),
"elasticsearch.cache.field.size": ("gauge", "indices.cache.field_size_in_bytes"),
"elasticsearch.cache.filter.count": ("gauge", "indices.cache.filter_count"),
"elasticsearch.cache.filter.evictions": ("gauge", "indices.cache.filter_evictions"),
"elasticsearch.cache.filter.size": ("gauge", "indices.cache.filter_size_in_bytes"),
}
ADDITIONAL_METRICS_POST_1_0_0 = {
"elasticsearch.indices.translog.size_in_bytes": ("gauge", "indices.translog.size_in_bytes"),
"elasticsearch.indices.translog.operations": ("gauge", "indices.translog.operations"),
}
ADDITIONAL_METRICS_1_x = { # Stats are only valid for v1.x
"elasticsearch.fs.total.disk_reads": ("rate", "fs.total.disk_reads"),
"elasticsearch.fs.total.disk_writes": ("rate", "fs.total.disk_writes"),
"elasticsearch.fs.total.disk_io_op": ("rate", "fs.total.disk_io_op"),
"elasticsearch.fs.total.disk_read_size_in_bytes": ("gauge", "fs.total.disk_read_size_in_bytes"),
"elasticsearch.fs.total.disk_write_size_in_bytes": ("gauge", "fs.total.disk_write_size_in_bytes"),
"elasticsearch.fs.total.disk_io_size_in_bytes": ("gauge", "fs.total.disk_io_size_in_bytes"),
}
ADDITIONAL_METRICS_POST_1_3_0 = {
"elasticsearch.indices.segments.index_writer_memory_in_bytes": ("gauge", "indices.segments.index_writer_memory_in_bytes"),
"elasticsearch.indices.segments.version_map_memory_in_bytes": ("gauge", "indices.segments.version_map_memory_in_bytes"),
}
ADDITIONAL_METRICS_POST_1_4_0 = {
"elasticsearch.indices.indexing.throttle_time": ("rate", "indices.indexing.throttle_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.indices.query_cache.memory_size_in_bytes": ("gauge", "indices.query_cache.memory_size_in_bytes"),
"elasticsearch.indices.query_cache.hit_count": ("rate", "indices.query_cache.hit_count"),
"elasticsearch.indices.query_cache.miss_count": ("rate", "indices.query_cache.miss_count"),
"elasticsearch.indices.query_cache.evictions": ("rate", "indices.query_cache.evictions"),
"elasticsearch.indices.segments.index_writer_max_memory_in_bytes": ("gauge", "indices.segments.index_writer_max_memory_in_bytes"),
"elasticsearch.indices.segments.fixed_bit_set_memory_in_bytes": ("gauge", "indices.segments.fixed_bit_set_memory_in_bytes"),
"elasticsearch.breakers.fielddata.estimated_size_in_bytes": ("gauge", "breakers.fielddata.estimated_size_in_bytes"),
"elasticsearch.breakers.fielddata.overhead": ("gauge", "breakers.fielddata.overhead"),
"elasticsearch.breakers.fielddata.tripped": ("rate", "breakers.fielddata.tripped"),
"elasticsearch.breakers.parent.estimated_size_in_bytes": ("gauge", "breakers.parent.estimated_size_in_bytes"),
"elasticsearch.breakers.parent.overhead": ("gauge", "breakers.parent.overhead"),
"elasticsearch.breakers.parent.tripped": ("rate", "breakers.parent.tripped"),
"elasticsearch.breakers.request.estimated_size_in_bytes": ("gauge", "breakers.request.estimated_size_in_bytes"),
"elasticsearch.breakers.request.overhead": ("gauge", "breakers.request.overhead"),
"elasticsearch.breakers.request.tripped": ("rate", "breakers.request.tripped"),
"elasticsearch.thread_pool.listener.active": ("gauge", "thread_pool.listener.active"),
"elasticsearch.thread_pool.listener.threads": ("gauge", "thread_pool.listener.threads"),
"elasticsearch.thread_pool.listener.queue": ("gauge", "thread_pool.listener.queue"),
"elasticsearch.thread_pool.listener.rejected": ("rate", "thread_pool.listener.rejected"),
}
ADDITIONAL_METRICS_POST_1_5_0 = {
"elasticsearch.indices.recovery.current_as_source": ("gauge", "indices.recovery.current_as_source"),
"elasticsearch.indices.recovery.current_as_target": ("gauge", "indices.recovery.current_as_target"),
"elasticsearch.indices.recovery.throttle_time": ("rate", "indices.recovery.throttle_time_in_millis", lambda v: float(v)/1000),
}
ADDITIONAL_METRICS_POST_1_6_0 = {
"elasticsearch.thread_pool.fetch_shard_started.active": ("gauge", "thread_pool.fetch_shard_started.active"),
"elasticsearch.thread_pool.fetch_shard_started.threads": ("gauge", "thread_pool.fetch_shard_started.threads"),
"elasticsearch.thread_pool.fetch_shard_started.queue": ("gauge", "thread_pool.fetch_shard_started.queue"),
"elasticsearch.thread_pool.fetch_shard_started.rejected": ("rate", "thread_pool.fetch_shard_started.rejected"),
"elasticsearch.thread_pool.fetch_shard_store.active": ("gauge", "thread_pool.fetch_shard_store.active"),
"elasticsearch.thread_pool.fetch_shard_store.threads": ("gauge", "thread_pool.fetch_shard_store.threads"),
"elasticsearch.thread_pool.fetch_shard_store.queue": ("gauge", "thread_pool.fetch_shard_store.queue"),
"elasticsearch.thread_pool.fetch_shard_store.rejected": ("rate", "thread_pool.fetch_shard_store.rejected"),
}
ADDITIONAL_METRICS_PRE_2_0 = {
"elasticsearch.thread_pool.merge.active": ("gauge", "thread_pool.merge.active"),
"elasticsearch.thread_pool.merge.threads": ("gauge", "thread_pool.merge.threads"),
"elasticsearch.thread_pool.merge.queue": ("gauge", "thread_pool.merge.queue"),
"elasticsearch.thread_pool.merge.rejected": ("rate", "thread_pool.merge.rejected"),
}
ADDITIONAL_METRICS_POST_2_0 = {
# Some of these may very well exist in previous ES versions, but not worth the time/effort
# to find where they were introduced
"elasticsearch.indices.query_cache.cache_size": ("gauge", "indices.query_cache.cache_size"),
"elasticsearch.indices.query_cache.cache_count": ("rate", "indices.query_cache.cache_count"),
"elasticsearch.indices.query_cache.total_count": ("rate", "indices.query_cache.total_count"),
"elasticsearch.indices.segments.doc_values_memory_in_bytes": ("gauge", "indices.segments.doc_values_memory_in_bytes"),
"elasticsearch.indices.segments.norms_memory_in_bytes": ("gauge", "indices.segments.norms_memory_in_bytes"),
"elasticsearch.indices.segments.stored_fields_memory_in_bytes": ("gauge", "indices.segments.stored_fields_memory_in_bytes"),
"elasticsearch.indices.segments.term_vectors_memory_in_bytes": ("gauge", "indices.segments.term_vectors_memory_in_bytes"),
"elasticsearch.indices.segments.terms_memory_in_bytes": ("gauge", "indices.segments.terms_memory_in_bytes"),
"elasticsearch.indices.request_cache.memory_size_in_bytes": ("gauge", "indices.request_cache.memory_size_in_bytes"),
"elasticsearch.indices.request_cache.evictions": ("rate", "indices.request_cache.evictions"),
"elasticsearch.indices.request_cache.hit_count": ("rate", "indices.request_cache.hit_count"),
"elasticsearch.indices.request_cache.miss_count": ("rate", "indices.request_cache.miss_count"),
}
ADDITIONAL_METRICS_POST_2_1 = {
"elasticsearch.indices.indexing.index_failed": ("rate", "indices.indexing.index_failed"),
"elasticsearch.thread_pool.force_merge.active": ("gauge", "thread_pool.force_merge.active"),
"elasticsearch.thread_pool.force_merge.threads": ("gauge", "thread_pool.force_merge.threads"),
"elasticsearch.thread_pool.force_merge.queue": ("gauge", "thread_pool.force_merge.queue"),
"elasticsearch.thread_pool.force_merge.rejected": ("rate", "thread_pool.force_merge.rejected"),
}
CLUSTER_HEALTH_METRICS = {
"elasticsearch.number_of_nodes": ("gauge", "number_of_nodes"),
"elasticsearch.number_of_data_nodes": ("gauge", "number_of_data_nodes"),
"elasticsearch.active_primary_shards": ("gauge", "active_primary_shards"),
"elasticsearch.active_shards": ("gauge", "active_shards"),
"elasticsearch.relocating_shards": ("gauge", "relocating_shards"),
"elasticsearch.initializing_shards": ("gauge", "initializing_shards"),
"elasticsearch.unassigned_shards": ("gauge", "unassigned_shards"),
"elasticsearch.cluster_status": ("gauge", "status", lambda v: {"red": 0, "yellow": 1, "green": 2}.get(v, -1)),
}
CLUSTER_PENDING_TASKS = {
"elasticsearch.pending_tasks_total": ("gauge", "pending_task_total"),
"elasticsearch.pending_tasks_priority_high": ("gauge", "pending_tasks_priority_high"),
"elasticsearch.pending_tasks_priority_urgent": ("gauge", "pending_tasks_priority_urgent")
}
SOURCE_TYPE_NAME = 'elasticsearch'
def __init__(self, name, init_config, agentConfig, instances=None):
AgentCheck.__init__(self, name, init_config, agentConfig, instances)
# Host status needs to persist across all checks
self.cluster_status = {}
def get_instance_config(self, instance):
url = instance.get('url')
if url is None:
raise Exception("A URL must be specified in the instance")
pshard_stats = _is_affirmative(instance.get('pshard_stats', False))
cluster_stats = _is_affirmative(instance.get('cluster_stats', False))
if 'is_external' in instance:
cluster_stats = _is_affirmative(instance.get('is_external', False))
pending_task_stats = _is_affirmative(instance.get('pending_task_stats', True))
# Support URLs that have a path in them from the config, for
# backwards-compatibility.
parsed = urlparse.urlparse(url)
if parsed[2] != "":
url = "%s://%s" % (parsed[0], parsed[1])
port = parsed.port
host = parsed.hostname
custom_tags = instance.get('tags', [])
service_check_tags = [
'host:%s' % host,
'port:%s' % port
]
service_check_tags.extend(custom_tags)
# Tag by URL so we can differentiate the metrics
# from multiple instances
tags = ['url:%s' % url]
tags.extend(custom_tags)
timeout = instance.get('timeout') or self.DEFAULT_TIMEOUT
config = ESInstanceConfig(
pshard_stats=pshard_stats,
cluster_stats=cluster_stats,
password=instance.get('password'),
service_check_tags=service_check_tags,
ssl_cert=instance.get('ssl_cert'),
ssl_key=instance.get('ssl_key'),
ssl_verify=instance.get('ssl_verify'),
tags=tags,
timeout=timeout,
url=url,
username=instance.get('username'),
pending_task_stats=pending_task_stats
)
return config
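    # A hypothetical minimal instance entry (keys mirror the reads in
    # get_instance_config above; values are illustrative only):
    #
    #   - url: http://localhost:9200
    #     username: admin
    #     password: s3cr3t
    #     cluster_stats: false
    #     pshard_stats: false
    #     pending_task_stats: true
    #     tags:
    #       - env:example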
def check(self, instance):
config = self.get_instance_config(instance)
# Check ES version for this instance and define parameters
# (URLs and metrics) accordingly
version = self._get_es_version(config)
health_url, stats_url, pshard_stats_url, pending_tasks_url, stats_metrics, \
pshard_stats_metrics = self._define_params(version, config.cluster_stats)
# Load stats data.
# This must happen before other URL processing as the cluster name
        # is retrieved here and added to the tag list.
stats_url = urlparse.urljoin(config.url, stats_url)
stats_data = self._get_data(stats_url, config)
if stats_data['cluster_name']:
            # retrieve the cluster name from the data, and append it to the
# master tag list.
config.tags.append("cluster_name:{}".format(stats_data['cluster_name']))
self._process_stats_data(stats_data, stats_metrics, config)
        # Load cluster-wide data
if config.pshard_stats:
pshard_stats_url = urlparse.urljoin(config.url, pshard_stats_url)
pshard_stats_data = self._get_data(pshard_stats_url, config)
self._process_pshard_stats_data(pshard_stats_data, config, pshard_stats_metrics)
# Load the health data.
health_url = urlparse.urljoin(config.url, health_url)
health_data = self._get_data(health_url, config)
self._process_health_data(health_data, config)
if config.pending_task_stats:
# Load the pending_tasks data.
pending_tasks_url = urlparse.urljoin(config.url, pending_tasks_url)
pending_tasks_data = self._get_data(pending_tasks_url, config)
self._process_pending_tasks_data(pending_tasks_data, config)
# If we're here we did not have any ES conn issues
self.service_check(
self.SERVICE_CHECK_CONNECT_NAME,
AgentCheck.OK,
tags=config.service_check_tags
)
def _get_es_version(self, config):
""" Get the running version of elasticsearch.
"""
try:
data = self._get_data(config.url, config, send_sc=False)
            # pre-release versions of elasticsearch are suffixed with -rcX etc.;
# peel that off so that the map below doesn't error out
version = data['version']['number'].split('-')[0]
version = map(int, version.split('.')[0:3])
except Exception as e:
self.warning(
"Error while trying to get Elasticsearch version "
"from %s %s"
% (config.url, str(e))
)
version = [1, 0, 0]
self.service_metadata('version', version)
self.log.debug("Elasticsearch version is %s" % version)
return version
def _define_params(self, version, cluster_stats):
""" Define the set of URLs and METRICS to use depending on the
running ES version.
"""
pshard_stats_url = "/_stats"
if version >= [0, 90, 10]:
# ES versions 0.90.10 and above
health_url = "/_cluster/health?pretty=true"
pending_tasks_url = "/_cluster/pending_tasks?pretty=true"
# For "external" clusters, we want to collect from all nodes.
if cluster_stats:
stats_url = "/_nodes/stats"
else:
stats_url = "/_nodes/_local/stats"
if version < [5, 0, 0]:
# version 5 errors out if the `all` parameter is set
stats_url += "?all=true"
additional_metrics = self.JVM_METRICS_POST_0_90_10
else:
health_url = "/_cluster/health?pretty=true"
pending_tasks_url = None
if cluster_stats:
stats_url = "/_cluster/nodes/stats?all=true"
else:
stats_url = "/_cluster/nodes/_local/stats?all=true"
additional_metrics = self.JVM_METRICS_PRE_0_90_10
stats_metrics = dict(self.STATS_METRICS)
stats_metrics.update(additional_metrics)
### Additional Stats metrics ###
if version >= [0, 90, 5]:
# ES versions 0.90.5 and above
additional_metrics = self.ADDITIONAL_METRICS_POST_0_90_5
else:
# ES version 0.90.4 and below
additional_metrics = self.ADDITIONAL_METRICS_PRE_0_90_5
stats_metrics.update(additional_metrics)
if version >= [1, 0, 0]:
stats_metrics.update(self.ADDITIONAL_METRICS_POST_1_0_0)
if version < [2, 0, 0]:
stats_metrics.update(self.ADDITIONAL_METRICS_PRE_2_0)
if version >= [0, 90, 5]:
stats_metrics.update(self.ADDITIONAL_METRICS_POST_0_90_5_PRE_2_0)
if version >= [1, 0, 0]:
stats_metrics.update(self.ADDITIONAL_METRICS_1_x)
if version >= [1, 3, 0]:
stats_metrics.update(self.ADDITIONAL_METRICS_POST_1_3_0)
if version >= [1, 4, 0]:
# ES versions 1.4 and above
stats_metrics.update(self.ADDITIONAL_METRICS_POST_1_4_0)
if version >= [1, 5, 0]:
stats_metrics.update(self.ADDITIONAL_METRICS_POST_1_5_0)
if version >= [1, 6, 0]:
stats_metrics.update(self.ADDITIONAL_METRICS_POST_1_6_0)
if version >= [2, 0, 0]:
stats_metrics.update(self.ADDITIONAL_METRICS_POST_2_0)
if version >= [2, 1, 0]:
stats_metrics.update(self.ADDITIONAL_METRICS_POST_2_1)
# Version specific stats metrics about the primary shards
pshard_stats_metrics = dict(self.PRIMARY_SHARD_METRICS)
if version >= [1, 0, 0]:
additional_metrics = self.PRIMARY_SHARD_METRICS_POST_1_0
pshard_stats_metrics.update(additional_metrics)
return health_url, stats_url, pshard_stats_url, pending_tasks_url, \
stats_metrics, pshard_stats_metrics
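    # Illustrative outcome of the branches above: for ES 1.4.x with
    # cluster_stats=False this returns health_url "/_cluster/health?pretty=true",
    # stats_url "/_nodes/_local/stats?all=true", pshard_stats_url "/_stats",
    # and pending_tasks_url "/_cluster/pending_tasks?pretty=true".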
def _get_data(self, url, config, send_sc=True):
""" Hit a given URL and return the parsed json
"""
# Load basic authentication configuration, if available.
if config.username and config.password:
auth = (config.username, config.password)
else:
auth = None
# Load SSL configuration, if available.
# ssl_verify can be a bool or a string (http://docs.python-requests.org/en/latest/user/advanced/#ssl-cert-verification)
if isinstance(config.ssl_verify, bool) or isinstance(config.ssl_verify, str):
verify = config.ssl_verify
else:
verify = None
if config.ssl_cert and config.ssl_key:
cert = (config.ssl_cert, config.ssl_key)
elif config.ssl_cert:
cert = config.ssl_cert
else:
cert = None
try:
resp = requests.get(
url,
timeout=config.timeout,
headers=headers(self.agentConfig),
auth=auth,
verify=verify,
cert=cert
)
resp.raise_for_status()
except Exception as e:
if send_sc:
self.service_check(
self.SERVICE_CHECK_CONNECT_NAME,
AgentCheck.CRITICAL,
message="Error {0} when hitting {1}".format(e, url),
tags=config.service_check_tags
)
raise
return resp.json()
def _process_pending_tasks_data(self, data, config):
p_tasks = defaultdict(int)
for task in data.get('tasks', []):
p_tasks[task.get('priority')] += 1
node_data = {
'pending_task_total': sum(p_tasks.values()),
'pending_tasks_priority_high': p_tasks['high'],
'pending_tasks_priority_urgent': p_tasks['urgent'],
}
for metric in self.CLUSTER_PENDING_TASKS:
# metric description
desc = self.CLUSTER_PENDING_TASKS[metric]
self._process_metric(node_data, metric, *desc, tags=config.tags)
def _process_stats_data(self, data, stats_metrics, config):
cluster_stats = config.cluster_stats
for node_data in data['nodes'].itervalues():
metric_hostname = None
metrics_tags = list(config.tags)
# Resolve the node's name
node_name = node_data.get('name')
if node_name:
metrics_tags.append(
u"node_name:{}".format(node_name)
)
# Resolve the node's hostname
if cluster_stats:
for k in ['hostname', 'host']:
if k in node_data:
metric_hostname = node_data[k]
break
for metric, desc in stats_metrics.iteritems():
self._process_metric(
node_data, metric, *desc,
tags=metrics_tags, hostname=metric_hostname
)
def _process_pshard_stats_data(self, data, config, pshard_stats_metrics):
        # Process the number of indices in the cluster
        if "indices" in data:
            self.gauge("elasticsearch.indices.count", len(data["indices"]),
                       tags=config.tags)
for metric, desc in pshard_stats_metrics.iteritems():
self._process_metric(data, metric, *desc, tags=config.tags)
def _process_metric(self, data, metric, xtype, path, xform=None,
tags=None, hostname=None):
"""data: dictionary containing all the stats
metric: stackstate metric
path: corresponding path in data, flattened, e.g. thread_pool.bulk.queue
xfom: a lambda to apply to the numerical value
"""
value = data
# Traverse the nested dictionaries
for key in path.split('.'):
if value is not None:
value = value.get(key, None)
else:
break
if value is not None:
if xform:
value = xform(value)
if xtype == "gauge":
self.gauge(metric, value, tags=tags, hostname=hostname)
else:
self.rate(metric, value, tags=tags, hostname=hostname)
else:
self._metric_not_found(metric, path)
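    # Sketch of the traversal above on made-up data (not from a real payload):
    #   data = {"thread_pool": {"bulk": {"queue": 3}}}
    #   path = "thread_pool.bulk.queue"  ->  value = 3
    # A key missing at any level leaves value as None, so the metric is
    # logged as not found instead of raising.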
def _process_health_data(self, data, config):
if self.cluster_status.get(config.url) is None:
self.cluster_status[config.url] = data['status']
if data['status'] in ["yellow", "red"]:
event = self._create_event(data['status'], tags=config.tags)
self.event(event)
if data['status'] != self.cluster_status.get(config.url):
self.cluster_status[config.url] = data['status']
event = self._create_event(data['status'], tags=config.tags)
self.event(event)
for metric, desc in self.CLUSTER_HEALTH_METRICS.iteritems():
self._process_metric(data, metric, *desc, tags=config.tags)
# Process the service check
cluster_status = data['status']
if cluster_status == 'green':
status = AgentCheck.OK
data['tag'] = "OK"
elif cluster_status == 'yellow':
status = AgentCheck.WARNING
data['tag'] = "WARN"
else:
status = AgentCheck.CRITICAL
data['tag'] = "ALERT"
msg = "{tag} on cluster \"{cluster_name}\" "\
"| active_shards={active_shards} "\
"| initializing_shards={initializing_shards} "\
"| relocating_shards={relocating_shards} "\
"| unassigned_shards={unassigned_shards} "\
"| timed_out={timed_out}" \
.format(**data)
self.service_check(
self.SERVICE_CHECK_CLUSTER_STATUS,
status,
message=msg,
tags=config.service_check_tags
)
def _metric_not_found(self, metric, path):
self.log.debug("Metric not found: %s -> %s", path, metric)
def _create_event(self, status, tags=None):
hostname = self.hostname.decode('utf-8')
if status == "red":
alert_type = "error"
msg_title = "%s is %s" % (hostname, status)
elif status == "yellow":
alert_type = "warning"
msg_title = "%s is %s" % (hostname, status)
else:
# then it should be green
alert_type = "success"
msg_title = "%s recovered as %s" % (hostname, status)
msg = "ElasticSearch: %s just reported as %s" % (hostname, status)
return {
'timestamp': int(time.time()),
'event_type': 'elasticsearch',
'host': hostname,
'msg_text': msg,
'msg_title': msg_title,
'alert_type': alert_type,
'source_type_name': "elasticsearch",
'event_object': hostname,
'tags': tags
}
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for contrib.seq2seq.python.seq2seq.loss_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.seq2seq.python.ops import loss
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class LossTest(test.TestCase):
def config_default_values(self):
self.batch_size = 2
self.sequence_length = 3
self.number_of_classes = 5
logits = [
constant_op.constant(i + 0.5, shape=[self.batch_size,
self.number_of_classes])
for i in range(self.sequence_length)
]
self.logits = array_ops.stack(logits, axis=1)
targets = [
constant_op.constant(i, dtypes.int32, shape=[self.batch_size])
for i in range(self.sequence_length)
]
self.targets = array_ops.stack(targets, axis=1)
weights = [
constant_op.constant(1.0, shape=[self.batch_size])
for _ in range(self.sequence_length)
]
self.weights = array_ops.stack(weights, axis=1)
# expected_loss = sparse_softmax_cross_entropy_with_logits(targets, logits)
# where targets = [0, 1, 2], and logits = [[0.5] * 5, [1.5] * 5, [2.5] * 5]
self.expected_loss = 1.60944
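    # Since each logit row is constant across the 5 classes, the softmax is
    # uniform and the per-element cross entropy is -log(1/5) = log(5)
    # ~= 1.60944, regardless of the target class.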
def testSequenceLoss(self):
self.config_default_values()
with self.cached_session(use_gpu=True):
average_loss_per_example = loss.sequence_loss(
self.logits, self.targets, self.weights,
average_across_timesteps=True,
average_across_batch=True)
res = self.evaluate(average_loss_per_example)
self.assertAllClose(self.expected_loss, res)
average_loss_per_sequence = loss.sequence_loss(
self.logits, self.targets, self.weights,
average_across_timesteps=False,
average_across_batch=True)
res = self.evaluate(average_loss_per_sequence)
compare_per_sequence = np.full((self.sequence_length), self.expected_loss)
self.assertAllClose(compare_per_sequence, res)
average_loss_per_batch = loss.sequence_loss(
self.logits, self.targets, self.weights,
average_across_timesteps=True,
average_across_batch=False)
res = self.evaluate(average_loss_per_batch)
compare_per_batch = np.full((self.batch_size), self.expected_loss)
self.assertAllClose(compare_per_batch, res)
total_loss = loss.sequence_loss(
self.logits, self.targets, self.weights,
average_across_timesteps=False,
average_across_batch=False)
res = self.evaluate(total_loss)
compare_total = np.full((self.batch_size, self.sequence_length),
self.expected_loss)
self.assertAllClose(compare_total, res)
def testSequenceLossClass(self):
self.config_default_values()
with self.cached_session(use_gpu=True):
seq_loss = loss.SequenceLoss(average_across_timesteps=True,
average_across_batch=True,
sum_over_timesteps=False,
sum_over_batch=False)
average_loss_per_example = seq_loss(
self.targets, self.logits, self.weights)
res = self.evaluate(average_loss_per_example)
self.assertAllClose(self.expected_loss, res)
seq_loss = loss.SequenceLoss(average_across_timesteps=False,
average_across_batch=True,
sum_over_timesteps=False,
sum_over_batch=False)
average_loss_per_sequence = seq_loss(
self.targets, self.logits, self.weights)
res = self.evaluate(average_loss_per_sequence)
compare_per_sequence = np.full((self.sequence_length), self.expected_loss)
self.assertAllClose(compare_per_sequence, res)
seq_loss = loss.SequenceLoss(average_across_timesteps=True,
average_across_batch=False,
sum_over_timesteps=False,
sum_over_batch=False)
average_loss_per_batch = seq_loss(
self.targets, self.logits, self.weights)
res = self.evaluate(average_loss_per_batch)
compare_per_batch = np.full((self.batch_size), self.expected_loss)
self.assertAllClose(compare_per_batch, res)
seq_loss = loss.SequenceLoss(average_across_timesteps=False,
average_across_batch=False,
sum_over_timesteps=False,
sum_over_batch=False)
total_loss = seq_loss(
self.targets, self.logits, self.weights)
res = self.evaluate(total_loss)
compare_total = np.full((self.batch_size, self.sequence_length),
self.expected_loss)
self.assertAllClose(compare_total, res)
def testSumReduction(self):
self.config_default_values()
with self.cached_session(use_gpu=True):
seq_loss = loss.SequenceLoss(average_across_timesteps=False,
average_across_batch=False,
sum_over_timesteps=True,
sum_over_batch=True)
average_loss_per_example = seq_loss(
self.targets, self.logits, self.weights)
res = self.evaluate(average_loss_per_example)
self.assertAllClose(self.expected_loss, res)
seq_loss = loss.SequenceLoss(average_across_timesteps=False,
average_across_batch=False,
sum_over_timesteps=False,
sum_over_batch=True)
average_loss_per_sequence = seq_loss(
self.targets, self.logits, self.weights)
res = self.evaluate(average_loss_per_sequence)
compare_per_sequence = np.full((self.sequence_length), self.expected_loss)
self.assertAllClose(compare_per_sequence, res)
seq_loss = loss.SequenceLoss(average_across_timesteps=False,
average_across_batch=False,
sum_over_timesteps=True,
sum_over_batch=False)
average_loss_per_batch = seq_loss(
self.targets, self.logits, self.weights)
res = self.evaluate(average_loss_per_batch)
compare_per_batch = np.full((self.batch_size), self.expected_loss)
self.assertAllClose(compare_per_batch, res)
seq_loss = loss.SequenceLoss(average_across_timesteps=False,
average_across_batch=False,
sum_over_timesteps=False,
sum_over_batch=False)
total_loss = seq_loss(
self.targets, self.logits, self.weights)
res = self.evaluate(total_loss)
compare_total = np.full((self.batch_size, self.sequence_length),
self.expected_loss)
self.assertAllClose(compare_total, res)
def testWeightedSumReduction(self):
self.config_default_values()
weights = [
constant_op.constant(1.0, shape=[self.batch_size])
for _ in range(self.sequence_length)
]
    # Give the last element in the sequence zero weight.
weights[-1] = constant_op.constant(0.0, shape=[self.batch_size])
self.weights = array_ops.stack(weights, axis=1)
with self.cached_session(use_gpu=True):
seq_loss = loss.SequenceLoss(average_across_timesteps=False,
average_across_batch=False,
sum_over_timesteps=True,
sum_over_batch=True)
average_loss_per_example = seq_loss(
self.targets, self.logits, self.weights)
res = self.evaluate(average_loss_per_example)
self.assertAllClose(self.expected_loss, res)
seq_loss = loss.SequenceLoss(average_across_timesteps=False,
average_across_batch=False,
sum_over_timesteps=False,
sum_over_batch=True)
average_loss_per_sequence = seq_loss(
self.targets, self.logits, self.weights)
res = self.evaluate(average_loss_per_sequence)
compare_per_sequence = np.full((self.sequence_length), self.expected_loss)
      # The last element of every sequence is zero-weighted, so it is filtered out.
compare_per_sequence[-1] = 0.
self.assertAllClose(compare_per_sequence, res)
seq_loss = loss.SequenceLoss(average_across_timesteps=False,
average_across_batch=False,
sum_over_timesteps=True,
sum_over_batch=False)
average_loss_per_batch = seq_loss(self.targets, self.logits, self.weights)
res = self.evaluate(average_loss_per_batch)
compare_per_batch = np.full((self.batch_size), self.expected_loss)
self.assertAllClose(compare_per_batch, res)
seq_loss = loss.SequenceLoss(average_across_timesteps=False,
average_across_batch=False,
sum_over_timesteps=False,
sum_over_batch=False)
total_loss = seq_loss(self.targets, self.logits, self.weights)
res = self.evaluate(total_loss)
compare_total = np.full((self.batch_size, self.sequence_length),
self.expected_loss)
      # The last element of every sequence is zero-weighted, so it is filtered out.
compare_total[:, -1] = 0
self.assertAllClose(compare_total, res)
def testZeroWeights(self):
self.config_default_values()
weights = [
constant_op.constant(0.0, shape=[self.batch_size])
for _ in range(self.sequence_length)
]
weights = array_ops.stack(weights, axis=1)
with self.cached_session(use_gpu=True):
average_loss_per_example = loss.sequence_loss(
self.logits, self.targets, weights,
average_across_timesteps=True,
average_across_batch=True)
res = self.evaluate(average_loss_per_example)
self.assertAllClose(0.0, res)
average_loss_per_sequence = loss.sequence_loss(
self.logits, self.targets, weights,
average_across_timesteps=False,
average_across_batch=True)
res = self.evaluate(average_loss_per_sequence)
compare_per_sequence = np.zeros((self.sequence_length))
self.assertAllClose(compare_per_sequence, res)
average_loss_per_batch = loss.sequence_loss(
self.logits, self.targets, weights,
average_across_timesteps=True,
average_across_batch=False)
res = self.evaluate(average_loss_per_batch)
compare_per_batch = np.zeros((self.batch_size))
self.assertAllClose(compare_per_batch, res)
total_loss = loss.sequence_loss(
self.logits, self.targets, weights,
average_across_timesteps=False,
average_across_batch=False)
res = self.evaluate(total_loss)
compare_total = np.zeros((self.batch_size, self.sequence_length))
self.assertAllClose(compare_total, res)
if __name__ == '__main__':
test.main()
|
|
# class analyzer
# analyze a text, returning some stats and the most frequent words
import nltk
import string
import numpy
import re
from nltk.stem.porter import *
from nltk.corpus import stopwords
from nltk import wordpunct_tokenize
from nltk.stem.snowball import SnowballStemmer
from nltk.stem.wordnet import WordNetLemmatizer
from tools import tokenizer
from tools import sum_luhn
from tools import summary
N_MOST_FREQ_WORDS = 20 #number of returned most freq words
N = 170 # number of words to consider
class Analyzer():
"""Class to analyzer the text
"""
    # initializing the Analyzer object
def __init__(self,txt):
self.sentences = 0
self.words = 0
self.unique_words = 0
self.hapaxes = 0
self.most_freq_words = []
self.most_freq_stem_words = []
self.most_freq_stem_words_wn = []
self.entities = {}
self._process(txt)
def _process(self,txt):
""" The function process the text and returning the results in the class object
Args:
txt: input text to be analyzed.
Returns: True
"""
self.lang = self.detect_lang(txt)
words = []
stemmed_words = []
stemmed_words_wordnet = []
if self.lang != '':
stop_words = nltk.corpus.stopwords.words(self.lang)
try:
stemmer = SnowballStemmer(self.lang,ignore_stopwords=True)
except:
print "WARNING: the Snowball stemmer is not supporting the detected language. Assuming english."
stemmer = SnowballStemmer("english",ignore_stopwords=True)
else:
print "WARNING: no detected language. Assuming english."
stop_words = nltk.corpus.stopwords.words('english')
self.STOP_WORDS = stop_words+list(string.punctuation)+['``','\'\'','\'s','--']
# tokenizing sentences - EOS detection
sentences = tokenizer.get_sentences(txt)
self.sentences = len(sentences)
# tokenizing words
words = tokenizer.get_words(sentences)
# extract most freq words
self.most_freq_words = self.get_most_freq_words(words,type='original')
# extract most freq words in a stemmed words list using SnowBall stemmer
# and filtering here the stop words
for w in words:
if w not in self.STOP_WORDS:
stemmed_words.append(stemmer.stem(w))
self.most_freq_stem_words = self.get_most_freq_words(stemmed_words,type='stemmed')
# extract most freq words in a stemmed words list using WordNetLemmatizer
# and filtering here the stop words
lmtz = WordNetLemmatizer()
for w in words:
if w not in self.STOP_WORDS:
stemmed_words_wordnet.append(lmtz.lemmatize(w))
self.most_freq_stem_words_wn = self.get_most_freq_words(stemmed_words_wordnet,type='stemmed')
        # extracting and saving the Entities
self.entities = self.get_entities(sentences)
# extracting the two Luhn summaries
self.summary_top_n, self.summary_mean_scored = sum_luhn.summarize(sentences,self.STOP_WORDS)
        # extracting the intersection summary
self.abs_summary = summary.Summary(txt)
return 1
def get_most_freq_words(self,words,type):
""" Function receive a word list and return a list with the N most freq words
Args:
words: the words list
type: define if the words list is the original one or a stemmed one
Returns:
a list with the N_MOST_FREQ_WORDS most frequent words
"""
most_freq_words = []
words_cleaned = []
        ## some custom cleaning for very badly formatted text
for w in words:
words_cleaned.append(re.sub(r'\.', ' ', w))
# getting the words frequency distribution
fdist = nltk.FreqDist(words_cleaned)
# saving the words counts only with the full words list
if type == 'original':
self.words = sum([i[1] for i in fdist.items()])
self.unique_words = len(fdist.keys())
# Hapaxes are words that appear only once
self.hapaxes = len(fdist.hapaxes())
words_no_stop = dict([w for w in fdist.items() if w[0] not in self.STOP_WORDS])
# ordering the results
keys_most_freq_words = sorted(words_no_stop, key=words_no_stop.__getitem__, reverse=True)
for k in keys_most_freq_words:
if len(k)>1:
most_freq_words.append((k,words_no_stop[k]))
# return the result in ordered list
return most_freq_words[:N_MOST_FREQ_WORDS]
def detect_lang(self,text):
""" Returns the detected language.
Args:
text: input text
Returns:
            the detected language string
"""
language_ratio = {}
words = wordpunct_tokenize(text)
for language in stopwords.fileids():
stopwords_set = set(stopwords.words(language))
words_set = set(words)
common_words = words_set.intersection(stopwords_set)
language_ratio[language] = len(common_words)
detected_lang = max(language_ratio, key=language_ratio.get)
return detected_lang
def stemmer(self,word):
""" Returns the stemmed version of the input word using a Porter Stemmer.
"""
stemm = PorterStemmer()
return stemm.stem(word)
def get_entities(self,sentences):
""" The function returns the dictionary containing the results for
the Name Entity Recognition analyze.
Args:
sentences: the sentences list.
Returns:
            dictionary: entity label -> sorted list of entity strings
"""
entities = dict([])
# Tokenization
tokens = [nltk.tokenize.word_tokenize(s) for s in sentences]
# Part-Of-Speech tagging
pos_tagged_tokens = [nltk.pos_tag(t) for t in tokens]
# Chunking
chunked_nes = [nltk.ne_chunk(c) for c in pos_tagged_tokens]
for tree in chunked_nes:
for s in tree.subtrees(lambda t: (t.height()==2)):
if s.label()!='S':
entity = ' '.join(i[0] for i in s.leaves())
if s.label() in entities.keys():
if entity not in entities[s.label()]:
entities[s.label()].append(entity)
entities[s.label()].sort()
else:
entities[s.label()] = [entity]
return entities
if __name__ == "__main__":
main()
|
|
from __future__ import division, absolute_import, print_function
__all__ = ['memmap']
import warnings
import sys
import numpy as np
from .numeric import uint8, ndarray, dtype
from numpy.compat import long, basestring
dtypedescr = dtype
valid_filemodes = ["r", "c", "r+", "w+"]
writeable_filemodes = ["r+", "w+"]
mode_equivalents = {
"readonly":"r",
"copyonwrite":"c",
"readwrite":"r+",
"write":"w+"
}
class memmap(ndarray):
"""Create a memory-map to an array stored in a *binary* file on disk.
Memory-mapped files are used for accessing small segments of large files
    on disk, without reading the entire file into memory. NumPy's
    memmaps are array-like objects. This differs from Python's ``mmap``
module, which uses file-like objects.
This subclass of ndarray has some unpleasant interactions with
some operations, because it doesn't quite fit properly as a subclass.
An alternative to using this subclass is to create the ``mmap``
object yourself, then create an ndarray with ndarray.__new__ directly,
passing the object created in its 'buffer=' parameter.
This class may at some point be turned into a factory function
which returns a view into an mmap buffer.
Delete the memmap instance to close.
Parameters
----------
filename : str or file-like object
The file name or file object to be used as the array data buffer.
dtype : data-type, optional
The data-type used to interpret the file contents.
Default is `uint8`.
mode : {'r+', 'r', 'w+', 'c'}, optional
The file is opened in this mode:
+------+-------------------------------------------------------------+
| 'r' | Open existing file for reading only. |
+------+-------------------------------------------------------------+
| 'r+' | Open existing file for reading and writing. |
+------+-------------------------------------------------------------+
| 'w+' | Create or overwrite existing file for reading and writing. |
+------+-------------------------------------------------------------+
| 'c' | Copy-on-write: assignments affect data in memory, but |
| | changes are not saved to disk. The file on disk is |
| | read-only. |
+------+-------------------------------------------------------------+
Default is 'r+'.
offset : int, optional
In the file, array data starts at this offset. Since `offset` is
measured in bytes, it should normally be a multiple of the byte-size
    of `dtype`. When ``mode != 'r'``, even positive offsets beyond end of
    file are valid; the file will be extended to accommodate the
additional data. By default, ``memmap`` will start at the beginning of
the file, even if ``filename`` is a file pointer ``fp`` and
``fp.tell() != 0``.
shape : tuple, optional
The desired shape of the array. If ``mode == 'r'`` and the number
of remaining bytes after `offset` is not a multiple of the byte-size
of `dtype`, you must specify `shape`. By default, the returned array
will be 1-D with the number of elements determined by file size
and data-type.
order : {'C', 'F'}, optional
Specify the order of the ndarray memory layout:
:term:`row-major`, C-style or :term:`column-major`,
Fortran-style. This only has an effect if the shape is
greater than 1-D. The default order is 'C'.
Attributes
----------
filename : str
Path to the mapped file.
offset : int
Offset position in the file.
mode : str
File mode.
Methods
-------
flush
Flush any changes in memory to file on disk.
When you delete a memmap object, flush is called first to write
changes to disk before removing the object.
Notes
-----
The memmap object can be used anywhere an ndarray is accepted.
Given a memmap ``fp``, ``isinstance(fp, numpy.ndarray)`` returns
``True``.
Memory-mapped arrays use the Python memory-map object which
(prior to Python 2.5) does not allow files to be larger than a
certain size depending on the platform. This size is always < 2GB
even on 64-bit systems.
When a memmap causes a file to be created or extended beyond its
current size in the filesystem, the contents of the new part are
unspecified. On systems with POSIX filesystem semantics, the extended
part will be filled with zero bytes.
Examples
--------
>>> data = np.arange(12, dtype='float32')
>>> data.resize((3,4))
This example uses a temporary file so that doctest doesn't write
files to your directory. You would use a 'normal' filename.
>>> from tempfile import mkdtemp
>>> import os.path as path
>>> filename = path.join(mkdtemp(), 'newfile.dat')
Create a memmap with dtype and shape that matches our data:
>>> fp = np.memmap(filename, dtype='float32', mode='w+', shape=(3,4))
>>> fp
memmap([[ 0., 0., 0., 0.],
[ 0., 0., 0., 0.],
[ 0., 0., 0., 0.]], dtype=float32)
Write data to memmap array:
>>> fp[:] = data[:]
>>> fp
memmap([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.]], dtype=float32)
>>> fp.filename == path.abspath(filename)
True
Deletion flushes memory changes to disk before removing the object:
>>> del fp
Load the memmap and verify data was stored:
>>> newfp = np.memmap(filename, dtype='float32', mode='r', shape=(3,4))
>>> newfp
memmap([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.]], dtype=float32)
Read-only memmap:
>>> fpr = np.memmap(filename, dtype='float32', mode='r', shape=(3,4))
>>> fpr.flags.writeable
False
Copy-on-write memmap:
>>> fpc = np.memmap(filename, dtype='float32', mode='c', shape=(3,4))
>>> fpc.flags.writeable
True
It's possible to assign to copy-on-write array, but values are only
written into the memory copy of the array, and not written to disk:
>>> fpc
memmap([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.]], dtype=float32)
>>> fpc[0,:] = 0
>>> fpc
memmap([[ 0., 0., 0., 0.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.]], dtype=float32)
File on disk is unchanged:
>>> fpr
memmap([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.]], dtype=float32)
Offset into a memmap:
>>> fpo = np.memmap(filename, dtype='float32', mode='r', offset=16)
>>> fpo
memmap([ 4., 5., 6., 7., 8., 9., 10., 11.], dtype=float32)
"""
__array_priority__ = -100.0
def __new__(subtype, filename, dtype=uint8, mode='r+', offset=0,
shape=None, order='C'):
# Import here to minimize 'import numpy' overhead
import mmap
import os.path
try:
mode = mode_equivalents[mode]
except KeyError:
if mode not in valid_filemodes:
raise ValueError("mode must be one of %s" %
(valid_filemodes + list(mode_equivalents.keys())))
if hasattr(filename, 'read'):
fid = filename
own_file = False
else:
            fid = open(filename, ('r' if mode == 'c' else mode) + 'b')
own_file = True
if (mode == 'w+') and shape is None:
raise ValueError("shape must be given")
fid.seek(0, 2)
flen = fid.tell()
descr = dtypedescr(dtype)
_dbytes = descr.itemsize
if shape is None:
bytes = flen - offset
if (bytes % _dbytes):
fid.close()
raise ValueError("Size of available data is not a "
"multiple of the data-type size.")
size = bytes // _dbytes
shape = (size,)
else:
if not isinstance(shape, tuple):
shape = (shape,)
size = 1
for k in shape:
size *= k
bytes = long(offset + size*_dbytes)
if mode == 'w+' or (mode == 'r+' and flen < bytes):
fid.seek(bytes - 1, 0)
fid.write(np.compat.asbytes('\0'))
fid.flush()
if mode == 'c':
acc = mmap.ACCESS_COPY
elif mode == 'r':
acc = mmap.ACCESS_READ
else:
acc = mmap.ACCESS_WRITE
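        # mmap requires the mapping offset to be a multiple of
        # mmap.ALLOCATIONGRANULARITY, so map from the nearest aligned
        # position at or below `offset` and keep the remainder as the
        # ndarray's offset into the mapped buffer.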
start = offset - offset % mmap.ALLOCATIONGRANULARITY
bytes -= start
offset -= start
mm = mmap.mmap(fid.fileno(), bytes, access=acc, offset=start)
self = ndarray.__new__(subtype, shape, dtype=descr, buffer=mm,
offset=offset, order=order)
self._mmap = mm
self.offset = offset
self.mode = mode
if isinstance(filename, basestring):
self.filename = os.path.abspath(filename)
# py3 returns int for TemporaryFile().name
elif (hasattr(filename, "name") and
isinstance(filename.name, basestring)):
self.filename = os.path.abspath(filename.name)
# same as memmap copies (e.g. memmap + 1)
else:
self.filename = None
if own_file:
fid.close()
return self
def __array_finalize__(self, obj):
if hasattr(obj, '_mmap') and np.may_share_memory(self, obj):
self._mmap = obj._mmap
self.filename = obj.filename
self.offset = obj.offset
self.mode = obj.mode
else:
self._mmap = None
self.filename = None
self.offset = None
self.mode = None
def flush(self):
"""
Write any changes in the array to the file on disk.
For further information, see `memmap`.
Parameters
----------
None
See Also
--------
memmap
"""
if self.base is not None and hasattr(self.base, 'flush'):
self.base.flush()
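# Hedged sketch of the alternative the class docstring mentions: create the
# ``mmap`` object yourself and wrap it in a plain ndarray via the 'buffer='
# parameter. `_ndarray_over_mmap` is a hypothetical helper for illustration;
# it assumes the file already holds at least shape * itemsize bytes.
def _ndarray_over_mmap(filename, shape, dtype='float32'):
    import mmap
    with open(filename, 'rb') as f:
        mm = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
    # The resulting ndarray is a read-only view; the mmap stays alive for
    # as long as the array holds a reference to it.
    return ndarray(shape, dtype=dtype, buffer=mm)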
|
|
from contextlib import contextmanager
from jinja2 import Undefined
from werkzeug.local import LocalProxy
from werkzeug.local import LocalStack
from lektor.reporter import reporter
_ctx_stack = LocalStack()
def url_to(*args, **kwargs):
"""Calculates a URL to another record."""
ctx = get_ctx()
if ctx is None:
raise RuntimeError("No context found")
return ctx.url_to(*args, **kwargs)
def get_asset_url(asset):
"""Calculates the asset URL relative to the current record."""
ctx = get_ctx()
if ctx is None:
raise RuntimeError("No context found")
asset = site_proxy.get_asset(asset)
if asset is None:
return Undefined("Asset not found")
info = ctx.build_state.get_file_info(asset.source_filename)
return "%s?h=%s" % (
ctx.source.url_to("!" + asset.url_path),
info.checksum[:8],
)
@LocalProxy
def site_proxy():
"""Returns the current pad."""
ctx = get_ctx()
if ctx is None:
return Undefined(hint="Cannot access the site from here", name="site")
return ctx.pad
@LocalProxy
def config_proxy():
"""Returns the current config."""
return site_proxy.db.config
def get_ctx():
"""Returns the current context."""
return _ctx_stack.top
def get_locale(default="en_US"):
"""Returns the current locale."""
ctx = get_ctx()
if ctx is not None:
rv = ctx.locale
if rv is not None:
return rv
return ctx.pad.db.config.site_locale
return default
class Context:
"""The context is a thread local object that provides the system with
general information about in which state it is. The context is created
whenever a source is processed and can be accessed by template engine and
other things.
It's considered read and write and also accumulates changes that happen
during processing of the object.
"""
def __init__(self, artifact=None, pad=None):
if pad is None:
if artifact is None:
                raise TypeError(
                    "Either artifact or pad is needed to construct a context."
                )
pad = artifact.build_state.pad
if artifact is not None:
self.artifact = artifact
self.source = artifact.source_obj
self.build_state = self.artifact.build_state
else:
self.artifact = None
self.source = None
self.build_state = None
self.exc_info = None
self.pad = pad
# Processing information
self.referenced_dependencies = set()
self.referenced_virtual_dependencies = {}
self.sub_artifacts = []
self.flow_block_render_stack = []
self._forced_base_url = None
# General cache system where other things can put their temporary
# stuff in.
self.cache = {}
self._dependency_collectors = []
@property
def env(self):
"""The environment of the context."""
return self.pad.db.env
@property
def record(self):
"""If the source is a record it will be available here."""
rv = self.source
if rv is not None and rv.source_classification == "record":
return rv
return None
@property
def locale(self):
"""Returns the current locale if it's available, otherwise `None`.
This does not fall back to the site locale.
"""
source = self.source
if source is not None:
alt_cfg = self.pad.db.config["ALTERNATIVES"].get(source.alt)
if alt_cfg:
return alt_cfg["locale"]
return None
def push(self):
_ctx_stack.push(self)
@staticmethod
def pop():
_ctx_stack.pop()
def __enter__(self):
self.push()
return self
def __exit__(self, exc_type, exc_value, tb):
self.pop()
@property
def base_url(self):
"""The URL path for the current context."""
if self._forced_base_url:
return self._forced_base_url
if self.source is not None:
return self.source.url_path
return "/"
def url_to(self, path, alt=None, absolute=None, external=None):
"""Returns a URL to another path."""
if self.source is None:
raise RuntimeError(
"Can only generate paths to other pages if "
"the context has a source document set."
)
rv = self.source.url_to(path, alt=alt, absolute=True)
return self.pad.make_url(rv, self.base_url, absolute, external)
def sub_artifact(self, *args, **kwargs):
"""Decorator version of :func:`add_sub_artifact`."""
def decorator(f):
self.add_sub_artifact(build_func=f, *args, **kwargs)
return f
return decorator
def add_sub_artifact(
self,
artifact_name,
build_func=None,
sources=None,
source_obj=None,
config_hash=None,
):
"""Sometimes it can happen that while building an artifact another
artifact needs building. This function is generally used to record
this request.
"""
if self.build_state is None:
raise TypeError(
"The context does not have a build state which "
"means that artifact declaration is not possible."
)
aft = self.build_state.new_artifact(
artifact_name=artifact_name,
sources=sources,
source_obj=source_obj,
config_hash=config_hash,
)
self.sub_artifacts.append((aft, build_func))
reporter.report_sub_artifact(aft)
def record_dependency(self, filename):
"""Records a dependency from processing."""
self.referenced_dependencies.add(filename)
for coll in self._dependency_collectors:
coll(filename)
def record_virtual_dependency(self, virtual_source):
"""Records a dependency from processing."""
path = virtual_source.path
self.referenced_virtual_dependencies[path] = virtual_source
for coll in self._dependency_collectors:
coll(virtual_source)
@contextmanager
def gather_dependencies(self, func):
"""For the duration of the `with` block the provided function will be
invoked for all dependencies encountered.
"""
self._dependency_collectors.append(func)
try:
yield
finally:
self._dependency_collectors.pop()
@contextmanager
def changed_base_url(self, value):
"""Temporarily overrides the URL path of the context."""
old = self._forced_base_url
self._forced_base_url = value
try:
yield
finally:
self._forced_base_url = old
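# Hedged usage sketch: pushing a Context makes it visible through get_ctx()
# for the duration of a `with` block. `pad` is a hypothetical Pad instance
# supplied by the caller; `_demo_context` is not part of the Lektor API.
def _demo_context(pad):
    with Context(pad=pad) as ctx:
        assert get_ctx() is ctx  # the context is now on the local stack
        ctx.record_dependency('content/page.lr')  # illustrative path
    # Leaving the block pops the stack, restoring the previous context
    # (or None if the stack was empty before).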
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.conf import settings # noqa
from django.core.urlresolvers import reverse # noqa
from django.utils.translation import ugettext_lazy as _ # noqa
from horizon import exceptions
from horizon import forms
from horizon import workflows
from openstack_dashboard import api
from openstack_dashboard.dashboards.admin.domains import constants
LOG = logging.getLogger(__name__)
class CreateDomainInfoAction(workflows.Action):
name = forms.CharField(label=_("Name"),
required=True)
description = forms.CharField(widget=forms.widgets.Textarea(),
label=_("Description"),
required=False)
enabled = forms.BooleanField(label=_("Enabled"),
required=False,
initial=True)
class Meta:
name = _("Domain Info")
slug = "create_domain"
help_text = _("From here you can create a new domain to organize "
"projects, groups and users.")
class CreateDomainInfo(workflows.Step):
action_class = CreateDomainInfoAction
contributes = ("domain_id",
"name",
"description",
"enabled")
class UpdateDomainGroupsAction(workflows.MembershipAction):
def __init__(self, request, *args, **kwargs):
super(UpdateDomainGroupsAction, self).__init__(request,
*args,
**kwargs)
err_msg = _('Unable to retrieve group list. Please try again later.')
domain_id = ''
if 'domain_id' in args[0]:
domain_id = args[0]['domain_id']
# Get the default role
try:
default_role = api.keystone.get_default_role(self.request)
# Default role is necessary to add members to a domain
if default_role is None:
default = getattr(settings,
"OPENSTACK_KEYSTONE_DEFAULT_ROLE", None)
msg = _('Could not find default role "%s" in Keystone') % \
default
raise exceptions.NotFound(msg)
except Exception:
exceptions.handle(self.request,
err_msg,
redirect=reverse(constants.DOMAINS_INDEX_URL))
default_role_name = self.get_default_role_field_name()
self.fields[default_role_name] = forms.CharField(required=False)
self.fields[default_role_name].initial = default_role.id
# Get list of available groups
all_groups = []
try:
all_groups = api.keystone.group_list(request,
domain=domain_id)
except Exception:
exceptions.handle(request, err_msg)
groups_list = [(group.id, group.name) for group in all_groups]
# Get list of roles
role_list = []
try:
role_list = api.keystone.role_list(request)
except Exception:
exceptions.handle(request,
err_msg,
redirect=reverse(constants.DOMAINS_INDEX_URL))
for role in role_list:
field_name = self.get_member_field_name(role.id)
label = _(role.name)
self.fields[field_name] = forms.MultipleChoiceField(required=False,
label=label)
self.fields[field_name].choices = groups_list
self.fields[field_name].initial = []
# Figure out groups & roles
if domain_id:
for group in all_groups:
try:
roles = api.keystone.roles_for_group(self.request,
group=group.id,
domain=domain_id)
except Exception:
exceptions.handle(request,
err_msg,
redirect=reverse(
constants.DOMAINS_INDEX_URL))
for role in roles:
field_name = self.get_member_field_name(role.id)
self.fields[field_name].initial.append(group.id)
class Meta:
name = _("Domain Groups")
slug = constants.DOMAIN_GROUP_MEMBER_SLUG
class UpdateDomainGroups(workflows.UpdateMembersStep):
action_class = UpdateDomainGroupsAction
available_list_title = _("All Groups")
members_list_title = _("Domain Groups")
no_available_text = _("No groups found.")
no_members_text = _("No groups.")
def contribute(self, data, context):
if data:
try:
roles = api.keystone.role_list(self.workflow.request)
except Exception:
exceptions.handle(self.workflow.request,
_('Unable to retrieve role list.'))
post = self.workflow.request.POST
for role in roles:
field = self.get_member_field_name(role.id)
context[field] = post.getlist(field)
return context
class CreateDomain(workflows.Workflow):
slug = "create_domain"
name = _("Create Domain")
finalize_button_name = _("Create Domain")
success_message = _('Created new domain "%s".')
failure_message = _('Unable to create domain "%s".')
success_url = constants.DOMAINS_INDEX_URL
default_steps = (CreateDomainInfo, )
def format_status_message(self, message):
return message % self.context.get('name', 'unknown domain')
def handle(self, request, data):
# create the domain
try:
LOG.info('Creating domain with name "%s"' % data['name'])
desc = data['description']
api.keystone.domain_create(request,
name=data['name'],
description=desc,
enabled=data['enabled'])
except Exception:
exceptions.handle(request, ignore=True)
return False
return True
class UpdateDomainInfoAction(CreateDomainInfoAction):
class Meta:
name = _("Domain Info")
slug = 'update_domain'
help_text = _("From here you can edit the domain details.")
class UpdateDomainInfo(workflows.Step):
action_class = UpdateDomainInfoAction
depends_on = ("domain_id",)
contributes = ("name",
"description",
"enabled")
class UpdateDomain(workflows.Workflow):
slug = "update_domain"
name = _("Edit Domain")
finalize_button_name = _("Save")
success_message = _('Modified domain "%s".')
failure_message = _('Unable to modify domain "%s".')
success_url = constants.DOMAINS_INDEX_URL
default_steps = (UpdateDomainInfo,
UpdateDomainGroups)
def format_status_message(self, message):
return message % self.context.get('name', 'unknown domain')
def handle(self, request, data):
domain_id = data.pop('domain_id')
try:
LOG.info('Updating domain with name "%s"' % data['name'])
api.keystone.domain_update(request,
domain_id=domain_id,
name=data['name'],
description=data['description'],
enabled=data['enabled'])
except Exception:
exceptions.handle(request, ignore=True)
return False
# update domain groups
groups_to_modify = 0
member_step = self.get_step(constants.DOMAIN_GROUP_MEMBER_SLUG)
try:
# Get our role options
available_roles = api.keystone.role_list(request)
# Get the groups currently associated with this domain so we
# can diff against it.
domain_groups = api.keystone.group_list(request,
domain=domain_id)
groups_to_modify = len(domain_groups)
for group in domain_groups:
# Check if there have been any changes in the roles of
# Existing domain members.
current_roles = api.keystone.roles_for_group(
self.request,
group=group.id,
domain=domain_id)
current_role_ids = [role.id for role in current_roles]
for role in available_roles:
# Check if the group is in the list of groups with
# this role.
field_name = member_step.get_member_field_name(role.id)
if group.id in data[field_name]:
# Add it if necessary
if role.id not in current_role_ids:
# group role has changed
api.keystone.add_group_role(
request,
role=role.id,
group=group.id,
domain=domain_id)
else:
# Group role is unchanged, so remove it from
# the remaining roles list to avoid removing it
# later
index = current_role_ids.index(role.id)
current_role_ids.pop(index)
# Revoke any removed roles.
for id_to_delete in current_role_ids:
api.keystone.remove_group_role(request,
role=id_to_delete,
group=group.id,
domain=domain_id)
groups_to_modify -= 1
# Grant new roles on the domain.
for role in available_roles:
field_name = member_step.get_member_field_name(role.id)
# Count how many groups may be added for error handling.
groups_to_modify += len(data[field_name])
for role in available_roles:
groups_added = 0
field_name = member_step.get_member_field_name(role.id)
for group_id in data[field_name]:
                    if not any(group_id == x.id for x in domain_groups):
api.keystone.add_group_role(request,
role=role.id,
group=group_id,
domain=domain_id)
groups_added += 1
groups_to_modify -= groups_added
except Exception:
            exceptions.handle(request, _('Failed to modify %s domain groups.')
                              % groups_to_modify)
return True
return True
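# Hedged standalone sketch of the role-diff idea UpdateDomain.handle() above
# implements inline: compare a group's current role ids with the ids
# requested in the form, yielding the grants to add and to revoke. The names
# are illustrative and not part of the Horizon API.
def _diff_group_roles(current_role_ids, requested_role_ids):
    current = set(current_role_ids)
    requested = set(requested_role_ids)
    return requested - current, current - requested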
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: bigip_asm_policy_signature_set
short_description: Manages Signature Sets on an ASM policy
description:
- Manages Signature Sets on an ASM policy.
version_added: "1.0.0"
options:
name:
description:
- Specifies the name of the signature sets to apply on, or remove from, the ASM policy.
- Apart from built-in signature sets that ship with the device, you can create and use
custom signature sets.
- When C(All Response Signatures), configures all signatures in the attack signature
pool that can review responses.
- When C(All Signatures), configures all attack signatures in the attack signature pool.
- When C(Apache Struts Signatures), configures signatures that target attacks against
the Apache Struts web servers. Only available in version 13.x and later.
- When C(Apache Tomcat Signatures), configures signatures that target attacks against
the Apache Tomcat web servers. Only available in version 13.x and later.
- When C(Cisco Signatures), configures signatures that target attacks against Cisco systems.
Only available in version 13.x and later.
- When C(Command Execution Signatures), configures signatures involving attacks perpetrated by executing commands.
- When C(Cross Site Scripting Signatures), configures signatures that target attacks caused
by cross-site scripting techniques.
- When C(Directory Indexing Signatures), configures signatures targeting attacks that browse directory listings.
- When C(Generic Detection Signatures), configures signatures targeting well-known
or common web and application attacks.
- When C(HTTP Response Splitting Signatures), configures signatures targeting attacks that
take advantage of responses for which input values have not been sanitized.
- When C(High Accuracy Detection Evasion Signatures), configures signatures with a high level of accuracy
that produce few false positives when identifying evasion attacks. Only available in version 13.x and later.
- When C(High Accuracy Signatures), configures signatures with a high level of accuracy
that produce few false positives when identifying evasion attacks.
- When C(IIS and Windows Signatures), configures signatures that target attacks against IIS
and Windows-based systems. Only available in version 13.x and later.
- When C(Information Leakage Signatures), configures signatures targeting attacks that are looking for system data
or debugging information that shows where the system is vulnerable to attack.
- When C(Java Servlets/JSP Signatures), configures signatures that target attacks against Java Servlets
and Java Server Pages (JSP) based applications. Only available in version 13.x and later.
- When C(Low Accuracy Signatures), configures signatures that may result in more false positives
when identifying attacks.
- When C(Medium Accuracy Signatures), configures signatures with a medium level of accuracy
when identifying attacks.
- When C(OS Command Injection Signatures), configures signatures targeting attacks
that attempt to run system level commands through a vulnerable application.
- When C(OWA Signatures), configures signatures that target attacks against
the Microsoft Outlook Web Access (OWA) application.
- When C(Other Application Attacks Signatures), configures signatures targeting miscellaneous attacks,
including session fixation, local file access, injection attempts, header tampering
and so on, affecting many applications.
- When C(Path Traversal Signatures), configures signatures targeting attacks that attempt to access files
and directories that are stored outside the web root folder.
- When C(Predictable Resource Location Signatures), configures signatures targeting attacks that attempt
to uncover hidden website content and functionality by forceful browsing, or by directory and file enumeration.
- When C(Remote File Include Signatures), configures signatures targeting attacks that attempt to exploit
a remote file include vulnerability that could enable a remote attacker to execute arbitrary commands
on the server hosting the application.
- When C(SQL Injection Signatures), configures signatures targeting attacks that attempt to insert (inject)
a SQL query using the input data from a client to an application.
- When C(Server Side Code Injection Signatures), configures signatures targeting code injection attacks
on the server side.
- When C(WebSphere signatures), configures signatures targeting attacks on many computing platforms
that are integrated using WebSphere, including general database, Microsoft Windows, IIS,
Microsoft SQL Server, Apache, Oracle, Unix/Linux, IBM DB2, PostgreSQL, and XML.
- When C(XPath Injection Signatures), configures signatures targeting attacks that attempt to gain access
to data structures or bypass permissions when a web site uses user-supplied information
to construct XPath queries for XML data.
type: str
required: True
policy_name:
description:
      - Specifies the name of an existing ASM policy to add signature sets to, or remove them from.
type: str
required: True
alarm:
description:
- Specifies if the security policy logs the request data in the Statistics screen
when a request matches a signature that is included in the signature set.
type: bool
block:
description:
- Effective when the security policy enforcement mode is Blocking.
- Determines how the system treats requests that match a signature included in the signature set.
- When C(yes), the system blocks all requests that match a signature,
and provides the client with a support ID number.
- When C(no), the system accepts those requests.
type: bool
learn:
description:
- Specifies if the security policy learns all requests that match a signature
that is included in the signature set.
type: bool
state:
description:
- When C(present), ensures the resource exists.
- When C(absent), ensures the resource is removed.
type: str
default: present
choices:
- present
- absent
partition:
description:
- This parameter is only used when identifying an ASM policy.
type: str
default: Common
notes:
- This module is primarily used as a component of configuring an ASM policy in the Ansible Galaxy ASM Policy Role.
extends_documentation_fragment: f5networks.f5_modules.f5
author:
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Add Signature Set to ASM Policy
bigip_asm_policy_signature_set:
name: IIS and Windows Signatures
policy_name: FooPolicy
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Remove Signature Set from ASM Policy
bigip_asm_policy_signature_set:
name: IIS and Windows Signatures
policy_name: FooPolicy
state: absent
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
'''
RETURN = r'''
policy_name:
  description: The name of the ASM policy.
returned: changed
type: str
sample: FooPolicy
name:
description: The name of the Signature Set added/removed on an ASM policy.
returned: changed
type: str
sample: Cisco Signatures
alarm:
description: Specifies whether the security policy logs the request data in the Statistics screen.
returned: changed
type: bool
sample: yes
block:
description: Determines how the system treats requests that match a signature included in the signature set.
returned: changed
type: bool
sample: no
learn:
description: Specifies if the policy learns all requests that match a signature that is included in the signature set.
returned: changed
type: bool
sample: yes
'''
from datetime import datetime
from ansible.module_utils.basic import (
AnsibleModule, env_fallback
)
from distutils.version import LooseVersion
from ..module_utils.bigip import F5RestClient
from ..module_utils.common import (
F5ModuleError, AnsibleF5Parameters, f5_argument_spec, flatten_boolean
)
from ..module_utils.icontrol import (
module_provisioned, tmos_version
)
from ..module_utils.teem import send_teem
class Parameters(AnsibleF5Parameters):
api_map = {
}
api_attributes = [
'alarm',
'block',
'learn',
]
returnables = [
'policy_name',
'name',
'alarm',
'block',
'learn',
]
updatables = [
'alarm',
'block',
'learn',
]
class ApiParameters(Parameters):
pass
class ModuleParameters(Parameters):
    @property
    def alarm(self):
        result = flatten_boolean(self._values['alarm'])
        if result:
            return result == 'yes'
    @property
    def block(self):
        result = flatten_boolean(self._values['block'])
        if result:
            return result == 'yes'
    @property
    def learn(self):
        result = flatten_boolean(self._values['learn'])
        if result:
            return result == 'yes'
def _signature_set_exists_on_device(self, name):
uri = "https://{0}:{1}/mgmt/tm/asm/signature-sets".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
query = "?$select=name"
resp = self.client.api.get(uri + query)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status not in [200, 201] or 'code' in response and response['code'] not in [200, 201]:
raise F5ModuleError(resp.content)
if any(p['name'] == name for p in response['items']):
return True
return False
@property
def name(self):
if self._values['name'] is None:
return None
version = tmos_version(self.client)
if LooseVersion(version) < LooseVersion('13.0.0'):
name_list = [
'All Response Signatures',
'All Signatures',
'Command Execution Signatures',
'Cross Site Scripting Signatures',
'Directory Indexing Signatures',
'Generic Detection Signatures',
'HTTP Response Splitting Signatures',
'High Accuracy Signatures',
'Information Leakage Signatures',
'Low Accuracy Signatures',
'Medium Accuracy Signatures',
'OS Command Injection Signatures',
'OWA Signatures',
'Other Application Attacks Signatures',
'Path Traversal Signatures',
'Predictable Resource Location Signatures',
'Remote File Include Signatures',
'SQL Injection Signatures',
'Server Side Code Injection Signatures',
'WebSphere signatures',
'XPath Injection Signatures'
]
else:
name_list = [
'All Response Signatures',
'All Signatures',
'Apache Struts Signatures',
'Apache Tomcat Signatures',
'Cisco Signatures',
'Command Execution Signatures',
'Cross Site Scripting Signatures',
'Directory Indexing Signatures',
'Generic Detection Signatures',
'HTTP Response Splitting Signatures',
'High Accuracy Detection Evasion Signatures',
'High Accuracy Signatures',
'IIS and Windows Signatures',
'Information Leakage Signatures',
'Java Servlets/JSP Signatures',
'Low Accuracy Signatures',
'Medium Accuracy Signatures',
'OS Command Injection Signatures',
'OWA Signatures',
'Other Application Attacks Signatures',
'Path Traversal Signatures',
'Predictable Resource Location Signatures',
'Remote File Include Signatures',
'SQL Injection Signatures',
'Server Side Code Injection Signatures',
'WebSphere signatures',
'XPath Injection Signatures'
]
if self._values['name'] in name_list:
return self._values['name']
if self._signature_set_exists_on_device(self._values['name']):
return self._values['name']
raise F5ModuleError(
"The specified signature {0} set does not exist.".format(
self._values['name']
)
)
class Changes(Parameters):
    def to_return(self):
        result = {}
        for returnable in self.returnables:
            result[returnable] = getattr(self, returnable)
        return self._filter_params(result)
class UsableChanges(Changes):
pass
class ReportableChanges(Changes):
@property
def alarm(self):
return flatten_boolean(self._values['alarm'])
@property
def learn(self):
return flatten_boolean(self._values['learn'])
@property
def block(self):
return flatten_boolean(self._values['block'])
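# Hedged sketch of the tri-state mapping used by the alarm/block/learn
# properties above: an unset option stays None (and is therefore filtered
# out of API payloads), while a set option is normalised to a real bool.
# `_tri_state` is an illustrative helper, not part of the F5 module utils.
def _tri_state(value):
    if value is None:
        return None
    return str(value).lower() in ('yes', 'true', '1', 'on')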
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = F5RestClient(**self.module.params)
self.want = ModuleParameters(params=self.module.params, client=self.client)
self.have = ApiParameters()
self.changes = UsableChanges()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = UsableChanges(params=changed)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
return True
return False
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.client.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def exec_module(self):
start = datetime.now().isoformat()
version = tmos_version(self.client)
if not module_provisioned(self.client, 'asm'):
raise F5ModuleError(
"ASM must be provisioned to use this module."
)
changed = False
result = dict()
state = self.want.state
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
send_teem(start, self.module, version)
return result
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def absent(self):
if self.exists():
return self.remove()
return False
    def should_update(self):
        return self._update_changed_options()
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
return True
def remove(self):
if self.module.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the resource.")
return True
def create(self):
self._set_changed_options()
if self.module.check_mode:
return True
self.create_on_device()
return True
def exists(self):
policy_id = self._get_policy_id()
set_link = self._get_signature_set_link()
uri = 'https://{0}:{1}/mgmt/tm/asm/policies/{2}/signature-sets/'.format(
self.client.provider['server'],
self.client.provider['server_port'],
policy_id,
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
errors = [401, 403, 409, 500, 501, 502, 503, 504]
if resp.status in errors or 'code' in response and response['code'] in errors:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
if 'items' in response and response['items'] != []:
for st in response['items']:
if st['signatureSetReference']['link'] == set_link['link']:
self.want.ss_id = st['id']
return True
return False
def _get_signature_set_link(self):
result = None
signature_set = self.want.name
uri = "https://{0}:{1}/mgmt/tm/asm/signature-sets".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
query = "?$select=name"
resp = self.client.api.get(uri + query)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status not in [200, 201] or 'code' in response and response['code'] not in [200, 201]:
raise F5ModuleError(resp.content)
if 'items' in response and response['items'] != []:
for item in response['items']:
if item['name'] == signature_set:
result = dict(link=item['selfLink'])
return result
def _get_policy_id(self):
policy_id = None
uri = "https://{0}:{1}/mgmt/tm/asm/policies/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
query = "?$filter=contains(name,'{0}')+and+contains(partition,'{1}')&$select=name,id".format(
self.want.policy_name, self.want.partition
)
resp = self.client.api.get(uri + query)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status not in [200, 201] or 'code' in response and response['code'] not in [200, 201]:
raise F5ModuleError(resp.content)
if 'items' in response and response['items'] != []:
policy_id = response['items'][0]['id']
if not policy_id:
raise F5ModuleError(
"The policy with the name {0} does not exist".format(self.want.policy_name)
)
return policy_id
def create_on_device(self):
policy_id = self._get_policy_id()
params = self.changes.api_params()
params['signatureSetReference'] = self._get_signature_set_link()
uri = "https://{0}:{1}/mgmt/tm/asm/policies/{2}/signature-sets/".format(
self.client.provider['server'],
self.client.provider['server_port'],
policy_id
)
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]:
return True
raise F5ModuleError(resp.content)
def update_on_device(self):
policy_id = self._get_policy_id()
params = self.changes.api_params()
uri = "https://{0}:{1}/mgmt/tm/asm/policies/{2}/signature-sets/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
policy_id,
self.want.ss_id
)
resp = self.client.api.patch(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]:
return True
raise F5ModuleError(resp.content)
def remove_from_device(self):
policy_id = self._get_policy_id()
uri = 'https://{0}:{1}/mgmt/tm/asm/policies/{2}/signature-sets/{3}'.format(
self.client.provider['server'],
self.client.provider['server_port'],
policy_id,
self.want.ss_id
)
response = self.client.api.delete(uri)
if response.status in [200, 201]:
return True
raise F5ModuleError(response.content)
def read_current_from_device(self):
policy_id = self._get_policy_id()
uri = "https://{0}:{1}/mgmt/tm/asm/policies/{2}/signature-sets/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
policy_id,
self.want.ss_id
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]:
return ApiParameters(params=response)
raise F5ModuleError(resp.content)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
policy_name=dict(
required=True
),
name=dict(
required=True
),
alarm=dict(
type='bool'
),
block=dict(
type='bool'
),
learn=dict(
type='bool'
),
state=dict(
default='present',
choices=['present', 'absent']
),
partition=dict(
default='Common',
fallback=(env_fallback, ['F5_PARTITION'])
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
)
try:
mm = ModuleManager(module=module)
results = mm.exec_module()
module.exit_json(**results)
except F5ModuleError as ex:
module.fail_json(msg=str(ex))
if __name__ == '__main__':
main()
|
|
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""A non-blocking, single-threaded HTTP server.
Typical applications have little direct interaction with the `HTTPServer`
class except to start a server at the beginning of the process
(and even that is often done indirectly via `tornado.web.Application.listen`).
This module also defines the `HTTPRequest` class which is exposed via
`tornado.web.RequestHandler.request`.
"""
from __future__ import absolute_import, division, print_function, with_statement
import socket
import ssl
import time
from tornado.escape import native_str, parse_qs_bytes
from tornado import httputil
from tornado import iostream
from tornado.log import gen_log
from tornado import netutil
from tornado.tcpserver import TCPServer
from tornado import stack_context
from tornado.util import bytes_type
try:
import Cookie # py2
except ImportError:
import http.cookies as Cookie # py3
class HTTPServer(TCPServer):
r"""A non-blocking, single-threaded HTTP server.
A server is defined by a request callback that takes an HTTPRequest
instance as an argument and writes a valid HTTP response with
`HTTPRequest.write`. `HTTPRequest.finish` finishes the request (but does
not necessarily close the connection in the case of HTTP/1.1 keep-alive
requests). A simple example server that echoes back the URI you
requested::
import tornado.httpserver
import tornado.ioloop
def handle_request(request):
message = "You requested %s\n" % request.uri
request.write("HTTP/1.1 200 OK\r\nContent-Length: %d\r\n\r\n%s" % (
len(message), message))
request.finish()
http_server = tornado.httpserver.HTTPServer(handle_request)
http_server.listen(8888)
tornado.ioloop.IOLoop.instance().start()
`HTTPServer` is a very basic connection handler. It parses the request
headers and body, but the request callback is responsible for producing
the response exactly as it will appear on the wire. This affords
maximum flexibility for applications to implement whatever parts
of HTTP responses are required.
`HTTPServer` supports keep-alive connections by default
(automatically for HTTP/1.1, or for HTTP/1.0 when the client
requests ``Connection: keep-alive``). This means that the request
callback must generate a properly-framed response, using either
the ``Content-Length`` header or ``Transfer-Encoding: chunked``.
Applications that are unable to frame their responses properly
should instead return a ``Connection: close`` header in each
response and pass ``no_keep_alive=True`` to the `HTTPServer`
constructor.
If ``xheaders`` is ``True``, we support the
``X-Real-Ip``/``X-Forwarded-For`` and
``X-Scheme``/``X-Forwarded-Proto`` headers, which override the
remote IP and URI scheme/protocol for all requests. These headers
are useful when running Tornado behind a reverse proxy or load
balancer. The ``protocol`` argument can also be set to ``https``
if Tornado is run behind an SSL-decoding proxy that does not set one of
the supported ``xheaders``.
To make this server serve SSL traffic, send the ``ssl_options`` dictionary
argument with the arguments required for the `ssl.wrap_socket` method,
including ``certfile`` and ``keyfile``. (In Python 3.2+ you can pass
an `ssl.SSLContext` object instead of a dict)::
        HTTPServer(application, ssl_options={
"certfile": os.path.join(data_dir, "mydomain.crt"),
"keyfile": os.path.join(data_dir, "mydomain.key"),
})
`HTTPServer` initialization follows one of three patterns (the
initialization methods are defined on `tornado.tcpserver.TCPServer`):
1. `~tornado.tcpserver.TCPServer.listen`: simple single-process::
server = HTTPServer(app)
server.listen(8888)
IOLoop.instance().start()
In many cases, `tornado.web.Application.listen` can be used to avoid
the need to explicitly create the `HTTPServer`.
2. `~tornado.tcpserver.TCPServer.bind`/`~tornado.tcpserver.TCPServer.start`:
simple multi-process::
server = HTTPServer(app)
server.bind(8888)
server.start(0) # Forks multiple sub-processes
IOLoop.instance().start()
When using this interface, an `.IOLoop` must *not* be passed
to the `HTTPServer` constructor. `~.TCPServer.start` will always start
the server on the default singleton `.IOLoop`.
3. `~tornado.tcpserver.TCPServer.add_sockets`: advanced multi-process::
sockets = tornado.netutil.bind_sockets(8888)
tornado.process.fork_processes(0)
server = HTTPServer(app)
server.add_sockets(sockets)
IOLoop.instance().start()
The `~.TCPServer.add_sockets` interface is more complicated,
but it can be used with `tornado.process.fork_processes` to
give you more flexibility in when the fork happens.
`~.TCPServer.add_sockets` can also be used in single-process
servers if you want to create your listening sockets in some
way other than `tornado.netutil.bind_sockets`.
"""
def __init__(self, request_callback, no_keep_alive=False, io_loop=None,
xheaders=False, ssl_options=None, protocol=None, **kwargs):
self.request_callback = request_callback
self.no_keep_alive = no_keep_alive
self.xheaders = xheaders
self.protocol = protocol
TCPServer.__init__(self, io_loop=io_loop, ssl_options=ssl_options,
**kwargs)
def handle_stream(self, stream, address):
HTTPConnection(stream, address, self.request_callback,
self.no_keep_alive, self.xheaders, self.protocol)
class _BadRequestException(Exception):
"""Exception class for malformed HTTP requests."""
pass
class HTTPConnection(object):
"""Handles a connection to an HTTP client, executing HTTP requests.
We parse HTTP headers and bodies, and execute the request callback
    until the HTTP connection is closed.
"""
def __init__(self, stream, address, request_callback, no_keep_alive=False,
xheaders=False, protocol=None):
self.stream = stream
self.address = address
# Save the socket's address family now so we know how to
# interpret self.address even after the stream is closed
# and its socket attribute replaced with None.
self.address_family = stream.socket.family
self.request_callback = request_callback
self.no_keep_alive = no_keep_alive
self.xheaders = xheaders
self.protocol = protocol
self._clear_request_state()
# Save stack context here, outside of any request. This keeps
# contexts from one request from leaking into the next.
self._header_callback = stack_context.wrap(self._on_headers)
self.stream.set_close_callback(self._on_connection_close)
self.stream.read_until(b"\r\n\r\n", self._header_callback)
def _clear_request_state(self):
"""Clears the per-request state.
This is run in between requests to allow the previous handler
to be garbage collected (and prevent spurious close callbacks),
and when the connection is closed (to break up cycles and
facilitate garbage collection in cpython).
"""
self._request = None
self._request_finished = False
self._write_callback = None
self._close_callback = None
def set_close_callback(self, callback):
"""Sets a callback that will be run when the connection is closed.
Use this instead of accessing
`HTTPConnection.stream.set_close_callback
<.BaseIOStream.set_close_callback>` directly (which was the
recommended approach prior to Tornado 3.0).
"""
self._close_callback = stack_context.wrap(callback)
def _on_connection_close(self):
if self._close_callback is not None:
callback = self._close_callback
self._close_callback = None
callback()
# Delete any unfinished callbacks to break up reference cycles.
self._header_callback = None
self._clear_request_state()
def close(self):
self.stream.close()
# Remove this reference to self, which would otherwise cause a
# cycle and delay garbage collection of this connection.
self._header_callback = None
self._clear_request_state()
def write(self, chunk, callback=None):
"""Writes a chunk of output to the stream."""
if not self.stream.closed():
self._write_callback = stack_context.wrap(callback)
self.stream.write(chunk, self._on_write_complete)
def finish(self):
"""Finishes the request."""
self._request_finished = True
# No more data is coming, so instruct TCP to send any remaining
# data immediately instead of waiting for a full packet or ack.
self.stream.set_nodelay(True)
if not self.stream.writing():
self._finish_request()
def _on_write_complete(self):
if self._write_callback is not None:
callback = self._write_callback
self._write_callback = None
callback()
# _on_write_complete is enqueued on the IOLoop whenever the
# IOStream's write buffer becomes empty, but it's possible for
# another callback that runs on the IOLoop before it to
# simultaneously write more data and finish the request. If
# there is still data in the IOStream, a future
# _on_write_complete will be responsible for calling
# _finish_request.
if self._request_finished and not self.stream.writing():
self._finish_request()
def _finish_request(self):
if self.no_keep_alive or self._request is None:
disconnect = True
else:
connection_header = self._request.headers.get("Connection")
if connection_header is not None:
connection_header = connection_header.lower()
if self._request.supports_http_1_1():
disconnect = connection_header == "close"
elif ("Content-Length" in self._request.headers
or self._request.method in ("HEAD", "GET")):
disconnect = connection_header != "keep-alive"
else:
disconnect = True
self._clear_request_state()
if disconnect:
self.close()
return
try:
# Use a try/except instead of checking stream.closed()
# directly, because in some cases the stream doesn't discover
# that it's closed until you try to read from it.
self.stream.read_until(b"\r\n\r\n", self._header_callback)
# Turn Nagle's algorithm back on, leaving the stream in its
# default state for the next request.
self.stream.set_nodelay(False)
except iostream.StreamClosedError:
self.close()
def _on_headers(self, data):
try:
data = native_str(data.decode('latin1'))
eol = data.find("\r\n")
start_line = data[:eol]
try:
method, uri, version = start_line.split(" ")
except ValueError:
raise _BadRequestException("Malformed HTTP request line")
if not version.startswith("HTTP/"):
raise _BadRequestException("Malformed HTTP version in HTTP Request-Line")
try:
headers = httputil.HTTPHeaders.parse(data[eol:])
except ValueError:
# Probably from split() if there was no ':' in the line
raise _BadRequestException("Malformed HTTP headers")
# HTTPRequest wants an IP, not a full socket address
if self.address_family in (socket.AF_INET, socket.AF_INET6):
remote_ip = self.address[0]
else:
# Unix (or other) socket; fake the remote address
remote_ip = '0.0.0.0'
self._request = HTTPRequest(
connection=self, method=method, uri=uri, version=version,
headers=headers, remote_ip=remote_ip, protocol=self.protocol)
content_length = headers.get("Content-Length")
if content_length:
content_length = int(content_length)
if content_length > self.stream.max_buffer_size:
raise _BadRequestException("Content-Length too long")
if headers.get("Expect") == "100-continue":
self.stream.write(b"HTTP/1.1 100 (Continue)\r\n\r\n")
self.stream.read_bytes(content_length, self._on_request_body)
return
self.request_callback(self._request)
except _BadRequestException as e:
gen_log.info("Malformed HTTP request from %s: %s",
self.address[0], e)
self.close()
return
def _on_request_body(self, data):
self._request.body = data
if self._request.method in ("POST", "PATCH", "PUT"):
httputil.parse_body_arguments(
self._request.headers.get("Content-Type", ""), data,
self._request.arguments, self._request.files)
self.request_callback(self._request)
class HTTPRequest(object):
"""A single HTTP request.
All attributes are type `str` unless otherwise noted.
.. attribute:: method
HTTP request method, e.g. "GET" or "POST"
.. attribute:: uri
The requested uri.
.. attribute:: path
The path portion of `uri`
.. attribute:: query
The query portion of `uri`
.. attribute:: version
HTTP version specified in request, e.g. "HTTP/1.1"
.. attribute:: headers
`.HTTPHeaders` dictionary-like object for request headers. Acts like
a case-insensitive dictionary with additional methods for repeated
headers.
.. attribute:: body
Request body, if present, as a byte string.
.. attribute:: remote_ip
Client's IP address as a string. If ``HTTPServer.xheaders`` is set,
will pass along the real IP address provided by a load balancer
in the ``X-Real-Ip`` or ``X-Forwarded-For`` header.
.. versionchanged:: 3.1
The list format of ``X-Forwarded-For`` is now supported.
.. attribute:: protocol
The protocol used, either "http" or "https". If ``HTTPServer.xheaders``
is set, will pass along the protocol used by a load balancer if
reported via an ``X-Scheme`` header.
.. attribute:: host
The requested hostname, usually taken from the ``Host`` header.
.. attribute:: arguments
GET/POST arguments are available in the arguments property, which
       maps argument names to lists of values (to support multiple values
for individual names). Names are of type `str`, while arguments
are byte strings. Note that this is different from
`.RequestHandler.get_argument`, which returns argument values as
unicode strings.
.. attribute:: files
File uploads are available in the files property, which maps file
names to lists of `.HTTPFile`.
.. attribute:: connection
An HTTP request is attached to a single HTTP connection, which can
be accessed through the "connection" attribute. Since connections
are typically kept open in HTTP/1.1, multiple requests can be handled
sequentially on a single connection.
"""
def __init__(self, method, uri, version="HTTP/1.0", headers=None,
body=None, remote_ip=None, protocol=None, host=None,
files=None, connection=None):
self.method = method
self.uri = uri
self.version = version
self.headers = headers or httputil.HTTPHeaders()
self.body = body or ""
# set remote IP and protocol
self.remote_ip = remote_ip
if protocol:
self.protocol = protocol
elif connection and isinstance(connection.stream,
iostream.SSLIOStream):
self.protocol = "https"
else:
self.protocol = "http"
# xheaders can override the defaults
if connection and connection.xheaders:
# Squid uses X-Forwarded-For, others use X-Real-Ip
ip = self.headers.get("X-Forwarded-For", self.remote_ip)
ip = ip.split(',')[-1].strip()
ip = self.headers.get(
"X-Real-Ip", ip)
if netutil.is_valid_ip(ip):
self.remote_ip = ip
# AWS uses X-Forwarded-Proto
proto = self.headers.get(
"X-Scheme", self.headers.get("X-Forwarded-Proto", self.protocol))
if proto in ("http", "https"):
self.protocol = proto
self.host = host or self.headers.get("Host") or "127.0.0.1"
self.files = files or {}
self.connection = connection
self._start_time = time.time()
self._finish_time = None
self.path, sep, self.query = uri.partition('?')
self.arguments = parse_qs_bytes(self.query, keep_blank_values=True)
def supports_http_1_1(self):
"""Returns True if this request supports HTTP/1.1 semantics"""
return self.version == "HTTP/1.1"
@property
def cookies(self):
"""A dictionary of Cookie.Morsel objects."""
if not hasattr(self, "_cookies"):
self._cookies = Cookie.SimpleCookie()
if "Cookie" in self.headers:
try:
self._cookies.load(
native_str(self.headers["Cookie"]))
except Exception:
self._cookies = {}
return self._cookies
def write(self, chunk, callback=None):
"""Writes the given chunk to the response stream."""
assert isinstance(chunk, bytes_type)
self.connection.write(chunk, callback=callback)
def finish(self):
"""Finishes this HTTP request on the open connection."""
self.connection.finish()
self._finish_time = time.time()
def full_url(self):
"""Reconstructs the full URL for this request."""
return self.protocol + "://" + self.host + self.uri
def request_time(self):
"""Returns the amount of time it took for this request to execute."""
if self._finish_time is None:
return time.time() - self._start_time
else:
return self._finish_time - self._start_time
def get_ssl_certificate(self, binary_form=False):
"""Returns the client's SSL certificate, if any.
To use client certificates, the HTTPServer must have been constructed
with cert_reqs set in ssl_options, e.g.::
server = HTTPServer(app,
ssl_options=dict(
certfile="foo.crt",
keyfile="foo.key",
cert_reqs=ssl.CERT_REQUIRED,
ca_certs="cacert.crt"))
By default, the return value is a dictionary (or None, if no
client certificate is present). If ``binary_form`` is true, a
DER-encoded form of the certificate is returned instead. See
SSLSocket.getpeercert() in the standard library for more
details.
http://docs.python.org/library/ssl.html#sslsocket-objects
"""
try:
return self.connection.stream.socket.getpeercert(
binary_form=binary_form)
except ssl.SSLError:
return None
def __repr__(self):
attrs = ("protocol", "host", "method", "uri", "version", "remote_ip")
args = ", ".join(["%s=%r" % (n, getattr(self, n)) for n in attrs])
return "%s(%s, headers=%s)" % (
self.__class__.__name__, args, dict(self.headers))
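# Hedged standalone sketch of the keep-alive decision implemented in
# HTTPConnection._finish_request() above; `headers` is a plain mapping here
# and the helper name is illustrative, not part of Tornado.
def _should_disconnect(no_keep_alive, version, method, headers):
    if no_keep_alive:
        return True
    connection = (headers.get("Connection") or "").lower()
    if version == "HTTP/1.1":
        return connection == "close"
    if "Content-Length" in headers or method in ("HEAD", "GET"):
        return connection != "keep-alive"
    return True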
|
|
"""
Attempt to scrape AIM25's Wiener Library material.
"""
import os
import re
import sys
import json
import httplib2
import urllib
import codecs
import babel
from django.core.management.base import BaseCommand, CommandError
from BeautifulSoup import BeautifulSoup, BeautifulStoneSoup
BASEURL = "http://www.aim25.ac.uk"
INSTID = 104
PAGES = 56 # 56
# get a reverse dict of language-name -> code
LANGCODES = dict([(v, k) for k, v in \
babel.Locale("en").languages.iteritems()])
def get_page_soup(url):
"""Get a BeautifulSoup object for a given page."""
http = httplib2.Http()
resp, content = http.request(url)
return BeautifulSoup(content)
def search_aim25(instid=INSTID, page=1):
"""Run a post query."""
    # 1. run an advanced search. Scrape the page
    # 2. for the number of result pages
    # 3. for each page, scrape the links
url = "/cgi-bin/vcdf/asearch?"
params = dict(
inst_id=instid,
pageno=page
)
query = urllib.urlencode(params)
soup = get_page_soup(BASEURL + url + query)
links = []
for tr in soup.find("div", id="content").find("table").findAll("tr")[1:]:
links.append(BASEURL + tr.find("a").attrs[0][1])
return links
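# Usage sketch (requires network access; page numbering is 1-based, up to
# the PAGES constant above):
#   links = search_aim25(page=1)  # absolute item URLs for one results page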
def get_identity_details(soup):
"""Scape the identity section."""
idfields = dict(
identifier="Reference code(s)",
name="Full title",
dates="Date(s)",
extent_and_medium="Extent",
name_of_creators="Name of creator(s)"
)
ids = dict()
table = soup.find("div", id="content").find("table")
for tr in table.findAll("tr"):
for key, val in idfields.iteritems():
if tr.find(text=val):
ids[key] = re.sub("^:\s+", "", tr.findAll("td")[1].text)
break
return ids
def get_paragraph_divided(soup):
"""Scape sections consisting of paragraph header/content."""
sections = dict(
archival_history="Administrative/Biographical history",
scope_and_content="Scope and content/abstract",
arrangement="System of arrangement",
access_conditions="Conditions governing access",
reproduction_conditions="Conditions governing reproduction",
finding_aids="Finding aids",
sources="Immediate source of acquisition"
)
sects = dict()
for key, val in sections.iteritems():
text = u""
p = soup.find("h2", text=val).parent.parent
for pn in p.nextSiblingGenerator():
if hasattr(pn, "name"):
if pn.name == "h2" or pn.find("strong"):
break
if pn.name == 'p':
text += "%s\n\n" % pn.text.strip()
sects[key] = text.strip()
return sects
def get_break_divided(soup):
"""Scrape sections divided by break: header/content."""
sections = dict(
language="Language/scripts of material",
archivist_note="Archivist's note",
rules="Rules or conventions",
dates_of_description="Date(s) of descriptions",
related_materials="Related material",
publication_note="Publication note"
)
sects = dict()
for key, val in sections.iteritems():
p = soup.find("h2", text=val).parent.parent
parts = [c for c in p.childGenerator()]
if len(parts) == 4:
sects[key] = re.sub("^:\s+", "", parts[3].strip())
else:
sects[key] = u''
return sects
def get_keywords(soup):
"""Get keywords, represented as checkboxes."""
head = soup.find("h2", text="Related Subject Search")
if head is None:
return []
div = head.parent.parent.parent
attrs = [i.attrMap["value"] for i in \
div.findAll("input", {"name": "keyword"})]
return [urllib.unquote(a) for a in attrs]
def get_corporate_names(soup):
"""Get person names, represented as checkboxes."""
head = soup.find("h2", text="Related Corporate Name Search")
if head is None:
return []
div = head.parent.parent.parent
attrs = [i.attrMap["value"] for i in \
div.findAll("input", {"name": "keyword"})]
return [urllib.unquote(a) for a in attrs]
def get_person_names(soup):
"""Get person names, represented as checkboxes."""
head = soup.find("h2", text="Related Personal Name Search")
if head is None:
return []
div = head.parent.parent.parent
attrs = [i.attrMap["value"] for i in \
div.findAll("input", {"name": "keyword"})]
return [urllib.unquote(a) for a in attrs]
def scrape_item(url):
"""Scrape a collection's details."""
# this will be fugly
soup = get_page_soup(url)
ids = get_identity_details(soup)
brs = get_break_divided(soup)
prs = get_paragraph_divided(soup)
keywords = get_keywords(soup)
persons = get_person_names(soup)
corps = get_corporate_names(soup)
info = dict(ids.items() + brs.items() + prs.items())
info["languages"] = convert_languages(info.pop("language",""))
info["keywords"] = keywords
info["people"] = persons
info["corps"] = corps
return info
def convert_languages(langstr):
"""Get codes from names."""
languages = re.sub("[\W]", " ", langstr)\
.replace("Romani", "Romany").split()
langs = []
for langword in languages:
code = LANGCODES.get(langword)
if code is not None:
langs.append(code)
return langs
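# e.g. convert_languages("English; German") -> ['en', 'de'], assuming
# Babel's English-locale language names are used to build LANGCODES above.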
class Command(BaseCommand):
def handle(self, *args, **kwargs):
"""Run scrape."""
# input: a file containing URLs to scrape
if not len(args) == 2:
raise CommandError("No input url file or output file given.")
output = []
with open(args[0], "r") as infile:
for line in infile.readlines():
url = line.strip()
if not url:
continue
sys.stderr.write("Scraping url: %s\n" % url)
item = scrape_item(url)
sys.stderr.write("%s\n" % item)
output.append(item)
with codecs.open(args[1], "w", "utf8") as outfile:
json.dump(output, outfile, ensure_ascii=False, indent=2)
|
|
# Copyright (c) 2015, Ecole Polytechnique Federale de Lausanne, Blue Brain Project
# All rights reserved.
#
# This file is part of NeuroM <https://github.com/BlueBrain/NeuroM>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''Dendrogram helper functions and class'''
from copy import deepcopy
import sys
import numpy as np
from neurom.core import Tree, Neurite
from neurom.core.dataformat import COLS
def _n_terminations(tree):
'''Get the number of terminations in a tree'''
return sum(1 for _ in tree.ileaf())
def _max_recursion_depth(obj):
''' Estimate recursion depth, which is defined as the number of nodes in a tree
'''
neurites = obj.neurites if hasattr(obj, 'neurites') else [obj]
return max(sum(1 for _ in neu.iter_sections()) for neu in neurites)
def _total_rectangles(tree):
'''
Calculate the total number of segments that are required
for the dendrogram. There is a vertical line for each segment
and two horizontal lines at each branching point
'''
return sum(len(sec.children) + sec.points.shape[0] - 1
for sec in tree.iter_sections())
def _n_rectangles(obj):
'''
Calculate the total number of rectangles with respect to
the type of the object
'''
return sum(_total_rectangles(neu) for neu in obj.neurites) \
if hasattr(obj, 'neurites') else _total_rectangles(obj)
def _square_segment(radius, origin):
'''Vertices for a square
'''
return np.array(((origin[0] - radius, origin[1] - radius),
(origin[0] - radius, origin[1] + radius),
(origin[0] + radius, origin[1] + radius),
(origin[0] + radius, origin[1] - radius)))
def _vertical_segment(old_offs, new_offs, spacing, radii):
'''Vertices for a vertical rectangle
'''
return np.array(((new_offs[0] - radii[0], old_offs[1] + spacing[1]),
(new_offs[0] - radii[1], new_offs[1]),
(new_offs[0] + radii[1], new_offs[1]),
(new_offs[0] + radii[0], old_offs[1] + spacing[1])))
def _horizontal_segment(old_offs, new_offs, spacing, diameter):
'''Vertices of a horizontal rectangle
'''
return np.array(((old_offs[0], old_offs[1] + spacing[1]),
(new_offs[0], old_offs[1] + spacing[1]),
(new_offs[0], old_offs[1] + spacing[1] - diameter),
(old_offs[0], old_offs[1] + spacing[1] - diameter)))
def _spacingx(node, max_dims, xoffset, xspace):
'''Determine the spacing of the current node depending on the number
of the leaves of the tree
'''
x_spacing = _n_terminations(node) * xspace
if x_spacing > max_dims[0]:
max_dims[0] = x_spacing
return xoffset - x_spacing / 2.
def _update_offsets(start_x, spacing, terminations, offsets, length):
'''Update the offsets
'''
return (start_x + spacing[0] * terminations / 2.,
offsets[1] + spacing[1] * 2. + length)
def _max_diameter(tree):
'''Find max diameter in tree
'''
return 2. * max(max(node.points[:, COLS.R]) for node in tree.ipreorder())
class Dendrogram(object):
'''Dendrogram
'''
def __init__(self, obj, show_diameters=True):
'''Create dendrogram
'''
# flag for diameters
self._show_diameters = show_diameters
# input object, tree, or neuron
self._obj = deepcopy(Neurite(obj) if isinstance(obj, Tree) else obj)
# counter/index for the storage of the rectangles.
# it is updated recursively
self._n = 0
# the maximum lengths in x and y that are occupied
# by a neurite. They are updated recursively.
self._max_dims = [0., 0.]
# stores indices that refer to the _rectangles array
# for each neurite
self._groups = []
# dims store the max dimensions for each neurite
# essential for the displacement in the plotting
self._dims = []
# initialize the number of rectangles
self._rectangles = np.zeros([_n_rectangles(self._obj), 4, 2])
# determine the maximum recursion depth for the given object
# which depends on the tree with the maximum number of nodes
self._max_rec_depth = _max_recursion_depth(self._obj)
def _generate_soma(self):
'''soma'''
radius = self._obj.soma.radius
return _square_segment(radius, (0., -radius))
def generate(self):
'''Generate dendrogram
'''
offsets = (0., 0.)
n_previous = 0
# set recursion limit with respect to
# the max number of nodes on the trees
old_depth = sys.getrecursionlimit()
max_depth = old_depth if old_depth > self._max_rec_depth else self._max_rec_depth
# TODO: This should be fixed so we don't set sys.setrecursionlimit at all
sys.setrecursionlimit(max_depth)
if isinstance(self._obj, Neurite):
max_diameter = _max_diameter(self._obj.root_node)
dummy_section = Tree()
dummy_section.add_child(self._obj.root_node)
self._generate_dendro(dummy_section, (max_diameter, 0.), offsets)
self._groups.append((0., self._n))
self._dims.append(self._max_dims)
else:
for neurite in self._obj.neurites:
neurite = neurite.root_node
max_diameter = _max_diameter(neurite)
dummy_section = Tree()
dummy_section.add_child(neurite)
self._generate_dendro(dummy_section, (max_diameter, 0.), offsets)
# store in trees the indices for the slice which corresponds
# to the current neurite
self._groups.append((n_previous, self._n))
# store the max dims per neurite for view positioning
self._dims.append(self._max_dims)
# reset the max dimensions for the next tree in line
self._max_dims = [0., 0.]
# keep track of the next tree start index in list
n_previous = self._n
# set it back to its initial value
sys.setrecursionlimit(old_depth)
def _generate_dendro(self, current_section, spacing, offsets):
'''Recursive function for dendrogram line computations
'''
max_dims = self._max_dims
start_x = _spacingx(current_section, max_dims, offsets[0], spacing[0])
for child in current_section.children:
segments = child.points
# number of leaves in child
terminations = _n_terminations(child)
# segment lengths
seg_lengths = np.linalg.norm(np.subtract(segments[:-1, COLS.XYZ],
segments[1:, COLS.XYZ]), axis=1)
# segment radii
radii = np.vstack((segments[:-1, COLS.R], segments[1:, COLS.R])).T \
if self._show_diameters else np.zeros((seg_lengths.shape[0], 2))
y_offset = offsets[1]
for i, slen in enumerate(seg_lengths):
# offset update for the vertical segments
new_offsets = _update_offsets(start_x, spacing, terminations,
(offsets[0], y_offset), slen)
# segments are drawn vertically, thus only y_offset changes from init offsets
self._rectangles[self._n] = _vertical_segment((offsets[0], y_offset),
new_offsets, spacing, radii[i, :])
self._n += 1
y_offset = new_offsets[1]
if y_offset + spacing[1] * 2 + sum(seg_lengths) > max_dims[1]:
max_dims[1] = y_offset + spacing[1] * 2. + sum(seg_lengths)
self._max_dims = max_dims
# recursive call to self.
self._generate_dendro(child, spacing, new_offsets)
# update the starting position for the next child
start_x += terminations * spacing[0]
# write the horizontal lines only for bifurcations, where there are
# actual horizontal lines and not zero-length ones
if offsets[0] != new_offsets[0]:
# horizontal segment. Thickness is either 0 if show_diameters is false
# or 1. if show_diameters is true
self._rectangles[self._n] = _horizontal_segment(offsets, new_offsets, spacing, 0.)
self._n += 1
@property
def data(self):
''' Returns the array with the rectangle collection
'''
return self._rectangles
@property
def groups(self):
''' Returns the list of the indices for the slicing of the
rectangle array which corresponds to each neurite
'''
return self._groups
@property
def dims(self):
''' Returns the list of the max dimensions for each neurite
'''
return self._dims
@property
def types(self):
''' Returns an iterator over the types of the neurites in the object.
If the object is a tree, then one value is returned.
'''
neurites = self._obj.neurites if hasattr(self._obj, 'neurites') else (self._obj,)
return (neu.type for neu in neurites)
@property
def soma(self):
''' Returns soma
'''
return self._generate_soma() if hasattr(self._obj, 'soma') else None
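# A minimal usage sketch (assuming a morphology file 'neuron.swc' readable
# by NeuroM's public loader):
#
#   import neurom
#   nrn = neurom.load_neuron('neuron.swc')
#   dnd = Dendrogram(nrn)
#   dnd.generate()
#   rects = dnd.data      # (n_rectangles, 4, 2) array of rectangle vertices
#   groups = dnd.groups   # per-neurite index ranges into `rects`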
|
|
from typing import Dict, List
import textwrap
from keras.layers import Input
from overrides import overrides
import numpy
from ...data.instances.multiple_choice_qa import TupleInferenceInstance
from ...layers import NoisyOr
from ...layers.attention import MaskedSoftmax
from ...layers.backend import RepeatLike
from ...layers.subtract_minimum import SubtractMinimum
from ...layers.tuple_matchers import tuple_matchers
from ...training import TextTrainer
from ...training.models import DeepQaModel
from ...common.params import Params
class TupleInferenceModel(TextTrainer):
"""
This ``TextTrainer`` implements the TupleEntailment model of Tushar. It takes a set of tuples
from the question and its answer candidates and a set of background knowledge tuples and looks
for entailment between the corresponding tuple slots. The result is a probability distribution
over the answer options based on how well they align with the background tuples, given the
question text. We consider this alignment to be a form of soft inference, hence the model
name.
Parameters
----------
tuple_matcher: Dict[str, Any]
Parameters for selecting and then initializing the inner entailment model, one of the
TupleMatch models.
noisy_or_param_init: str, default='uniform'
The initialization for the noise parameters in the ``NoisyOr`` layers.
num_question_tuples: int, default=10
The number of tuples for each of the answer candidates in the question.
num_background_tuples: int, default=10
The number of tuples for the background knowledge.
num_tuple_slots: int, default=4
The number of slots in each tuple.
num_slot_words: int, default=5
The number of words in each slot of the tuples.
num_options: int, default=4
The number of answer options/candidates.
normalize_tuples_across_answers: bool, default=False
Whether or not to normalize each question tuple's score across the answer options. This
assumes that the tuples are in the same order for all answer options. Normalization is
currently done by subtracting the minimum score for a given tuple "position" from all the
tuples in that position.
display_text_wrap: int, default=150
This is used by the debug output methods to wrap long tuple strings.
display_num_tuples: int, default=5
This is used by the debug output methods. It determines how many background tuples to display for
each of the answer tuples in a given instance when displaying the tuple match scores.
"""
def __init__(self, params: Params):
self.noisy_or_param_init = params.pop('noisy_or_param_init', 'uniform')
self.num_question_tuples = params.pop('num_question_tuples', None)
self.num_background_tuples = params.pop('num_background_tuples', None)
self.num_tuple_slots = params.pop('num_tuple_slots', None)
self.num_slot_words = params.pop('num_slot_words', None)
self.num_options = params.pop('num_answer_options', None)
self.normalize_tuples_across_answers = params.pop('normalize_tuples_across_answers', False)
self.display_text_wrap = params.pop('display_text_wrap', 150)
self.display_num_tuples = params.pop('display_num_tuples', 5)
tuple_matcher_params = params.pop('tuple_matcher', {})
tuple_matcher_choice = tuple_matcher_params.pop_choice("type", list(tuple_matchers.keys()),
default_to_first_choice=True)
tuple_matcher_class = tuple_matchers[tuple_matcher_choice]
self.tuple_matcher = tuple_matcher_class(self, tuple_matcher_params)
self.tuple_matcher.name = "match_layer"
super(TupleInferenceModel, self).__init__(params)
self.name = 'TupleInferenceModel'
@overrides
def _instance_type(self):
return TupleInferenceInstance
@classmethod
@overrides
def _get_custom_objects(cls):
custom_objects = super(TupleInferenceModel, cls)._get_custom_objects()
for tuple_matcher in tuple_matchers.values():
custom_objects.update(tuple_matcher.get_custom_objects())
custom_objects['MaskedSoftmax'] = MaskedSoftmax
custom_objects['NoisyOr'] = NoisyOr
custom_objects['RepeatLike'] = RepeatLike
custom_objects['SubtractMinimum'] = SubtractMinimum
return custom_objects
@overrides
def get_padding_lengths(self) -> Dict[str, int]:
padding_lengths = super(TupleInferenceModel, self).get_padding_lengths()
padding_lengths['num_question_tuples'] = self.num_question_tuples
padding_lengths['num_background_tuples'] = self.num_background_tuples
padding_lengths['num_slots'] = self.num_tuple_slots
padding_lengths['num_sentence_words'] = self.num_slot_words
padding_lengths['num_options'] = self.num_options
return padding_lengths
@overrides
def get_instance_sorting_keys(self) -> List[str]: # pylint: disable=no-self-use
return ['num_sentence_words', 'num_background_tuples', 'num_question_tuples', 'num_slots']
@overrides
def _set_padding_lengths(self, padding_lengths: Dict[str, int]):
super(TupleInferenceModel, self)._set_padding_lengths(padding_lengths)
# The number of tuple slots determines the shape of some of the weights in our model, so we
# need to keep this constant.
if self.num_tuple_slots is None:
self.num_tuple_slots = padding_lengths['num_slots']
if self.data_generator is not None and self.data_generator.dynamic_padding:
return
if self.num_question_tuples is None:
self.num_question_tuples = padding_lengths['num_question_tuples']
if self.num_background_tuples is None:
self.num_background_tuples = padding_lengths['num_background_tuples']
if self.num_slot_words is None:
self.num_slot_words = padding_lengths['num_sentence_words']
if self.num_options is None:
self.num_options = padding_lengths['num_options']
@overrides
def get_padding_memory_scaling(self, padding_lengths: Dict[str, int]) -> int:
num_question_tuples = padding_lengths['num_question_tuples']
num_background_tuples = padding_lengths['num_background_tuples']
num_sentence_words = padding_lengths['num_sentence_words']
num_options = padding_lengths['num_options']
return num_question_tuples * num_background_tuples * num_sentence_words * num_options
@overrides
def _set_padding_lengths_from_model(self):
self.num_background_tuples = self.model.get_input_shape_at(0)[1][1]
self.num_options = self.model.get_input_shape_at(0)[0][1]
self.num_question_tuples = self.model.get_input_shape_at(0)[0][2]
self.num_tuple_slots = self.model.get_input_shape_at(0)[0][3]
self.num_slot_words = self.model.get_input_shape_at(0)[0][4]
self._set_text_lengths_from_model_input = self.model.get_input_shape_at(0)[0][4:]
@overrides
def _build_model(self):
r"""
The basic outline of the model is that the question input, :math:`\mathcal{A}` (which consists of the
inputs for each of the answer choices, i.e., each :math:`A^c \in \mathcal{A}`), and the background input,
:math:`\mathcal{K}`, get tiled to be the same size. They are then aligned tuple-by-tuple: each of the
background tuples, :math:`k_j` is compared to each of the answer tuples, :math:`a_i^c`, to create a
support/entailment score, :math:`s_{ij}^c`. This score is determined using the selected ``TupleMatch``
layer.
Then, for each answer tuple, :math:`a_i^c \in A^c` we combine
the scores for each :math:`k_j \in K` using noisy-or to get the entailment score for the given answer
choice tuple:
:math:`s_i^c = 1 - \prod_{j=1:J}(1 - q_1 * s_{ij}^c)`
where q_1 is the noise parameter for this first noisy-or. Next, we combine these scores for each answer
choice again using the noisy-or to get the entailment score for the answer candidate:
:math:`s^c = 1 - \prod_{i=1:N}(1 - q_2 * s_{i}^c)`
where q_2 is the noise parameter for this second noisy-or. At this point, we have a score for each of
the answer candidates, and so we perform a softmax to determine which is the best answer.
"""
# shape: (batch size, num_options, num_question_tuples, num_tuple_slots, num_slot_words)
slot_shape = self._get_sentence_shape(self.num_slot_words)
question_input_shape = (self.num_options, self.num_question_tuples, self.num_tuple_slots) + slot_shape
question_input = Input(question_input_shape, dtype='int32', name='question_input')
# shape: (batch size, num_background_tuples, num_tuple_slots, num_slot_words)
background_input_shape = (self.num_background_tuples, self.num_tuple_slots) + slot_shape
background_input = Input(background_input_shape, dtype='int32', name='background_input')
# Expand and tile the question input to be:
# shape: (batch size, num_options, num_question_tuples, num_background_tuples, num_tuple_slots,
# num_slot_words)
tiled_question = RepeatLike(axis=3, copy_from_axis=1)([question_input, background_input])
# Expand and tile the background input to match question input.
# shape: (batch size, num_options, num_question_tuples, num_background_tuples, num_tuple_slots,
# num_slot_words)
# First, add num_options.
tiled_background = RepeatLike(axis=1, copy_from_axis=1)([background_input, question_input])
# Next, add num_question_tuples.
tiled_background = RepeatLike(axis=2, copy_from_axis=2)([tiled_background, question_input])
# Find the matches between the question and background tuples.
# shape: (batch size, num_options, num_question_tuples, num_background_tuples)
matches = self.tuple_matcher([tiled_question, tiled_background])
# Find the probability that any given question tuple is entailed by the given background tuples.
# shape: (batch size, num_options, num_question_tuples)
combine_background_evidence = NoisyOr(axis=-1, param_init=self.noisy_or_param_init)
combine_background_evidence.name = "noisy_or_1"
qi_probabilities = combine_background_evidence(matches)
# If desired, peek across the options, and normalize the amount that a given answer tuple template "counts"
# towards a correct answer.
if self.normalize_tuples_across_answers:
normalize_across_options = SubtractMinimum(axis=1)
qi_probabilities = normalize_across_options(qi_probabilities)
# Find the probability that any given option is correct, given the entailement scores of each of its
# question tuples given the set of background tuples.
# shape: (batch size, num_options)
combine_question_evidence = NoisyOr(axis=-1, param_init=self.noisy_or_param_init)
combine_question_evidence.name = "noisy_or_2"
options_probabilities = combine_question_evidence(qi_probabilities)
# Softmax over the options to choose the best one.
final_output = MaskedSoftmax(name="masked_softmax")(options_probabilities)
return DeepQaModel(input=[question_input, background_input], output=[final_output])
@overrides
def _instance_debug_output(self, instance: TupleInferenceInstance, outputs: Dict[str, numpy.array]) -> str:
num_to_display = 5
result = ""
result += "\n====================================================================\n"
result += "Instance: %s\n" % instance.index
result += "Question Text: %s\n" % instance.question_text
result += "Label: %s\n" % instance.label
result += "Num tuples per answer option: %s\n" % [len(answer) for answer in instance.answer_tuples]
result += "(limiting display to top %s at various levels)\n" % num_to_display
result += "====================================================================\n"
answer_scores = []
index_of_chosen = None
softmax_output = outputs.get("masked_softmax", None)
if softmax_output is not None:
answer_scores = list(enumerate(softmax_output))
sorted_answer_scores = sorted(answer_scores, key=lambda tup: tup[1], reverse=True)
# TODO(becky): not handling ties
index_of_chosen = sorted_answer_scores[0][0]
result += "Final scores: %s\n" % answer_scores
if index_of_chosen is None:
result += "ERROR: no answer chosen\n"
elif index_of_chosen == instance.label:
result += " Answered correctly!\n"
else:
result += " Answered incorrectly\n"
result += "====================================================================\n"
# Output of the tuple matcher layer:
# shape: (num_options, num_question_tuples, num_background_tuples)
tuple_matcher_output = outputs.get('match_layer', None)
if tuple_matcher_output is not None:
# correct answer:
# Keep only the first tuples (depending on model setting) because when we padded we set
# truncate_from_right to False.
correct_tuples = instance.answer_tuples[instance.label][:self.num_question_tuples]
background_tuples = instance.background_tuples[:self.num_background_tuples]
result += "-----------------------------------\n"
result += " GOLD ANSWER: (Final score: {0})\n".format(answer_scores[instance.label][1])
result += "-----------------------------------\n"
result += self._render_tuple_match_scores(correct_tuples,
background_tuples,
tuple_matcher_output[instance.label],
instance)
result += "-------------------\n"
result += " Incorrect Answers: \n"
result += "-------------------\n"
# NOTE: extra padded "options" are added on the right, so this should be fine.
for option in range(len(instance.answer_tuples)):
chosen_status = ""
if option != instance.label:
option_tuples = instance.answer_tuples[option][:self.num_question_tuples]
if option == index_of_chosen:
chosen_status = "(Chosen)"
result += "\nOption {0} {1}: (Final Score: {2})\n".format(option,
chosen_status,
answer_scores[option][1])
result += self._render_tuple_match_scores(option_tuples,
background_tuples,
tuple_matcher_output[option],
instance)
result += "\n"
return result
def _render_tuple_match_scores(self, answer_tuples, background_tuples, tuple_matcher_output, instance):
result = ""
for i, answer_tuple in enumerate(answer_tuples):
answer_tuple_string = "\n\t".join(textwrap.wrap(answer_tuple.display_string(), self.display_text_wrap))
result += "Question (repeated): %s\n" % instance.question_text
result += "Answer_tuple_{0} : \n\t{1}\n\n".format(i, answer_tuple_string)
result += "Top {0} (out of {1}) highest scoring background tuples:\n\n".format(self.display_num_tuples,
len(background_tuples))
tuple_match_scores = []
for j, background_tuple in enumerate(background_tuples):
tuple_match_score = tuple_matcher_output[i, j]
tuple_match_scores.append((tuple_match_score, j, background_tuple))
# Sort descending by tuple match score
sorted_by_score = sorted(tuple_match_scores, key=lambda tup: tup[0],
reverse=True)[:self.display_num_tuples]
for scored in sorted_by_score:
background_tuple_index = scored[1]
background_tuple_string = scored[2].display_string()
wrapped_tuple = "\n\t".join(textwrap.wrap(background_tuple_string, self.display_text_wrap))
result += " (TupleMatch Score %s) " % scored[0]
result += "\tbg_tuple_{0} \n\t{1}\n".format(background_tuple_index, wrapped_tuple)
result += "\n"
return result
|
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
import six
# Bokeh imports
from bokeh.colors import named
from bokeh.palettes import __palettes__
from bokeh._testing.util.api import verify_all
# Module under test
import bokeh.core.enums as bce
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
ALL = (
'Align',
'Anchor',
'AngleUnits',
'ButtonType',
'DashPattern',
'DateFormat',
'DatetimeUnits',
'Dimension',
'Dimensions',
'Direction',
'Enumeration',
'enumeration',
'FontStyle',
'HatchPattern',
'HatchPatternAbbreviation',
'HoldPolicy',
'HorizontalLocation',
'JitterRandomDistribution',
'LatLon',
'LegendClickPolicy',
'LegendLocation',
'LineCap',
'LineDash',
'LineJoin',
'Location',
'MapType',
'MarkerType',
'NamedColor',
'NumeralLanguage',
'Orientation',
'OutputBackend',
'PaddingUnits',
'Palette',
'RenderLevel',
'RenderMode',
'ResetPolicy',
'RoundingFunction',
'SizingMode',
'SizingPolicy',
'SliderCallbackPolicy',
'SortDirection',
'SpatialUnits',
'StartEnd',
'StepMode',
'TextAlign',
'TextBaseline',
'TextureRepetition',
'TickLabelOrientation',
'TooltipAttachment',
'TooltipFieldFormatter',
'TrackPolicy',
'VerticalAlign',
'VerticalLocation',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
def test_Enumeration_default():
e = bce.Enumeration()
assert e.__slots__ == ()
class Test_enumeration(object):
def test_basic(self):
e = bce.enumeration("foo", "bar", "baz")
assert isinstance(e, bce.Enumeration)
assert str(e) == "Enumeration(foo, bar, baz)"
assert [x for x in e] == ["foo", "bar", "baz"]
for x in ["foo", "bar", "baz"]:
assert x in e
assert "junk" not in e
def test_case(self):
e = bce.enumeration("foo", "bar", "baz", case_sensitive=False)
assert isinstance(e, bce.Enumeration)
assert str(e) == "Enumeration(foo, bar, baz)"
assert [x for x in e] == ["foo", "bar", "baz"]
for x in ["foo", "FOO", "bar", "bAr", "baz", "BAZ"]:
assert x in e
assert "junk" not in e
@pytest.mark.skipif(six.PY2, reason="Unimportant unicode silliness, py2 going away soon")
def test_quote(self):
e = bce.enumeration("foo", "bar", "baz", quote=True)
assert isinstance(e, bce.Enumeration)
assert str(e) == 'Enumeration("foo", "bar", "baz")' or str(e) == "Enumeration('foo', 'bar', 'baz')"
assert [x for x in e] == ["foo", "bar", "baz"]
for x in ["foo", "bar", "baz"]:
assert x in e
assert "junk" not in e
def test_default(self):
# this is private but used by properties
e = bce.enumeration("foo", "bar", "baz")
assert e._default == "foo"
def test_len(self):
e = bce.enumeration("foo", "bar", "baz")
assert len(e) == 3
class Test_bce(object):
def test_Anchor(self):
assert tuple(bce.Anchor) == (
"top_left", "top_center", "top_right",
"center_left", "center", "center_right",
"bottom_left", "bottom_center", "bottom_right"
)
def test_AngleUnits(self):
assert tuple(bce.AngleUnits) == ('deg', 'rad')
def test_ButtonType(self):
assert tuple(bce.ButtonType) == ("default", "primary", "success", "warning", "danger")
def test_DashPattern(self):
assert tuple(bce.DashPattern) == ("solid", "dashed", "dotted", "dotdash", "dashdot")
def test_DateFormat(self):
assert tuple(bce.DateFormat) == ("ATOM", "W3C", "RFC-3339", "ISO-8601", "COOKIE", "RFC-822",
"RFC-850", "RFC-1036", "RFC-1123", "RFC-2822", "RSS", "TIMESTAMP")
def test_DatetimeUnits(self):
assert tuple(bce.DatetimeUnits) == ("microseconds", "milliseconds", "seconds", "minsec",
"minutes", "hourmin", "hours", "days", "months", "years")
def test_Dimension(self):
assert tuple(bce.Dimension) == ("width", "height")
def test_Dimensions(self):
assert tuple(bce.Dimensions) == ("width", "height", "both")
def test_Direction(self):
assert tuple(bce.Direction) == ("clock", "anticlock")
def test_FontStyle(self):
assert tuple(bce.FontStyle) == ('normal', 'italic', 'bold', 'bold italic')
def test_HatchPattern(self):
assert tuple(bce.HatchPattern) == (
"blank", "dot", "ring", "horizontal_line", "vertical_line", "cross", "horizontal_dash", "vertical_dash",
"spiral", "right_diagonal_line", "left_diagonal_line", "diagonal_cross", "right_diagonal_dash",
"left_diagonal_dash", "horizontal_wave", "vertical_wave", "criss_cross"
)
def test_HatchPatternAbbreviation(self):
assert tuple(bce.HatchPatternAbbreviation) == (' ', '.', 'o', '-', '|', '+', '"', ':', '@', '/', '\\', 'x', ',', '`', 'v', '>', '*')
def test_HoldPolicy(self):
assert tuple(bce.HoldPolicy) == ("combine", "collect")
def test_HorizontalLocation(self):
assert tuple(bce.HorizontalLocation) == ("left", "right")
def test_JitterRandomDistribution(self):
assert tuple(bce.JitterRandomDistribution) == ("uniform", "normal")
def test_LatLon(self):
assert tuple(bce.LatLon) == ("lat", "lon")
def test_LegendClickPolicy(self):
assert tuple(bce.LegendClickPolicy) == ("none", "hide", "mute")
def test_LegendLocation(self):
assert tuple(bce.LegendLocation) == (
"top_left", "top_center", "top_right",
"center_left", "center", "center_right",
"bottom_left", "bottom_center", "bottom_right"
)
def test_LineCap(self):
assert tuple(bce.LineCap) == ("butt", "round", "square")
def test_LineDash(self):
assert tuple(bce.LineDash) == ("solid", "dashed", "dotted", "dotdash", "dashdot")
def test_LineJoin(self):
assert tuple(bce.LineJoin) == ("miter", "round", "bevel")
def test_Location(self):
assert tuple(bce.Location) == ("above", "below", "left", "right")
def test_MapType(self):
assert tuple(bce.MapType) == ("satellite", "roadmap", "terrain", "hybrid")
def test_MarkerType(self):
assert tuple(bce.MarkerType) == ("asterisk", "circle", "circle_cross", "circle_x", "cross",
"dash", "diamond", "diamond_cross", "hex", "inverted_triangle",
"square", "square_cross", "square_x", "triangle", "x")
def test_NamedColor(self):
assert len(tuple(bce.NamedColor)) == 147
assert tuple(bce.NamedColor) == tuple(named.__all__)
def test_NumeralLanguage(self):
assert tuple(bce.NumeralLanguage) == ("be-nl", "chs", "cs", "da-dk", "de-ch", "de", "en",
"en-gb", "es-ES", "es", "et", "fi", "fr-CA", "fr-ch",
"fr", "hu", "it", "ja", "nl-nl", "pl", "pt-br",
"pt-pt", "ru", "ru-UA", "sk", "th", "tr", "uk-UA")
def test_Orientation(self):
assert tuple(bce.Orientation) == ("horizontal", "vertical")
def test_OutputBackend(self):
assert tuple(bce.OutputBackend) == ("canvas", "svg", "webgl")
def test_PaddingUnits(self):
assert tuple(bce.PaddingUnits) == ("percent", "absolute")
def test_Palette(self):
assert tuple(bce.Palette) == tuple(__palettes__)
def test_RenderLevel(self):
assert tuple(bce.RenderLevel) == ("image", "underlay", "glyph", "annotation", "overlay")
def test_RenderMode(self):
assert tuple(bce.RenderMode) == ("canvas", "css")
def test_ResetPolicy(self):
assert tuple(bce.ResetPolicy) == ("standard", "event_only")
def test_RoundingFunction(self):
assert tuple(bce.RoundingFunction) == ("round", "nearest", "floor", "rounddown", "ceil", "roundup")
def test_SizingMode(self):
assert tuple(bce.SizingMode) == ("stretch_width", "stretch_height", "stretch_both", "scale_width", "scale_height", "scale_both", "fixed")
def test_SliderCallbackPolicy(self):
assert tuple(bce.SliderCallbackPolicy) == ("continuous", "throttle", "mouseup")
def test_SortDirection(self):
assert tuple(bce.SortDirection) == ("ascending", "descending")
def test_SpatialUnits(self):
assert tuple(bce.SpatialUnits) == ("screen", "data")
def test_StartEnd(self):
assert tuple(bce.StartEnd) == ("start", "end")
def test_StepMode(self):
assert tuple(bce.StepMode) == ("before", "after", "center")
def test_TextAlign(self):
assert tuple(bce.TextAlign) == ("left", "right", "center")
def test_TextBaseline(self):
assert tuple(bce.TextBaseline) == ("top", "middle", "bottom", "alphabetic", "hanging", "ideographic")
def test_TextureRepetition(self):
assert tuple(bce.TextureRepetition) == ("repeat", "repeat_x", "repeat_y", "no_repeat")
def test_TickLabelOrientation(self):
assert tuple(bce.TickLabelOrientation) == ("horizontal", "vertical", "parallel", "normal")
def test_TooltipAttachment(self):
assert tuple(bce.TooltipAttachment) == ("horizontal", "vertical", "left", "right", "above", "below")
def test_TooltipFieldFormatter(self):
assert tuple(bce.TooltipFieldFormatter) == ("numeral", "datetime", "printf")
def test_VerticalAlign(self):
assert tuple(bce.VerticalAlign) == ("top", "middle", "bottom")
def test_VerticalLocation(self):
assert tuple(bce.VerticalLocation) == ("above", "below")
# any changes to the contents of bce.py are easily trackable here
def test_enums_contents():
enums = [x for x in ALL if x != "enumeration"]
assert [x for x in dir(bce) if x[0].isupper()] == enums
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
Test___all__ = verify_all(bce, ALL)
|
|
#!/usr/bin/python
import serial
import time
import sys
from simulator import *
class Display(object):
'''
Driver for the Hanover display.
Currently, this driver only works with a resolution of 128x16, at address 1.
This limitation should be removed in a future version.
'''
def __init__(self, serial, address, columns, lines, font, debug=False, simulator=False):
self.port = serial
if lines % 8:
lines = lines + (8-(lines % 8))
self.lines = lines / 8
self.columns = columns - 1
self.data = ((lines * columns) / 8)
res1, res2 = self.byte_to_ascii(self.data & 0xff)
self.byte_per_column = lines / 8
address += 16
add1, add2 = self.byte_to_ascii(address)
# Header part
self.header = [0x2, add1, add2, res1, res2]
# Footer part
self.footer = [0x3, 0x00, 0x00]
# Data buffer initialized to 0
print self.data
self.buf = [0] * (self.data / self.byte_per_column)
print len(self.buf)
# Fonts object
self.font = font
# Debug flag
self.DEBUG = debug
# Simulator switch
self.SIMULATOR = simulator
if self.SIMULATOR:
self.sim = Simulator()
self.connect()
def connect(self):
'''
Connect to the serial device
'''
if not self.SIMULATOR:
try:
self.ser = serial.Serial(port=self.port, baudrate=4800)
except:
print sys.exc_info()
print "Error opening serial port"
self.ser = None
if self.DEBUG:
print "Serial port:", self.ser
elif self.DEBUG:
print "Simulator instance", self.sim
def set_font(self, font):
'''
Set a font
'''
self.font = font
def erase_all(self):
'''
Erase all the screen
'''
if self.DEBUG:
print "Erasing all"
for i in range(len(self.buf)):
self.buf[i] = 0
def write_text(self, text, line=0, column=0):
'''
Write text on the first line
'''
if self.DEBUG:
print "First line text : ", text
# Detect the size
mask = 0xff
for byte in self.font[0x31]:
if byte.bit_length() >= 9:
mask = 0xffff
break
# Parse all the characters
for char in text:
# Fill the buffer
for i in range(len(self.font[0])):
if column > self.columns:
return 0
self.buf[column] &= ~((mask << line) & ((1 << self.byte_per_column*8)-1))
self.buf[column] |= ((self.font[ord(char)][i])<<line) & (1 << (self.byte_per_column * 8)) - 1
column += 1
def byte_to_ascii(self, byte):
'''
Convert a byte to its ASCII representation.
The transmission represents each byte by two ASCII hex digits.
For example, 0x67 is represented by 0x36 0x37 (ASCII '6' and ASCII '7').
This is not an elegant way to convert the data, and this function should
be refactored.
'''
b1 = 0
b2 = 0
b1 = byte >> 4
if b1 > 9:
b1 += 0x37
else:
b1 += 0x30
b2 = byte % 16
if b2 > 9:
b2 += 0x37
else:
b2 += 0x30
return (b1, b2)
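# Worked example of the nibble conversion above (a sketch, not driver API):
# 0x67 -> high nibble 0x6 -> 0x30 + 6 = 0x36 ('6'),
#         low nibble 0x7 -> 0x30 + 7 = 0x37 ('7');
# equivalently: tuple(ord(c) for c in "%02X" % 0x67) == (0x36, 0x37)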
def __checksum__(self, dsum):
'''
Compute the checksum of the data frame
'''
sum = 0
# Sum all bytes of the header and the buffer
for byte in self.header:
sum += byte
sum += dsum
# Start of text (0x02) must be removed,
# End of text (0x03) must be added
sum += 1
# Result must be truncated to 8 bits
sum = sum & 0xFF
# Checksum is (sum XOR 255) + 1, so the sum of all bytes plus the
# checksum equals 0 modulo 256
crc = (sum ^ 255) + 1
# Transform the checksum into ASCII
crc1, crc2 = self.byte_to_ascii(crc)
# Add the checksum on the footer
self.footer[1] = crc1
self.footer[2] = crc2
if self.DEBUG:
print "SUM : %d, CRC : %d, SUM + CRC : %d"%(sum, crc, sum+crc)
def send(self):
'''
Send the frame via the serial port
:return: Return 0 on success, -1 on errors
'''
if self.DEBUG:
print self.header, self.buf, self.footer
print ""
if not self.SIMULATOR:
crc = 0
try:
# Send the header
for byte in self.header:
self.ser.write(chr(byte))
# Send the data
for col in self.buf:
for i in range(self.byte_per_column):
b1, b2 = self.byte_to_ascii((col >> (8*i) & 0xFF))
crc += b1
crc += b2
self.ser.write(chr(b1))
self.ser.write(chr(b2))
# Compute the checksum
self.__checksum__(crc)
# Send the footer
for byte in self.footer:
self.ser.write(chr(byte))
return 0
except:
return -1
else:
simbuf = []
for byte in self.buf:
for i in range(self.lines):
b1, b2 = self.byte_to_ascii(byte >> (i*8) & 0xFF)
simbuf.append(b2)
simbuf.append(b1)
self.sim.display(simbuf, self.lines)
return 0
|
|
"""
solve the diffusion equation:
phi_t = k phi_{xx}
with a Crank-Nicolson implicit discretization
M. Zingale (2013-04-03)
"""
import numpy
from scipy import linalg
import sys
import pylab
import diffimplicit
def diffuseCN(gr, phi, k, dt):
""" diffuse phi implicitly through timestep dt, with a C-N
temporal discretization """
phinew = gr.scratchArray()
alpha = k*dt/gr.dx**2
# create the RHS of the matrix
gr.fillBC()
R = 0.5*k*dt*lap(gr, phi)
R = R[gr.ilo:gr.ihi+1]
R += phi[gr.ilo:gr.ihi+1]
# create the diagonal, upper (d+1) and lower (d-1) parts of the matrix
d = (1.0 + alpha)*numpy.ones(gr.nx)
u = -0.5*alpha*numpy.ones(gr.nx)
u[0] = 0.0
l = -0.5*alpha*numpy.ones(gr.nx)
l[gr.nx-1] = 0.0
# set the boundary conditions by changing the matrix elements
# homogeneous neumann
d[0] = 1.0 + 0.5*alpha
d[gr.nx-1] = 1.0 + 0.5*alpha
# Dirichlet
#d[0] = 1.0 + 1.5*alpha
#d[gr.nx-1] = 1.0 + 1.5*alpha
#R[0] += alpha*phi1
#R[gr.nx-1] += alpha*phi1
# solve
A = numpy.array([u, d, l])
phinew[gr.ilo:gr.ihi+1] = linalg.solve_banded((1,1), A, R)
return phinew
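# Banded-storage note for solve_banded((1, 1), ...): row 0 of A holds the
# superdiagonal (first entry unused), row 1 the main diagonal, row 2 the
# subdiagonal (last entry unused) -- hence u[0] = 0.0 and l[nx-1] = 0.0.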
def lap(gr, phi):
""" compute the Laplacian of phi """
lapphi = gr.scratchArray()
ib = gr.ilo
ie = gr.ihi
lapphi[ib:ie+1] = (phi[ib-1:ie] - 2.0*phi[ib:ie+1] + phi[ib+1:ie+2])/gr.dx**2
return lapphi
class grid:
def __init__(self, nx, ng=1, xmin=0.0, xmax=1.0):
""" grid class initialization """
self.nx = nx
self.ng = ng
self.xmin = xmin
self.xmax = xmax
self.dx = (xmax - xmin)/nx
self.x = (numpy.arange(nx+2*ng) + 0.5 - ng)*self.dx + xmin
self.ilo = ng
self.ihi = ng+nx-1
# storage for the solution
self.phi = numpy.zeros((nx+2*ng), dtype=numpy.float64)
def fillBC(self):
""" fill the Neumann BCs """
# Neumann BCs
self.phi[0:self.ilo] = self.phi[self.ilo]
self.phi[self.ihi+1:] = self.phi[self.ihi]
def scratchArray(self):
return numpy.zeros((2*self.ng+self.nx), dtype=numpy.float64)
def phi_a(self, t, k, t0, phi1, phi2):
""" analytic solution """
xc = 0.5*(self.xmin + self.xmax)
return (phi2 - phi1)*numpy.sqrt(t0/(t + t0)) * \
numpy.exp(-0.25*(self.x-xc)**2/(k*(t + t0))) + phi1
def norm(self, e):
""" return the norm of quantity e which lives on the grid """
if not len(e) == (2*self.ng + self.nx):
return None
return numpy.sqrt(self.dx*numpy.sum(e[self.ilo:self.ihi+1]**2))
def evolve(nx, k, t0, phi1, phi2, C, tmax):
"""
the main evolution loop. Evolve
phi_t = k phi_{xx}
from t = 0 to tmax
"""
# create the grid
gr = grid(nx, ng=1, xmax=1.0)
# time info
dt = C*0.5*gr.dx**2/k
t = 0.0
# initialize the data
gr.phi[:] = gr.phi_a(0.0, k, t0, phi1, phi2)
while (t < tmax):
gr.fillBC()
# make sure we end right at tmax
if (t + dt > tmax):
dt = tmax - t
# diffuse for dt
phinew = diffuseCN(gr, gr.phi, k, dt)
gr.phi[:] = phinew[:]
t += dt
return gr
def evolveExplicit(nx, k, t0, phi1, phi2, C, tmax):
""" fully explicit for comparison """
ng = 1
# create the grid
g = grid(nx, ng, xmax=1.0)
# time info
dt = C*0.5*g.dx**2/k
t = 0.0
# initialize the data
g.phi[:] = g.phi_a(0.0, k, t0, phi1, phi2)
# evolution loop
phinew = g.scratchArray()
while (t < tmax):
# make sure we end right at tmax
if (t + dt > tmax):
dt = tmax - t
# fill the boundary conditions
g.fillBC()
alpha = k*dt/g.dx**2
# loop over zones
i = g.ilo
while (i <= g.ihi):
# FTCS (forward-time, centered-space) explicit diffusion update
phinew[i] = g.phi[i] + alpha*(g.phi[i+1] - 2.0*g.phi[i] + g.phi[i-1])
i += 1
# store the updated solution
g.phi[:] = phinew[:]
t += dt
return g
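# Stability note (standard von Neumann analysis for FTCS diffusion): the
# explicit update above is stable only for alpha = k*dt/dx**2 <= 0.5,
# i.e. C <= 1 with dt = C*0.5*dx**2/k -- which is why the C = 2.0
# comparison below omits the explicit scheme.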
#-----------------------------------------------------------------------------
# convergence C = 0.8
pylab.clf()
# a characteristic timescale for diffusion is L^2/k
tmax = 0.005
t0 = 1.e-4
phi1 = 1.0
phi2 = 2.0
k = 1.0
N = [32, 64, 128, 256, 512]
ng = 1
# CFL number
C = 0.8
err = []
errFOimpl = []
errExpl = []
for nx in N:
print nx
# the present C-N discretization
g = evolve(nx, k, t0, phi1, phi2, C, tmax)
# compare to the first-order implicit discretization
gFOimpl = diffimplicit.evolve(nx, k, t0, phi1, phi2, C, tmax)
# compare to the explicit discretization
gExpl = evolveExplicit(nx, k, t0, phi1, phi2, C, tmax)
phi_analytic = g.phi_a(tmax, k, t0, phi1, phi2)
err.append(g.norm(g.phi - phi_analytic))
errFOimpl.append(g.norm(gFOimpl.phi - phi_analytic))
errExpl.append(g.norm(gExpl.phi - phi_analytic))
pylab.clf()
N = numpy.array(N, dtype=numpy.float64)
err = numpy.array(err)
print "err = ", err
print "errFOimpl = ", errFOimpl
print "errExpl = ", errExpl
pylab.scatter(N, err, color="r", label="C-N implicit diffusion")
pylab.scatter(N, errFOimpl, color="b", label="backward-diff implicit diffusion")
pylab.scatter(N, errExpl, color="g", label="forward-diff explicit diffusion")
pylab.plot(N, err[len(N)-1]*(N[len(N)-1]/N)**2, color="k", label="$\mathcal{O}(\Delta x^2)$")
ax = pylab.gca()
ax.set_xscale('log')
ax.set_yscale('log')
pylab.xlabel(r"$N$")
pylab.ylabel(r"L2 norm of absolute error")
pylab.title("Convergence of Diffusion Methods, C = %3.2f, t = %5.2g" % (C, tmax))
pylab.ylim(1.e-6, 1.e-2)
pylab.legend(frameon=False, fontsize="small")
pylab.savefig("diffmethods-converge-0.8.png")
#-----------------------------------------------------------------------------
# convergence C = 2.0
pylab.clf()
# a characteristic timescale for diffusion is L^2/k
tmax = 0.005
t0 = 1.e-4
phi1 = 1.0
phi2 = 2.0
k = 1.0
N = [32, 64, 128, 256, 512]
ng = 1
# CFL number
C = 2.0
err = []
errFOimpl = []
errExpl = []
for nx in N:
print nx
# the present C-N discretization
g = evolve(nx, k, t0, phi1, phi2, C, tmax)
# compare to the first-order implicit discretization
gFOimpl = diffimplicit.evolve(nx, k, t0, phi1, phi2, C, tmax)
phi_analytic = g.phi_a(tmax, k, t0, phi1, phi2)
err.append(g.norm(g.phi - phi_analytic))
errFOimpl.append(g.norm(gFOimpl.phi - phi_analytic))
pylab.clf()
N = numpy.array(N, dtype=numpy.float64)
err = numpy.array(err)
print "err = ", err
print "errFOimpl = ", errFOimpl
pylab.scatter(N, err, color="r", label="C-N implicit diffusion")
pylab.scatter(N, errFOimpl, color="b", label="backward-diff implicit diffusion")
pylab.plot(N, err[len(N)-1]*(N[len(N)-1]/N)**2, color="k", label="$\mathcal{O}(\Delta x^2)$")
ax = pylab.gca()
ax.set_xscale('log')
ax.set_yscale('log')
pylab.xlabel(r"$N$")
pylab.ylabel(r"L2 norm of absolute error")
pylab.title("Convergence of Diffusion Methods, C = %3.2f, t = %5.2g" % (C, tmax))
pylab.ylim(1.e-6, 1.e-2)
pylab.legend(frameon=False, fontsize="small")
pylab.savefig("diffmethods-converge-2.0.png")
|
|
# Copyright (c) 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from intel import topology
def test_init_topology_one_core():
lscpu = """#The following is the parsable format, which can be fed to other
# programs. Each different item in every column has an unique ID
# starting from zero.
# CPU,Core,Socket,Node,,L1d,L1i,L2,L3
0,0,0,0,,0,0,0,0
"""
sockets = topology.parse(lscpu)
assert len(sockets.sockets) == 1
cores = sockets.get_socket(0).cores
assert len(cores) == 1
assert 0 in cores
assert cores[0].cpu_ids() == [0]
def test_init_topology_two_cores():
lscpu = """#The following is the parsable format, which can be fed to other
# programs. Each different item in every column has an unique ID
# starting from zero.
# CPU,Core,Socket,Node,,L1d,L1i,L2,L3
0,0,0,0,,0,0,0,0
1,1,0,0,,1,1,1,0
"""
sockets = topology.parse(lscpu)
assert len(sockets.sockets) == 1
cores = sockets.get_socket(0).cores
assert len(cores) == 2
assert 0 in cores
assert 1 in cores
assert cores[0].cpu_ids() == [0]
assert cores[1].cpu_ids() == [1]
def test_init_topology_one_socket():
lscpu = """#The following is the parsable format, which can be fed to other
# programs. Each different item in every column has an unique ID
# starting from zero.
# CPU,Core,Socket,Node,,L1d,L1i,L2,L3
0,0,0,0,,0,0,0,0
1,1,0,0,,1,1,1,0
2,2,0,0,,2,2,2,0
3,3,0,0,,3,3,3,0
4,0,0,0,,0,0,0,0
5,1,0,0,,1,1,1,0
6,2,0,0,,2,2,2,0
7,3,0,0,,3,3,3,0
"""
sockets = topology.parse(lscpu)
assert len(sockets.sockets) == 1
cores = sockets.get_socket(0).cores
assert len(cores) == 4
assert 0 in cores
assert 1 in cores
assert 2 in cores
assert 3 in cores
assert cores[0].cpu_ids() == [0, 4]
assert cores[1].cpu_ids() == [1, 5]
assert cores[2].cpu_ids() == [2, 6]
assert cores[3].cpu_ids() == [3, 7]
def test_parse_isolcpus_invalid_input():
assert topology.parse_isolcpus("") == []
assert topology.parse_isolcpus("a") == []
assert topology.parse_isolcpus("a b") == []
assert topology.parse_isolcpus("a b\n") == []
assert topology.parse_isolcpus("a b c\nA B C") == []
assert topology.parse_isolcpus("a b=7 c\nA B C") == []
assert topology.parse_isolcpus("a b=7 c=7,8,9\nA B C") == []
assert topology.parse_isolcpus("a b=7 c=7, 8,9\nA B C") == []
def test_parse_isolcpus_valid_input():
cmdline = ("BOOT_IMAGE=/boot/vmlinuz-4.4.14-040414-generic "
"root=/dev/md2 ro net.ifnames=0 isolcpus=0,1,2,3,8,9,10,11")
assert topology.parse_isolcpus(cmdline) == [0, 1, 2, 3, 8, 9, 10, 11]
cmdline = ("BOOT_IMAGE=/boot/vmlinuz-4.4.14-040414-generic "
"root=/dev/md2 ro net.ifnames=0 "
"isolcpus=0,1,2,3,8,9,10,11,15-18")
assert topology.parse_isolcpus(cmdline) == [0, 1, 2, 3, 8, 9, 10, 11, 15,
16, 17, 18]
cmdline = ("BOOT_IMAGE=/boot/vmlinuz-4.4.14-040414-generic "
"root=/dev/md2 ro net.ifnames=0 "
"isolcpus=0,1,2,3,8,9,10,11,10-13")
assert topology.parse_isolcpus(cmdline) == [0, 1, 2, 3, 8, 9, 10, 11, 12,
13]
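# A minimal standalone sketch (hypothetical helper, not the module under
# test) of the range expansion the assertions above exercise:
def _expand_cpu_list(spec):
    """Expand e.g. '0,1,15-18' into a sorted list of unique CPU ids."""
    cpus = set()
    for part in spec.split(','):
        if '-' in part:
            lo, hi = part.split('-')
            cpus.update(range(int(lo), int(hi) + 1))
        else:
            cpus.add(int(part))
    return sorted(cpus)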
def test_topology_isolated_one_socket():
lscpu = """#The following is the parsable format, which can be fed to other
# programs. Each different item in every column has an unique ID
# starting from zero.
# CPU,Core,Socket,Node,,L1d,L1i,L2,L3
0,0,0,0,,0,0,0,0
1,1,0,0,,1,1,1,0
2,2,0,0,,2,2,2,0
3,3,0,0,,3,3,3,0
4,0,0,0,,0,0,0,0
5,1,0,0,,1,1,1,0
6,2,0,0,,2,2,2,0
7,3,0,0,,3,3,3,0
"""
isolated_cpus = [0, 4, 1, 5]
sockets = topology.parse(lscpu, isolated_cpus)
assert len(sockets.sockets) == 1
cores = sockets.get_socket(0).cores
assert len(cores) == 4
assert 0 in cores
assert 1 in cores
assert 2 in cores
assert 3 in cores
assert cores[0].cpu_ids() == [0, 4]
assert cores[0].is_isolated()
assert cores[1].cpu_ids() == [1, 5]
assert cores[1].is_isolated()
assert cores[2].cpu_ids() == [2, 6]
assert not cores[2].is_isolated()
assert cores[3].cpu_ids() == [3, 7]
assert not cores[3].is_isolated()
# Verify that partially isolated physical cores (where only a subset of
# the physical core's hyperthreads are in the isolated list) are not
# reported as isolated.
isolated_cpus = [0, 1]
sockets = topology.parse(lscpu, isolated_cpus)
assert len(sockets.sockets) == 1
cores = sockets.get_socket(0).cores.values()
assert len(cores) == 4
assert len([c for c in cores if c.is_isolated()]) == 0
# Test case where all discovered cores are isolated.
isolated_cpus = list(range(8))
sockets = topology.parse(lscpu, isolated_cpus)
assert len(sockets.sockets) == 1
cores = sockets.get_socket(0).cores.values()
assert len(cores) == 4
assert len([c for c in cores if c.is_isolated()]) == 4
# Test case where superset of discovered cores are isolated.
isolated_cpus = list(range(9))
sockets = topology.parse(lscpu, isolated_cpus)
assert len(sockets.sockets) == 1
cores = sockets.get_socket(0).cores.values()
assert len(cores) == 4
assert len([c for c in cores if c.is_isolated()]) == 4
def test_topology_isolated_two_sockets():
lscpu = """# The following is the parsable format, which can be fed to other
# programs. Each different item in every column has an unique ID
# starting from zero.
# CPU,Core,Socket,Node,,L1d,L1i,L2
0,0,0,,,0,0,0
1,1,0,,,1,1,1
2,2,0,,,2,2,2
3,3,0,,,3,3,3
4,0,0,,,4,4,4
5,1,0,,,5,5,5
6,2,0,,,6,6,6
7,3,0,,,7,7,7
8,0,1,,,8,8,8
9,1,1,,,9,9,9
10,2,1,,,10,10,10
11,3,1,,,11,11,11
12,0,1,,,12,12,12
13,1,1,,,13,13,13
14,2,1,,,14,14,14
15,3,1,,,15,15,15
"""
isolated_cpus = [0, 4, 1, 5, 8, 12, 10, 14]
sockets = topology.parse(lscpu, isolated_cpus)
assert len(sockets.sockets) == 2
cores = sockets.get_cores()
assert len(cores) == 8
for core in cores:
assert core.core_id >= 0
assert core.core_id < 16
assert cores[0].cpu_ids() == [0, 4]
assert cores[0].is_isolated()
assert cores[1].cpu_ids() == [1, 5]
assert cores[1].is_isolated()
assert cores[2].cpu_ids() == [2, 6]
assert not cores[2].is_isolated()
assert cores[3].cpu_ids() == [3, 7]
assert not cores[3].is_isolated()
assert cores[4].cpu_ids() == [8, 12]
assert cores[4].is_isolated()
assert cores[5].cpu_ids() == [9, 13]
assert not cores[5].is_isolated()
assert cores[6].cpu_ids() == [10, 14]
assert cores[6].is_isolated()
assert cores[7].cpu_ids() == [11, 15]
assert not cores[7].is_isolated()
isolated_cores = sockets.get_isolated_cores()
assert len(isolated_cores) == 4
for core in isolated_cores:
assert core.cpu_ids() in [[0, 4], [1, 5], [8, 12], [10, 14]]
assert core.cpu_ids() not in [[2, 6], [3, 7], [9, 13], [11, 15]]
# Verify that partially isolated physical cores (where only a subset of
# the physical core's hyperthreads are in the isolated list) are not
# reported as isolated.
isolated_cpus = [0, 1]
sockets = topology.parse(lscpu, isolated_cpus)
assert len(sockets.sockets) == 2
cores = sockets.get_socket(0).cores.values()
assert len(cores) == 4
assert len([c for c in cores if c.is_isolated()]) == 0
# Test case where all discovered cores are isolated.
isolated_cpus = list(range(8))
sockets = topology.parse(lscpu, isolated_cpus)
assert len(sockets.sockets) == 2
cores = sockets.get_socket(0).cores.values()
assert len(cores) == 4
assert len([c for c in cores if c.is_isolated()]) == 4
# Test case where superset of discovered cores are isolated.
isolated_cpus = list(range(9))
sockets = topology.parse(lscpu, isolated_cpus)
assert len(sockets.sockets) == 2
cores = sockets.get_socket(0).cores.values()
assert len(cores) == 4
assert len([c for c in cores if c.is_isolated()]) == 4
assert sockets.get_socket(3) is None
def test_topology_cores_get_modes():
lscpu = """# The following is the parsable format, which can be fed to other
# programs. Each different item in every column has an unique ID
# starting from zero.
# CPU,Core,Socket,Node,,L1d,L1i,L2
0,0,0,0,,0,0,0
1,0,0,0,,1,1,0
2,1,0,0,,2,2,1
3,1,0,0,,3,3,1
4,2,0,0,,4,4,2
5,2,0,0,,5,5,2
6,3,0,0,,6,6,3
7,3,0,0,,7,7,3
8,4,1,0,,8,8,4
9,4,1,0,,9,9,4
10,5,1,0,,10,10,5
11,5,1,0,,11,11,5
12,6,1,0,,12,12,6
13,6,1,0,,13,13,6
14,7,1,0,,14,14,7
15,7,1,0,,15,15,7
"""
isolated_cpus = [0, 1, 2, 3, 8, 9, 10, 11]
sockets = topology.parse(lscpu, isolated_cpus)
assert len(sockets.sockets) == 2
cores = sockets.get_cores(mode="spread")
assert cores[0].core_id == 0
assert cores[1].core_id == 4
assert cores[2].core_id == 1
assert cores[3].core_id == 5
cores = sockets.get_cores(mode="packed")
assert cores[0].core_id == 0
assert cores[1].core_id == 1
assert cores[2].core_id == 2
assert cores[3].core_id == 3
cores = sockets.get_cores(mode="unknown")
assert cores[0].core_id == 0
assert cores[1].core_id == 1
assert cores[2].core_id == 2
assert cores[3].core_id == 3
cores = sockets.get_isolated_cores(mode="spread")
for core in cores:
print(core.cpu_ids())
assert cores[0].core_id == 0
assert cores[1].core_id == 4
assert cores[2].core_id == 1
assert cores[3].core_id == 5
cores = sockets.get_isolated_cores(mode="packed")
for core in cores:
print(core.cpu_ids())
assert cores[0].core_id == 0
assert cores[1].core_id == 1
assert cores[2].core_id == 4
assert cores[3].core_id == 5
cores = sockets.get_isolated_cores(mode="unknown")
for core in cores:
print(core.cpu_ids())
assert cores[0].core_id == 0
assert cores[1].core_id == 1
assert cores[2].core_id == 4
assert cores[3].core_id == 5
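# Illustration (not part of the test suite): a minimal sketch of the core
# ordering the assertions above expect. "packed" keeps cores in socket
# order, while "spread" round-robins across sockets. The real topology
# module may implement this differently; core_ids_by_socket is a
# hypothetical stand-in for its internal per-socket core lists.
def _order_cores_sketch(core_ids_by_socket, mode):
    if mode == "spread":
        # Interleave: take one core from each socket in turn.
        return [c for group in zip(*core_ids_by_socket) for c in group]
    # "packed" (and unrecognized modes) fall back to plain socket order.
    return [c for group in core_ids_by_socket for c in group]
# _order_cores_sketch([[0, 1, 2, 3], [4, 5, 6, 7]], "spread") -> [0, 4, 1, 5, 2, 6, 3, 7]
# _order_cores_sketch([[0, 1, 2, 3], [4, 5, 6, 7]], "packed") -> [0, 1, 2, 3, 4, 5, 6, 7]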
|
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import sys, glob, os
sys.path.insert(0, glob.glob(os.path.join(os.path.dirname(__file__),'../../lib/py/build/lib.*'))[0])
import unittest
import time
from optparse import OptionParser
parser = OptionParser()
parser.add_option('--genpydir', type='string', dest='genpydir',
default='gen-py',
help='include this local directory in sys.path for locating generated code')
parser.add_option("--port", type="int", dest="port",
help="connect to server at port")
parser.add_option("--host", type="string", dest="host",
help="connect to server")
parser.add_option("--zlib", action="store_true", dest="zlib",
help="use zlib wrapper for compressed transport")
parser.add_option("--ssl", action="store_true", dest="ssl",
help="use SSL for encrypted transport")
parser.add_option("--http", dest="http_path",
help="Use the HTTP transport with the specified path")
parser.add_option('-v', '--verbose', action="store_const",
dest="verbose", const=2,
help="verbose output")
parser.add_option('-q', '--quiet', action="store_const",
dest="verbose", const=0,
help="minimal output")
parser.add_option('--protocol', dest="proto", type="string",
help="protocol to use, one of: accel, binary, compact, json")
parser.add_option('--transport', dest="trans", type="string",
help="transport to use, one of: buffered, framed")
parser.set_defaults(framed=False, http_path=None, verbose=1, host='localhost', port=9090, proto='binary')
options, args = parser.parse_args()
script_dir = os.path.dirname(__file__)
sys.path.insert(0, os.path.join(script_dir, options.genpydir))
from ThriftTest import ThriftTest, SecondService
from ThriftTest.ttypes import *
from thrift.transport import TTransport
from thrift.transport import TSocket
from thrift.transport import THttpClient
from thrift.transport import TZlibTransport
from thrift.protocol import TBinaryProtocol
from thrift.protocol import TCompactProtocol
from thrift.protocol import TJSONProtocol
class AbstractTest(unittest.TestCase):
def setUp(self):
if options.http_path:
self.transport = THttpClient.THttpClient(options.host, port=options.port, path=options.http_path)
else:
if options.ssl:
from thrift.transport import TSSLSocket
socket = TSSLSocket.TSSLSocket(options.host, options.port, validate=False)
else:
socket = TSocket.TSocket(options.host, options.port)
            # Frame or buffer depending upon args; default to buffered.
            self.transport = TTransport.TBufferedTransport(socket)
            if options.trans == 'framed':
                self.transport = TTransport.TFramedTransport(socket)
            elif options.trans == 'buffered':
                self.transport = TTransport.TBufferedTransport(socket)
            elif options.trans:
                raise AssertionError('Unknown --transport option: %s' % options.trans)
if options.zlib:
self.transport = TZlibTransport.TZlibTransport(self.transport, 9)
self.transport.open()
protocol = self.protocol_factory.getProtocol(self.transport)
self.client = ThriftTest.Client(protocol)
def tearDown(self):
# Close!
self.transport.close()
def testVoid(self):
self.client.testVoid()
def testString(self):
self.assertEqual(self.client.testString('Python' * 20), 'Python' * 20)
self.assertEqual(self.client.testString(''), '')
def testBool(self):
self.assertEqual(self.client.testBool(True), True)
self.assertEqual(self.client.testBool(False), False)
def testByte(self):
self.assertEqual(self.client.testByte(63), 63)
self.assertEqual(self.client.testByte(-127), -127)
def testI32(self):
self.assertEqual(self.client.testI32(-1), -1)
self.assertEqual(self.client.testI32(0), 0)
def testI64(self):
self.assertEqual(self.client.testI64(1), 1)
self.assertEqual(self.client.testI64(-34359738368), -34359738368)
def testDouble(self):
self.assertEqual(self.client.testDouble(-5.235098235), -5.235098235)
self.assertEqual(self.client.testDouble(0), 0)
self.assertEqual(self.client.testDouble(-1), -1)
# TODO: def testBinary(self) ...
def testStruct(self):
x = Xtruct()
x.string_thing = "Zero"
x.byte_thing = 1
x.i32_thing = -3
x.i64_thing = -5
y = self.client.testStruct(x)
self.assertEqual(y, x)
def testNest(self):
inner = Xtruct(string_thing="Zero", byte_thing=1, i32_thing=-3,
i64_thing=-5)
x = Xtruct2(struct_thing=inner, byte_thing=0, i32_thing=0)
y = self.client.testNest(x)
self.assertEqual(y, x)
def testMap(self):
x = {0:1, 1:2, 2:3, 3:4, -1:-2}
y = self.client.testMap(x)
self.assertEqual(y, x)
def testSet(self):
x = set([8, 1, 42])
y = self.client.testSet(x)
self.assertEqual(y, x)
def testList(self):
x = [1, 4, 9, -42]
y = self.client.testList(x)
self.assertEqual(y, x)
def testEnum(self):
x = Numberz.FIVE
y = self.client.testEnum(x)
self.assertEqual(y, x)
def testTypedef(self):
x = 0xffffffffffffff # 7 bytes of 0xff
y = self.client.testTypedef(x)
self.assertEqual(y, x)
def testMapMap(self):
# does not work: dict() is not a hashable type, so a dict() cannot be used as a key in another dict()
#x = { {1:10, 2:20}, {1:100, 2:200, 3:300}, {1:1000, 2:2000, 3:3000, 4:4000} }
        try:
            self.client.testMapMap()
        except Exception:
            pass
def testMulti(self):
xpected = Xtruct(string_thing='Hello2', byte_thing=74, i32_thing=0xff00ff, i64_thing=0xffffffffd0d0)
y = self.client.testMulti(xpected.byte_thing,
xpected.i32_thing,
xpected.i64_thing,
{ 0:'abc' },
Numberz.FIVE,
0xf0f0f0)
self.assertEqual(y, xpected)
def testException(self):
self.client.testException('Safe')
try:
self.client.testException('Xception')
self.fail("should have gotten exception")
except Xception, x:
self.assertEqual(x.errorCode, 1001)
self.assertEqual(x.message, 'Xception')
# TODO ensure same behavior for repr within generated python variants
# ensure exception's repr method works
#x_repr = repr(x)
#self.assertEqual(x_repr, 'Xception(errorCode=1001, message=\'Xception\')')
try:
self.client.testException("throw_undeclared")
self.fail("should have thrown exception")
except Exception: # type is undefined
pass
def testOneway(self):
start = time.time()
self.client.testOneway(1) # type is int, not float
end = time.time()
self.assertTrue(end - start < 3,
"oneway sleep took %f sec" % (end - start))
def testOnewayThenNormal(self):
self.client.testOneway(1) # type is int, not float
self.assertEqual(self.client.testString('Python'), 'Python')
class NormalBinaryTest(AbstractTest):
protocol_factory = TBinaryProtocol.TBinaryProtocolFactory()
class CompactTest(AbstractTest):
protocol_factory = TCompactProtocol.TCompactProtocolFactory()
class JSONTest(AbstractTest):
protocol_factory = TJSONProtocol.TJSONProtocolFactory()
class AcceleratedBinaryTest(AbstractTest):
protocol_factory = TBinaryProtocol.TBinaryProtocolAcceleratedFactory()
def suite():
suite = unittest.TestSuite()
loader = unittest.TestLoader()
if options.proto == 'binary': # look for --proto on cmdline
suite.addTest(loader.loadTestsFromTestCase(NormalBinaryTest))
elif options.proto == 'accel':
suite.addTest(loader.loadTestsFromTestCase(AcceleratedBinaryTest))
elif options.proto == 'compact':
suite.addTest(loader.loadTestsFromTestCase(CompactTest))
elif options.proto == 'json':
suite.addTest(loader.loadTestsFromTestCase(JSONTest))
else:
raise AssertionError('Unknown protocol given with --protocol: %s' % options.proto)
return suite
class OwnArgsTestProgram(unittest.TestProgram):
def parseArgs(self, argv):
if args:
self.testNames = args
else:
self.testNames = (self.defaultTest,)
self.createTests()
if __name__ == "__main__":
OwnArgsTestProgram(defaultTest="suite", testRunner=unittest.TextTestRunner(verbosity=1))
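# Example invocation (assumes a compatible ThriftTest server is already
# listening; flag names match the OptionParser options defined above):
#   python TestClient.py --host localhost --port 9090 \
#       --protocol binary --transport buffered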
|
|
"""Functions to construct sparse matrices
"""
from __future__ import division, print_function, absolute_import
__docformat__ = "restructuredtext en"
__all__ = ['spdiags', 'eye', 'identity', 'kron', 'kronsum',
'hstack', 'vstack', 'bmat', 'rand', 'random', 'diags', 'block_diag']
import numpy as np
from scipy._lib.six import xrange
from .sputils import upcast, get_index_dtype, isscalarlike
from .csr import csr_matrix
from .csc import csc_matrix
from .bsr import bsr_matrix
from .coo import coo_matrix
from .dia import dia_matrix
from .base import issparse
def spdiags(data, diags, m, n, format=None):
"""
Return a sparse matrix from diagonals.
Parameters
----------
data : array_like
matrix diagonals stored row-wise
    diags : sequence of int or an int
        diagonals to set
- k = 0 the main diagonal
- k > 0 the k-th upper diagonal
- k < 0 the k-th lower diagonal
m, n : int
shape of the result
format : str, optional
Format of the result. By default (format=None) an appropriate sparse
matrix format is returned. This choice is subject to change.
See Also
--------
diags : more convenient form of this function
dia_matrix : the sparse DIAgonal format.
Examples
--------
>>> from scipy.sparse import spdiags
>>> data = np.array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]])
>>> diags = np.array([0, -1, 2])
>>> spdiags(data, diags, 4, 4).toarray()
array([[1, 0, 3, 0],
[1, 2, 0, 4],
[0, 2, 3, 0],
[0, 0, 3, 4]])
"""
return dia_matrix((data, diags), shape=(m,n)).asformat(format)
def diags(diagonals, offsets=0, shape=None, format=None, dtype=None):
"""
Construct a sparse matrix from diagonals.
Parameters
----------
diagonals : sequence of array_like
Sequence of arrays containing the matrix diagonals,
corresponding to `offsets`.
offsets : sequence of int or an int, optional
Diagonals to set:
- k = 0 the main diagonal (default)
- k > 0 the k-th upper diagonal
- k < 0 the k-th lower diagonal
shape : tuple of int, optional
Shape of the result. If omitted, a square matrix large enough
to contain the diagonals is returned.
format : {"dia", "csr", "csc", "lil", ...}, optional
Matrix format of the result. By default (format=None) an
appropriate sparse matrix format is returned. This choice is
subject to change.
dtype : dtype, optional
Data type of the matrix.
See Also
--------
spdiags : construct matrix from diagonals
Notes
-----
This function differs from `spdiags` in the way it handles
off-diagonals.
The result from `diags` is the sparse equivalent of::
np.diag(diagonals[0], offsets[0])
+ ...
+ np.diag(diagonals[k], offsets[k])
Repeated diagonal offsets are disallowed.
.. versionadded:: 0.11
Examples
--------
>>> from scipy.sparse import diags
>>> diagonals = [[1, 2, 3, 4], [1, 2, 3], [1, 2]]
>>> diags(diagonals, [0, -1, 2]).toarray()
array([[1, 0, 1, 0],
[1, 2, 0, 2],
[0, 2, 3, 0],
[0, 0, 3, 4]])
Broadcasting of scalars is supported (but shape needs to be
specified):
>>> diags([1, -2, 1], [-1, 0, 1], shape=(4, 4)).toarray()
array([[-2., 1., 0., 0.],
[ 1., -2., 1., 0.],
[ 0., 1., -2., 1.],
[ 0., 0., 1., -2.]])
If only one diagonal is wanted (as in `numpy.diag`), the following
works as well:
>>> diags([1, 2, 3], 1).toarray()
array([[ 0., 1., 0., 0.],
[ 0., 0., 2., 0.],
[ 0., 0., 0., 3.],
[ 0., 0., 0., 0.]])
"""
# if offsets is not a sequence, assume that there's only one diagonal
if isscalarlike(offsets):
# now check that there's actually only one diagonal
if len(diagonals) == 0 or isscalarlike(diagonals[0]):
diagonals = [np.atleast_1d(diagonals)]
else:
raise ValueError("Different number of diagonals and offsets.")
else:
diagonals = list(map(np.atleast_1d, diagonals))
offsets = np.atleast_1d(offsets)
# Basic check
if len(diagonals) != len(offsets):
raise ValueError("Different number of diagonals and offsets.")
# Determine shape, if omitted
if shape is None:
m = len(diagonals[0]) + abs(int(offsets[0]))
shape = (m, m)
# Determine data type, if omitted
if dtype is None:
dtype = np.common_type(*diagonals)
# Construct data array
m, n = shape
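    # DIA layout note: data_arr[j, c] holds the value of diagonal offsets[j]
    # at matrix column c, so each diagonal is stored padded out to its first
    # column index; M below is the largest column index any diagonal reaches.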
M = max([min(m + offset, n - offset) + max(0, offset)
for offset in offsets])
M = max(0, M)
data_arr = np.zeros((len(offsets), M), dtype=dtype)
K = min(m, n)
for j, diagonal in enumerate(diagonals):
offset = offsets[j]
k = max(0, offset)
length = min(m + offset, n - offset, K)
if length < 0:
raise ValueError("Offset %d (index %d) out of bounds" % (offset, j))
try:
data_arr[j, k:k+length] = diagonal[...,:length]
except ValueError:
if len(diagonal) != length and len(diagonal) != 1:
raise ValueError(
"Diagonal length (index %d: %d at offset %d) does not "
"agree with matrix size (%d, %d)." % (
j, len(diagonal), offset, m, n))
raise
return dia_matrix((data_arr, offsets), shape=(m, n)).asformat(format)
def identity(n, dtype='d', format=None):
"""Identity matrix in sparse format
Returns an identity matrix with shape (n,n) using a given
sparse format and dtype.
Parameters
----------
n : int
Shape of the identity matrix.
dtype : dtype, optional
Data type of the matrix
format : str, optional
Sparse format of the result, e.g. format="csr", etc.
Examples
--------
>>> from scipy.sparse import identity
>>> identity(3).toarray()
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
>>> identity(3, dtype='int8', format='dia')
<3x3 sparse matrix of type '<class 'numpy.int8'>'
with 3 stored elements (1 diagonals) in DIAgonal format>
"""
return eye(n, n, dtype=dtype, format=format)
def eye(m, n=None, k=0, dtype=float, format=None):
"""Sparse matrix with ones on diagonal
Returns a sparse (m x n) matrix where the k-th diagonal
is all ones and everything else is zeros.
Parameters
----------
m : int
Number of rows in the matrix.
n : int, optional
Number of columns. Default: `m`.
k : int, optional
Diagonal to place ones on. Default: 0 (main diagonal).
dtype : dtype, optional
Data type of the matrix.
format : str, optional
Sparse format of the result, e.g. format="csr", etc.
Examples
--------
>>> from scipy import sparse
>>> sparse.eye(3).toarray()
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
>>> sparse.eye(3, dtype=np.int8)
<3x3 sparse matrix of type '<class 'numpy.int8'>'
with 3 stored elements (1 diagonals) in DIAgonal format>
"""
if n is None:
n = m
m,n = int(m),int(n)
if m == n and k == 0:
# fast branch for special formats
if format in ['csr', 'csc']:
idx_dtype = get_index_dtype(maxval=n)
indptr = np.arange(n+1, dtype=idx_dtype)
indices = np.arange(n, dtype=idx_dtype)
data = np.ones(n, dtype=dtype)
cls = {'csr': csr_matrix, 'csc': csc_matrix}[format]
return cls((data,indices,indptr),(n,n))
elif format == 'coo':
idx_dtype = get_index_dtype(maxval=n)
row = np.arange(n, dtype=idx_dtype)
col = np.arange(n, dtype=idx_dtype)
data = np.ones(n, dtype=dtype)
return coo_matrix((data,(row,col)),(n,n))
diags = np.ones((1, max(0, min(m + k, n))), dtype=dtype)
return spdiags(diags, k, m, n).asformat(format)
def kron(A, B, format=None):
"""kronecker product of sparse matrices A and B
Parameters
----------
A : sparse or dense matrix
first matrix of the product
B : sparse or dense matrix
second matrix of the product
format : str, optional
format of the result (e.g. "csr")
Returns
-------
kronecker product in a sparse matrix format
Examples
--------
>>> from scipy import sparse
>>> A = sparse.csr_matrix(np.array([[0, 2], [5, 0]]))
>>> B = sparse.csr_matrix(np.array([[1, 2], [3, 4]]))
>>> sparse.kron(A, B).toarray()
array([[ 0, 0, 2, 4],
[ 0, 0, 6, 8],
[ 5, 10, 0, 0],
[15, 20, 0, 0]])
>>> sparse.kron(A, [[1, 2], [3, 4]]).toarray()
array([[ 0, 0, 2, 4],
[ 0, 0, 6, 8],
[ 5, 10, 0, 0],
[15, 20, 0, 0]])
"""
B = coo_matrix(B)
if (format is None or format == "bsr") and 2*B.nnz >= B.shape[0] * B.shape[1]:
# B is fairly dense, use BSR
A = csr_matrix(A,copy=True)
output_shape = (A.shape[0]*B.shape[0], A.shape[1]*B.shape[1])
if A.nnz == 0 or B.nnz == 0:
# kronecker product is the zero matrix
return coo_matrix(output_shape)
B = B.toarray()
data = A.data.repeat(B.size).reshape(-1,B.shape[0],B.shape[1])
data = data * B
return bsr_matrix((data,A.indices,A.indptr), shape=output_shape)
else:
# use COO
A = coo_matrix(A)
output_shape = (A.shape[0]*B.shape[0], A.shape[1]*B.shape[1])
if A.nnz == 0 or B.nnz == 0:
# kronecker product is the zero matrix
return coo_matrix(output_shape)
# expand entries of a into blocks
row = A.row.repeat(B.nnz)
col = A.col.repeat(B.nnz)
data = A.data.repeat(B.nnz)
row *= B.shape[0]
col *= B.shape[1]
# increment block indices
row,col = row.reshape(-1,B.nnz),col.reshape(-1,B.nnz)
row += B.row
col += B.col
row,col = row.reshape(-1),col.reshape(-1)
# compute block entries
data = data.reshape(-1,B.nnz) * B.data
data = data.reshape(-1)
return coo_matrix((data,(row,col)), shape=output_shape).asformat(format)
def kronsum(A, B, format=None):
"""kronecker sum of sparse matrices A and B
Kronecker sum of two sparse matrices is a sum of two Kronecker
products kron(I_n,A) + kron(B,I_m) where A has shape (m,m)
and B has shape (n,n) and I_m and I_n are identity matrices
of shape (m,m) and (n,n) respectively.
Parameters
----------
A
square matrix
B
square matrix
format : str
format of the result (e.g. "csr")
Returns
-------
kronecker sum in a sparse matrix format
Examples
--------
"""
A = coo_matrix(A)
B = coo_matrix(B)
if A.shape[0] != A.shape[1]:
raise ValueError('A is not square')
if B.shape[0] != B.shape[1]:
raise ValueError('B is not square')
dtype = upcast(A.dtype, B.dtype)
L = kron(eye(B.shape[0],dtype=dtype), A, format=format)
R = kron(B, eye(A.shape[0],dtype=dtype), format=format)
return (L+R).asformat(format) # since L + R is not always same format
def _compressed_sparse_stack(blocks, axis):
"""
Stacking fast path for CSR/CSC matrices
(i) vstack for CSR, (ii) hstack for CSC.
"""
other_axis = 1 if axis == 0 else 0
data = np.concatenate([b.data for b in blocks])
constant_dim = blocks[0].shape[other_axis]
idx_dtype = get_index_dtype(arrays=[b.indptr for b in blocks],
maxval=max(data.size, constant_dim))
indices = np.empty(data.size, dtype=idx_dtype)
indptr = np.empty(sum(b.shape[axis] for b in blocks) + 1, dtype=idx_dtype)
last_indptr = idx_dtype(0)
sum_dim = 0
sum_indices = 0
for b in blocks:
if b.shape[other_axis] != constant_dim:
raise ValueError('incompatible dimensions for axis %d' % other_axis)
indices[sum_indices:sum_indices+b.indices.size] = b.indices
sum_indices += b.indices.size
idxs = slice(sum_dim, sum_dim + b.shape[axis])
indptr[idxs] = b.indptr[:-1]
indptr[idxs] += last_indptr
sum_dim += b.shape[axis]
last_indptr += b.indptr[-1]
indptr[-1] = last_indptr
if axis == 0:
return csr_matrix((data, indices, indptr),
shape=(sum_dim, constant_dim))
else:
return csc_matrix((data, indices, indptr),
shape=(constant_dim, sum_dim))
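# Illustrative sketch (not part of the public API): this fast path is what
# lets vstack() of all-CSR blocks skip the COO round-trip, e.g.
#   >>> a = csr_matrix([[1, 0], [0, 2]])
#   >>> b = csr_matrix([[3, 4]])
#   >>> _compressed_sparse_stack([a, b], 0).toarray()
#   array([[1, 0],
#          [0, 2],
#          [3, 4]])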
def hstack(blocks, format=None, dtype=None):
"""
Stack sparse matrices horizontally (column wise)
Parameters
----------
blocks
sequence of sparse matrices with compatible shapes
format : str
sparse format of the result (e.g. "csr")
by default an appropriate sparse matrix format is returned.
This choice is subject to change.
dtype : dtype, optional
The data-type of the output matrix. If not given, the dtype is
determined from that of `blocks`.
See Also
--------
vstack : stack sparse matrices vertically (row wise)
Examples
--------
>>> from scipy.sparse import coo_matrix, hstack
>>> A = coo_matrix([[1, 2], [3, 4]])
>>> B = coo_matrix([[5], [6]])
>>> hstack([A,B]).toarray()
array([[1, 2, 5],
[3, 4, 6]])
"""
return bmat([blocks], format=format, dtype=dtype)
def vstack(blocks, format=None, dtype=None):
"""
Stack sparse matrices vertically (row wise)
Parameters
----------
blocks
sequence of sparse matrices with compatible shapes
format : str, optional
sparse format of the result (e.g. "csr")
by default an appropriate sparse matrix format is returned.
This choice is subject to change.
dtype : dtype, optional
The data-type of the output matrix. If not given, the dtype is
determined from that of `blocks`.
See Also
--------
hstack : stack sparse matrices horizontally (column wise)
Examples
--------
>>> from scipy.sparse import coo_matrix, vstack
>>> A = coo_matrix([[1, 2], [3, 4]])
>>> B = coo_matrix([[5, 6]])
>>> vstack([A, B]).toarray()
array([[1, 2],
[3, 4],
[5, 6]])
"""
return bmat([[b] for b in blocks], format=format, dtype=dtype)
def bmat(blocks, format=None, dtype=None):
"""
Build a sparse matrix from sparse sub-blocks
Parameters
----------
blocks : array_like
Grid of sparse matrices with compatible shapes.
An entry of None implies an all-zero matrix.
format : {'bsr', 'coo', 'csc', 'csr', 'dia', 'dok', 'lil'}, optional
The sparse format of the result (e.g. "csr"). By default an
appropriate sparse matrix format is returned.
This choice is subject to change.
dtype : dtype, optional
The data-type of the output matrix. If not given, the dtype is
determined from that of `blocks`.
Returns
-------
bmat : sparse matrix
See Also
--------
block_diag, diags
Examples
--------
>>> from scipy.sparse import coo_matrix, bmat
>>> A = coo_matrix([[1, 2], [3, 4]])
>>> B = coo_matrix([[5], [6]])
>>> C = coo_matrix([[7]])
>>> bmat([[A, B], [None, C]]).toarray()
array([[1, 2, 5],
[3, 4, 6],
[0, 0, 7]])
>>> bmat([[A, None], [None, C]]).toarray()
array([[1, 2, 0],
[3, 4, 0],
[0, 0, 7]])
"""
blocks = np.asarray(blocks, dtype='object')
if blocks.ndim != 2:
raise ValueError('blocks must be 2-D')
M,N = blocks.shape
# check for fast path cases
if (N == 1 and format in (None, 'csr') and all(isinstance(b, csr_matrix)
for b in blocks.flat)):
A = _compressed_sparse_stack(blocks[:,0], 0)
if dtype is not None:
A = A.astype(dtype)
return A
elif (M == 1 and format in (None, 'csc')
and all(isinstance(b, csc_matrix) for b in blocks.flat)):
A = _compressed_sparse_stack(blocks[0,:], 1)
if dtype is not None:
A = A.astype(dtype)
return A
block_mask = np.zeros(blocks.shape, dtype=bool)
brow_lengths = np.zeros(M, dtype=np.int64)
bcol_lengths = np.zeros(N, dtype=np.int64)
# convert everything to COO format
for i in range(M):
for j in range(N):
if blocks[i,j] is not None:
A = coo_matrix(blocks[i,j])
blocks[i,j] = A
block_mask[i,j] = True
if brow_lengths[i] == 0:
brow_lengths[i] = A.shape[0]
elif brow_lengths[i] != A.shape[0]:
msg = ('blocks[{i},:] has incompatible row dimensions. '
'Got blocks[{i},{j}].shape[0] == {got}, '
'expected {exp}.'.format(i=i, j=j,
exp=brow_lengths[i],
got=A.shape[0]))
raise ValueError(msg)
if bcol_lengths[j] == 0:
bcol_lengths[j] = A.shape[1]
elif bcol_lengths[j] != A.shape[1]:
                    msg = ('blocks[:,{j}] has incompatible column dimensions. '
'Got blocks[{i},{j}].shape[1] == {got}, '
'expected {exp}.'.format(i=i, j=j,
exp=bcol_lengths[j],
got=A.shape[1]))
raise ValueError(msg)
nnz = sum(block.nnz for block in blocks[block_mask])
if dtype is None:
all_dtypes = [blk.dtype for blk in blocks[block_mask]]
dtype = upcast(*all_dtypes) if all_dtypes else None
row_offsets = np.append(0, np.cumsum(brow_lengths))
col_offsets = np.append(0, np.cumsum(bcol_lengths))
shape = (row_offsets[-1], col_offsets[-1])
data = np.empty(nnz, dtype=dtype)
idx_dtype = get_index_dtype(maxval=max(shape))
row = np.empty(nnz, dtype=idx_dtype)
col = np.empty(nnz, dtype=idx_dtype)
nnz = 0
ii, jj = np.nonzero(block_mask)
for i, j in zip(ii, jj):
B = blocks[i, j]
idx = slice(nnz, nnz + B.nnz)
data[idx] = B.data
row[idx] = B.row + row_offsets[i]
col[idx] = B.col + col_offsets[j]
nnz += B.nnz
return coo_matrix((data, (row, col)), shape=shape).asformat(format)
def block_diag(mats, format=None, dtype=None):
"""
Build a block diagonal sparse matrix from provided matrices.
Parameters
----------
mats : sequence of matrices
Input matrices.
format : str, optional
The sparse format of the result (e.g. "csr"). If not given, the matrix
is returned in "coo" format.
dtype : dtype specifier, optional
The data-type of the output matrix. If not given, the dtype is
determined from that of `blocks`.
Returns
-------
res : sparse matrix
Notes
-----
.. versionadded:: 0.11.0
See Also
--------
bmat, diags
Examples
--------
>>> from scipy.sparse import coo_matrix, block_diag
>>> A = coo_matrix([[1, 2], [3, 4]])
>>> B = coo_matrix([[5], [6]])
>>> C = coo_matrix([[7]])
>>> block_diag((A, B, C)).toarray()
array([[1, 2, 0, 0],
[3, 4, 0, 0],
[0, 0, 5, 0],
[0, 0, 6, 0],
[0, 0, 0, 7]])
"""
nmat = len(mats)
rows = []
for ia, a in enumerate(mats):
row = [None]*nmat
if issparse(a):
row[ia] = a
else:
row[ia] = coo_matrix(a)
rows.append(row)
return bmat(rows, format=format, dtype=dtype)
def random(m, n, density=0.01, format='coo', dtype=None,
random_state=None, data_rvs=None):
"""Generate a sparse matrix of the given shape and density with randomly
distributed values.
Parameters
----------
m, n : int
shape of the matrix
density : real, optional
density of the generated matrix: density equal to one means a full
matrix, density of 0 means a matrix with no non-zero items.
format : str, optional
sparse matrix format.
dtype : dtype, optional
type of the returned matrix values.
random_state : {numpy.random.RandomState, int}, optional
Random number generator or random seed. If not given, the singleton
numpy.random will be used. This random state will be used
for sampling the sparsity structure, but not necessarily for sampling
the values of the structurally nonzero entries of the matrix.
data_rvs : callable, optional
Samples a requested number of random values.
This function should take a single argument specifying the length
of the ndarray that it will return. The structurally nonzero entries
of the sparse random matrix will be taken from the array sampled
by this function. By default, uniform [0, 1) random values will be
sampled using the same random state as is used for sampling
the sparsity structure.
Returns
-------
res : sparse matrix
Examples
--------
>>> from scipy.sparse import random
>>> from scipy import stats
>>> class CustomRandomState(object):
... def randint(self, k):
... i = np.random.randint(k)
... return i - i % 2
>>> rs = CustomRandomState()
>>> rvs = stats.poisson(25, loc=10).rvs
>>> S = random(3, 4, density=0.25, random_state=rs, data_rvs=rvs)
>>> S.A
array([[ 36., 0., 33., 0.], # random
[ 0., 0., 0., 0.],
[ 0., 0., 36., 0.]])
>>> from scipy.sparse import random
>>> from scipy.stats import rv_continuous
>>> class CustomDistribution(rv_continuous):
... def _rvs(self, *args, **kwargs):
... return self._random_state.randn(*self._size)
>>> X = CustomDistribution(seed=2906)
>>> Y = X() # get a frozen version of the distribution
>>> S = random(3, 4, density=0.25, random_state=2906, data_rvs=Y.rvs)
>>> S.A
array([[ 0. , 1.9467163 , 0.13569738, -0.81205367],
[ 0. , 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. ]])
Notes
-----
Only float types are supported for now.
"""
if density < 0 or density > 1:
raise ValueError("density expected to be 0 <= density <= 1")
dtype = np.dtype(dtype)
if dtype.char not in 'fdg':
raise NotImplementedError("type %s not supported" % dtype)
mn = m * n
tp = np.intc
if mn > np.iinfo(tp).max:
tp = np.int64
if mn > np.iinfo(tp).max:
msg = """\
Trying to generate a random sparse matrix where the product of dimensions is
greater than %d - this is not supported on this machine
"""
raise ValueError(msg % np.iinfo(tp).max)
# Number of non zero values
k = int(density * m * n)
if random_state is None:
random_state = np.random
elif isinstance(random_state, (int, np.integer)):
random_state = np.random.RandomState(random_state)
if data_rvs is None:
data_rvs = random_state.rand
    # When k is large relative to mn (mn < 3k), sample indices directly
    # without replacement; otherwise use the rejection trick from
    # python's random.sample.
if mn < 3*k:
ind = random_state.choice(mn, size=k, replace=False)
else:
ind = np.empty(k, dtype=tp)
selected = set()
for i in xrange(k):
j = random_state.randint(mn)
while j in selected:
j = random_state.randint(mn)
selected.add(j)
ind[i] = j
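    # Map the flat indices back to (row, col): ind enumerates entries in
    # column-major order, so the column is ind // m and the row is the
    # remainder.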
j = np.floor(ind * 1. / m).astype(tp)
i = (ind - j * m).astype(tp)
vals = data_rvs(k).astype(dtype)
return coo_matrix((vals, (i, j)), shape=(m, n)).asformat(format)
def rand(m, n, density=0.01, format="coo", dtype=None, random_state=None):
"""Generate a sparse matrix of the given shape and density with uniformly
distributed values.
Parameters
----------
m, n : int
shape of the matrix
density : real, optional
density of the generated matrix: density equal to one means a full
matrix, density of 0 means a matrix with no non-zero items.
format : str, optional
sparse matrix format.
dtype : dtype, optional
type of the returned matrix values.
random_state : {numpy.random.RandomState, int}, optional
Random number generator or random seed. If not given, the singleton
numpy.random will be used.
Returns
-------
res : sparse matrix
Notes
-----
Only float types are supported for now.
See Also
--------
scipy.sparse.random : Similar function that allows a user-specified random
data source.
Examples
--------
>>> from scipy.sparse import rand
>>> matrix = rand(3, 4, density=0.25, format="csr", random_state=42)
>>> matrix
<3x4 sparse matrix of type '<class 'numpy.float64'>'
with 3 stored elements in Compressed Sparse Row format>
>>> matrix.todense()
matrix([[ 0. , 0.59685016, 0.779691 , 0. ],
[ 0. , 0. , 0. , 0.44583275],
[ 0. , 0. , 0. , 0. ]])
"""
return random(m, n, density, format, dtype, random_state)
|
|
#!/usr/bin/env python
"""Functions for audit and logging."""
import logging
from logging import handlers
import os
import socket
import time
from grr.lib import config_lib
from grr.lib import flags
# Global Application Logger.
LOGGER = None
class GrrApplicationLogger(object):
"""The GRR application logger.
  These records are used for machine-readable authentication logging of
  security-critical events.
"""
def WriteFrontendLogEntry(self, event_id, request, response):
"""Write a log entry for a Frontend or UI Request.
Args:
event_id: String generated by GetNewEventId.
request: A HttpRequest protobuf.
response: A HttpResponse protobuf.
"""
log_msg = "%s-%s %d: %s %s %s %d %s" % (event_id, request.source_ip,
response.code, request.method,
request.url, request.user_agent,
response.size, request.user)
logging.info(log_msg)
def GetNewEventId(self, event_time=None):
"""Return a unique Event ID string."""
if event_time is None:
event_time = long(time.time() * 1e6)
return "%s:%s:%s" % (event_time, socket.gethostname(), os.getpid())
def LogHttpApiCall(self, request, response):
"""Log an api call based on the django.http request and response objects."""
# TODO(user): This is broken, please fix.
return
log_msg = "API call [%s] by %s: %s [%d]" % (response.get("X-API-Method",
"unknown"),
request.user, request.path,
response.status_code)
logging.info(log_msg)
class PreLoggingMemoryHandler(handlers.BufferingHandler):
"""Handler used before logging subsystem is initialized."""
def shouldFlush(self, record):
return len(self.buffer) >= self.capacity
def flush(self):
"""Flush the buffer.
    This is called when the buffer is really full; we simply drop the oldest
    messages beyond the buffer's capacity.
"""
self.buffer = self.buffer[-self.capacity:]
class RobustSysLogHandler(handlers.SysLogHandler):
"""A handler which does not raise if it fails to connect."""
def __init__(self, *args, **kwargs):
self.formatter = None
try:
super(RobustSysLogHandler, self).__init__(*args, **kwargs)
except socket.error:
pass
def handleError(self, record):
"""Just ignore socket errors - the syslog server might come back."""
BASE_LOG_LEVELS = {
"FileHandler": logging.ERROR,
"NTEventLogHandler": logging.CRITICAL,
"StreamHandler": logging.ERROR,
"RobustSysLogHandler": logging.CRITICAL,
}
VERBOSE_LOG_LEVELS = {
"FileHandler": logging.DEBUG,
"NTEventLogHandler": logging.INFO,
"StreamHandler": logging.DEBUG,
"RobustSysLogHandler": logging.INFO,
}
def SetLogLevels():
logger = logging.getLogger()
if config_lib.CONFIG["Logging.verbose"] or flags.FLAGS.verbose:
levels = VERBOSE_LOG_LEVELS
else:
levels = BASE_LOG_LEVELS
for handler in logger.handlers:
handler.setLevel(levels[handler.__class__.__name__])
def GetLogHandlers():
formatter = logging.Formatter(config_lib.CONFIG["Logging.format"])
engines = config_lib.CONFIG["Logging.engines"]
logging.debug("Will use logging engines %s", engines)
for engine in engines:
try:
if engine == "stderr":
handler = logging.StreamHandler()
handler.setFormatter(formatter)
yield handler
elif engine == "event_log":
handler = handlers.NTEventLogHandler(config_lib.CONFIG[
"Logging.service_name"])
handler.setFormatter(formatter)
yield handler
elif engine == "syslog":
# Allow the specification of UDP sockets.
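        # e.g. "Logging.syslog_path: 10.0.0.1:514" selects a UDP socket,
        # while a plain path such as "/dev/log" selects the local socket.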
socket_name = config_lib.CONFIG["Logging.syslog_path"]
if ":" in socket_name:
addr, port = socket_name.split(":", 1)
handler = RobustSysLogHandler((addr, int(port)))
else:
handler = RobustSysLogHandler(socket_name)
handler.setFormatter(formatter)
yield handler
elif engine == "file":
# Create a logfile if needed.
path = config_lib.CONFIG["Logging.filename"]
logging.info("Writing log file to %s", path)
if not os.path.isdir(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
handler = logging.FileHandler(path, mode="ab")
handler.setFormatter(formatter)
yield handler
else:
logging.error("Unknown logging engine %s", engine)
except Exception: # pylint:disable=broad-except
# Failure to log should not be fatal.
logging.exception("Unable to create logger %s", engine)
def LogInit():
"""Configure the logging subsystem."""
logging.debug("Initializing Logging subsystem.")
if flags.FLAGS.verbose:
# verbose flag just sets the logging verbosity level.
config_lib.CONFIG.AddContext(
"Debug Context",
"This context is to allow verbose and debug output from "
"the binary.")
# The root logger.
logger = logging.getLogger()
memory_handlers = [
m for m in logger.handlers
if m.__class__.__name__ == "PreLoggingMemoryHandler"
]
  # Replace all handlers with the freshly configured ones.
logger.handlers = list(GetLogHandlers())
SetLogLevels()
# Now flush the old messages into the log files.
for handler in memory_handlers:
for record in handler.buffer:
logger.handle(record)
def AppLogInit():
"""Initialize the Application Log.
This log is what will be used whenever someone does a log.LOGGER call. These
are used for more detailed application or event logs.
Returns:
GrrApplicationLogger object
"""
logging.debug("Initializing Application Logger.")
return GrrApplicationLogger()
def ServerLoggingStartupInit():
"""Initialize the server logging configuration."""
global LOGGER
try:
# pylint: disable=g-import-not-at-top
from grr.lib.local import log as local_log
# pylint: enable=g-import-not-at-top
logging.debug("Using local LogInit from %s", local_log)
local_log.LogInit()
logging.debug("Using local AppLogInit from %s", local_log)
LOGGER = local_log.AppLogInit()
except ImportError:
LogInit()
LOGGER = AppLogInit()
# There is a catch 22 here: We need to start logging right away but we will only
# configure the logging system once the config is read. Therefore we set up a
# memory logger now and then when the log destination is configured we replay
# the logs into that. This ensures we do not lose any log messages during early
# program start up.
root_logger = logging.root
root_logger.handlers = [PreLoggingMemoryHandler(1000)]
root_logger.setLevel(logging.DEBUG)
logging.info("Starting GRR Prelogging buffer.")
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# =============================================================================
'''This module includes a set of optimizers for updating model parameters.
Example usage::
from singa import optimizer
from singa import tensor
sgd = optimizer.SGD(lr=0.01, momentum=0.9, weight_decay=1e-4)
p = tensor.Tensor((3,5))
p.uniform(-1, 1)
g = tensor.Tensor((3,5))
g.gaussian(0, 0.01)
    sgd.apply(1, g, p, 'param')  # use the global lr=0.01 for epoch 1
sgd.apply_with_lr(2, 0.03, g, p, 'param') # use lr=0.03 for epoch 2
'''
from __future__ import division
from __future__ import absolute_import
from builtins import object
import math
from . import singa_wrap as singa
from . import tensor
from .proto import model_pb2
class Optimizer(object):
'''The base python optimizer class.
Typically, an optimizer is used as follows:
1. construct the optimizer
2. (optional) register each parameter with its specs.
3. use the optimizer to update parameter values given parameter gradients
and other optional info
The subclasses should override the apply_with_lr function to do the real
    parameter update.
Args:
lr (float): a constant value for the learning rate
momentum (float): a constant value for the momentum value
        weight_decay (float): the coefficient for the L2 regularizer, which is
mutually exclusive with 'regularizer'.
regularizer: an instance of Regularizer or RegularizerConf; If set,
regularization would be applied in apply_with_lr().
Users can also do regularization outside.
constraint: an instance of Constraint or ConstraintConf; If set,
constraint would be applied inside apply_with_lr(). Users can
also apply constraint outside.
'''
def __init__(self, lr=None, momentum=None, weight_decay=None,
regularizer=None, constraint=None):
self.lr = lr
self.momentum = momentum
if weight_decay is not None:
assert regularizer is None, \
'Cannot set weight_decay and regularizer at the same time'
regularizer = L2Regularizer(weight_decay)
if regularizer is not None:
if isinstance(regularizer, model_pb2.RegularizerConf):
self.regularizer = CppRegularizer(regularizer)
else:
self.regularizer = regularizer
else:
self.regularizer = None
if constraint is not None:
if isinstance(constraint, model_pb2.ConstraintConf):
self.constraint = CppConstraint(constraint)
else:
self.constraint = constraint
else:
self.constraint = None
self.regularizers = {}
self.constraints = {}
self.decay_multiplier = {}
self.learning_rate_multiplier = {}
def register(self, name, specs):
'''Register the param specs, including creating regularizer and
constraint per param object. Param specific regularizer and constraint
have higher priority than the global ones. If all parameters share the
same setting for learning rate, regularizer and constraint, then there
is no need to call this function.
Args:
name (str): parameter name
specs (ParamSpec): protobuf obj, including regularizer and
constraint, multipliers for learning rate and weight decay.
'''
assert isinstance(specs, model_pb2.ParamSpec), \
'specs should be model_pb2.ParamSpec instance'
if specs.HasField('regularizer'):
self.regularizers[name] = CppRegularizer(specs.regularizer)
elif specs.decay_mult != 1:
self.regularizers[name] = L2Regularizer(
specs.decay_mult * self.regularizer.coefficient)
if specs.HasField('constraint'):
self.constraints[name] = CppConstraint(specs.constraint)
if specs.lr_mult != 1:
self.learning_rate_multiplier[name] = specs.lr_mult
def apply_regularizer_constraint(self, epoch, value, grad, name=None,
step=-1):
'''Apply regularization and constraint if available.
If there are both global regularizer (constraint) and param specific
regularizer (constraint), it would use the param specific one.
Args:
epoch (int): training epoch ID
value (Tensor): parameter value Tensor
grad (Tensor): parameter gradient Tensor
name (string): to get parameter specific regularizer or constraint
step (int): iteration ID within one epoch
Returns:
the updated gradient Tensor
'''
if name is not None and name in self.constraints:
grad = self.constraints[name].apply(epoch, value, grad, step)
elif self.constraint is not None:
grad = self.constraint.apply(epoch, value, grad, step)
if name is not None and name in self.regularizers:
grad = self.regularizers[name].apply(epoch, value, grad, step)
elif self.regularizer is not None:
grad = self.regularizer.apply(epoch, value, grad, step)
return grad
def apply_with_lr(self, epoch, lr, grad, value, name=None, step=-1):
'''Do update of parameters with given learning rate if the grad is not
empty.
The subclass optimizer must override this function.
        This function does nothing if the grad is empty.
Args:
epoch (int): training epoch ID
lr (float): learning rate
grad (Tensor): parameter gradient
            value (Tensor): parameter value
            name (string): parameter name to index parameter-specific
updating rules (including regularizer and constraint)
step (int): iteration ID within one epoch
Returns:
updated parameter value
'''
        assert False, 'This is the base function; please override it in a subclass'
return value
def apply(self, epoch, grad, value, name=None, step=-1):
'''Do update assuming the learning rate generator is set.
The subclass optimizer does not need to override this function.
Args:
epoch (int): training epoch ID
grad (Tensor): parameter gradient
            value (Tensor): parameter value
            name (string): parameter name to retrieve parameter-specific
updating rules (including regularizer and constraint)
step (int): training iteration ID within one epoch
Return:
updated parameter value
'''
assert self.lr is not None, 'Must set the learning rate, i.e. "lr"'
return self.apply_with_lr(epoch, self.lr, grad, value, name, step)
class SGD(Optimizer):
    '''The vanilla Stochastic Gradient Descent algorithm with momentum.
See the base Optimizer for all arguments.
'''
def __init__(self, lr=None, momentum=None, weight_decay=None,
regularizer=None, constraint=None):
super(SGD, self).__init__(lr, momentum, weight_decay, regularizer,
constraint)
conf = model_pb2.OptimizerConf()
if self.momentum is not None:
conf.momentum = self.momentum
conf.type = 'sgd'
self.opt = singa.CreateOptimizer('SGD'.encode())
self.opt.Setup(conf.SerializeToString())
def apply_with_lr(self, epoch, lr, grad, value, name, step=-1):
if grad.is_empty():
return value
grad = self.apply_regularizer_constraint(
epoch, value, grad, name, step)
if name is not None and name in self.learning_rate_multiplier:
lr = lr * self.learning_rate_multiplier[name]
self.opt.Apply(epoch, lr, name.encode(), grad.data,
value.data)
return value
class Nesterov(Optimizer):
'''The SGD with Nesterov momentum.
See the base Optimizer for all arguments.
'''
def __init__(self, lr=None, momentum=0.9, weight_decay=None,
regularizer=None, constraint=None):
super(Nesterov, self).__init__(lr, momentum, weight_decay,
regularizer, constraint)
conf = model_pb2.OptimizerConf()
if self.momentum is not None:
conf.momentum = momentum
conf.type = 'nesterov'
self.opt = singa.CreateOptimizer('Nesterov'.encode())
self.opt.Setup(conf.SerializeToString())
def apply_with_lr(self, epoch, lr, grad, value, name, step=-1):
if grad.is_empty():
return value
grad = self.apply_regularizer_constraint(
epoch, value, grad, name, step)
if name is not None and name in self.learning_rate_multiplier:
lr = lr * self.learning_rate_multiplier[name]
self.opt.Apply(epoch, lr, name.encode(), grad.data,
value.data)
return value
class RMSProp(Optimizer):
'''RMSProp optimizer.
See the base Optimizer for all constructor args.
Args:
rho (float): float within [0, 1]
epsilon (float): small value for preventing numeric error
'''
def __init__(self, rho=0.9, epsilon=1e-8, lr=None, weight_decay=None,
regularizer=None, constraint=None):
super(RMSProp, self).__init__(lr, None, weight_decay, regularizer,
constraint)
conf = model_pb2.OptimizerConf()
conf.rho = rho
conf.delta = epsilon
self.opt = singa.CreateOptimizer('RMSProp'.encode())
self.opt.Setup(conf.SerializeToString())
def apply_with_lr(self, epoch, lr, grad, value, name, step=-1):
if grad.is_empty():
return value
grad = self.apply_regularizer_constraint(
epoch, value, grad, name, step)
if name is not None and name in self.learning_rate_multiplier:
lr = lr * self.learning_rate_multiplier[name]
self.opt.Apply(step, lr, name.encode(), grad.data,
value.data)
return value
class AdaGrad(Optimizer):
'''AdaGrad optimizer.
See the base Optimizer for all constructor args.
Args:
epsilon (float): small number for preventing numeric error.
'''
def __init__(self, epsilon=1e-8, lr=None, weight_decay=None, lr_gen=None,
regularizer=None, constraint=None):
super(AdaGrad, self).__init__(lr, None, weight_decay, regularizer,
constraint)
conf = model_pb2.OptimizerConf()
conf.delta = epsilon
conf.type = 'adagrad'
self.opt = singa.CreateOptimizer('AdaGrad'.encode())
self.opt.Setup(conf.SerializeToString())
def apply_with_lr(self, epoch, lr, grad, value, name, step=-1):
if grad.is_empty():
return value
grad = self.apply_regularizer_constraint(
epoch, value, grad, name, step)
if name is not None and name in self.learning_rate_multiplier:
lr = lr * self.learning_rate_multiplier[name]
self.opt.Apply(epoch, lr, name.encode(), grad.data,
value.data)
return value
class Adam(Optimizer):
'''Adam optimizer.
See the base Optimizer for all constructor args.
Args:
beta_1(float): coefficient of momentum
beta_2(float): coefficient of aggregated squared gradient
epsilon (float): small value for preventing numeric error
'''
def __init__(self, beta_1=0.9, beta_2=0.999, epsilon=1e-8, lr=None,
weight_decay=None, regularizer=None, constraint=None):
super(Adam, self).__init__(lr, None, weight_decay, regularizer,
constraint)
self.beta_1 = beta_1
self.beta_2 = beta_2
self.epsilon = epsilon
self.m = {}
self.v = {}
self.t = 0
self.last_epoch = -1
self.last_step = -1
def apply_with_lr(self, epoch, lr, grad, value, name, step):
'''Update one parameter object.
Args:
step(int): the accumulated training iterations, not the iteration ID
'''
if grad.is_empty():
return value
        assert step != -1, 'step should be >= 0'
if epoch != self.last_epoch or step != self.last_step:
self.t += 1
self.last_step = step
self.last_epoch = epoch
grad = self.apply_regularizer_constraint(
epoch, value, grad, name, step)
if name is not None and name in self.learning_rate_multiplier:
lr = lr * self.learning_rate_multiplier[name]
if name not in self.m or name not in self.v:
self.m[name] = tensor.Tensor(grad.shape, grad.device, grad.dtype)
self.m[name].set_value(0)
self.v[name] = tensor.Tensor(grad.shape, grad.device, grad.dtype)
self.v[name].set_value(0)
self.m[name] *= self.beta_1
tensor.axpy(1 - self.beta_1, grad, self.m[name])
self.v[name] *= self.beta_2
tensor.axpy(1 - self.beta_2, tensor.square(grad), self.v[name])
alpha = lr * math.sqrt(1 - math.pow(self.beta_2, self.t)) \
/ (1 - math.pow(self.beta_1, self.t))
value -= alpha * self.m[name] / (tensor.sqrt(self.v[name]) +
self.epsilon)
return value
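    # For reference, the update computed above is the standard Adam rule
    # (Kingma & Ba, 2015), with the bias correction folded into alpha:
    #   m_t = beta_1 * m_{t-1} + (1 - beta_1) * g_t
    #   v_t = beta_2 * v_{t-1} + (1 - beta_2) * g_t^2
    #   alpha_t = lr * sqrt(1 - beta_2^t) / (1 - beta_1^t)
    #   theta_t = theta_{t-1} - alpha_t * m_t / (sqrt(v_t) + epsilon)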
class Regularizer(object):
'''Base Python regularizer for parameter gradients.'''
def apply(self, epoch, value, grad, step=-1):
assert False, 'Not Implemented. Call the subclass function.'
return grad
class CppRegularizer(Regularizer):
'''Wrapper for regularizer implemented using C++.
Args:
conf (RegularizerConf): protobuf message for the configuration.
'''
def __init__(self, conf):
self.reg = singa.CreateRegularizer(conf.type)
self.reg.Setup(conf.SerializeToString())
def apply(self, epoch, value, grad, step=-1):
self.reg.Apply(epoch, value.data, grad.data)
return grad
class L2Regularizer(Regularizer):
'''L2 regularization
Args:
coefficient (float): regularization coefficient.
'''
def __init__(self, coefficient):
self.coefficient = coefficient
def apply(self, epoch, value, grad, step=-1):
# print coefficient, value.l1(), grad.l1()
if self.coefficient != 0:
tensor.axpy(self.coefficient, value, grad)
return grad
class Constraint(object):
    '''Base Python constraint class for parameter gradients'''
def apply(self, epoch, value, grad, step=-1):
return grad
class CppConstraint(Constraint):
'''Wrapper for constraints implemented using C++.
Args:
conf (ConstraintConf): protobuf message for the configuration.
'''
def __init__(self, conf):
self.constraint = singa.CreateConstraint(conf.type)
self.constraint.Setup(conf.SerializeToString())
def apply(self, epoch, value, grad, step=-1):
self.constraint.Apply(epoch, value.data, grad.data,
step)
return grad
class L2Constraint(Constraint):
'''Rescale the gradient to make the L2 norm <= a given threshold'''
def __init__(self, threshold=None):
self.threshold = threshold
    def apply(self, epoch, value, grad, step=-1):
        nrm = grad.l2()
        # Only rescale when the norm exceeds the threshold; rescaling
        # unconditionally would also scale small gradients up, violating
        # the '<= threshold' contract in the docstring.
        if self.threshold is not None and nrm > self.threshold:
            grad *= self.threshold / nrm
        return grad
|
|
# coding: utf-8
import os
import numpy as np
from astropy.table import Table, vstack
from astropy.io import fits
import multiprocessing
nproc = multiprocessing.cpu_count() // 2
from desispec.io.util import write_bintable
from desitarget.cuts import isBGS_bright, isBGS_faint
from desiutil.log import get_logger, DEBUG
log = get_logger()
def get_predefined_sim_dict(simname):
seed_multiplier = 164
try:
int_sim = int(simname[-2:])
seed = int(int_sim * seed_multiplier)
    except ValueError:
        # Non-numeric suffix: fall back to a seed derived from the name hash.
        seed = int(np.abs(hash(simname)) / seed_multiplier)
fiducial_settings = {
'suffix': simname, 'use_mock': False,
'nsim': 2, 'nspec': 4000,
'zmin': 0.1, 'zmax': 0.6,
'rmagmin': 17., 'rmagmax': 19.5,
'seed': seed
}
if simname == 'sim01':
simulation_parameters = { } # sim01 contains all fiducial values
elif simname == 'sim02' or simname == 'sim12':
simulation_parameters = { } # sim02 contains all fiducial values
elif simname == 'sim03' or simname == 'sim13':
simulation_parameters = { 'nsim': 10, 'nspec': 1000 }
elif simname == 'sim04' or simname == 'sim14':
simulation_parameters = { 'nsim': 10, 'nspec': 1000 }
elif simname == 'sim05':
simulation_parameters = { 'zmax': 0.8, 'nspec': 800, 'rmagmin': 19.5, 'rmagmax': 20.0 }
elif simname == 'sim06' or simname == 'sim16':
simulation_parameters = { 'zmax': 0.8, 'nspec': 800, 'rmagmin': 19.5, 'rmagmax': 20.0 }
elif simname == 'sim07' or simname == 'sim17':
simulation_parameters = { 'nsim': 10, 'nspec': 200, 'zmax': 0.8, 'rmagmin': 19.5, 'rmagmax': 20.0 }
elif simname == 'sim08' or simname == 'sim18':
simulation_parameters = { 'nsim': 10, 'nspec': 200, 'zmax': 0.8, 'rmagmin': 19.5, 'rmagmax': 20.0 }
else:
simulation_parameters = { } # at this point, all sims use the same sim settings
fiducial_settings.update(simulation_parameters)
return fiducial_settings
def get_predefined_obs_dict(simname):
## Can also define ranges for ref obs conds
# obs_conds['moonfracmin'], obs_conds['moonfracmax']
# obs_conds['moonsepmin'], obs_conds['moonsepmax']
# obs_conds['exptimemin'], obs_conds['exptimemax']
## or even bring in fiducial values
#from desisim.simexp import reference_conditions
#ref_obsconditions = reference_conditions['BGS']
fiducial_conditions = {
'AIRMASS': 1.0,
'SEEING': 1.1,
'MOONALT': -60,
'MOONSEP': 180
}
    if simname in ['sim01','sim05']:
        specified_conditions = { 'EXPTIME': 300, 'MOONFRAC': 0.0 } # sim01 uses the fiducial values
    elif simname in ['sim02','sim06','sim12','sim16']:
specified_conditions = { 'EXPTIME': 480, 'MOONFRAC': 0.8,
'MOONALT': 30, 'MOONSEP': 120 }
elif simname in ['sim03','sim07','sim13','sim17']:
specified_conditions = { 'exptimemin': 300, 'exptimemax': 720,
'MOONALT': 30,
'MOONFRAC': 0.8,
'MOONSEP': 120 }
elif simname in ['sim04','sim08','sim14','sim18']:
specified_conditions = { 'EXPTIME': 600,
'moonfracmin': 0.6, 'moonfracmax': 0.98,
'MOONALT': 30,
'MOONSEP': 120 }
else:
specified_conditions = { 'EXPTIME': 300, 'MOONFRAC': 0.0 } # defaults to sim01 (fiducial)
if simname[:-1] == 'sim1':
specified_conditions['MOONSEP'] = 60
fiducial_conditions.update(specified_conditions)
return fiducial_conditions
def bgs_sim_spectra(sim, ref_obsconditions, simdir, overwrite=False, verbose=False):
"""Generate spectra for a given set of simulation parameters with
the option of overwriting files.
"""
from desisim.scripts.quickspectra import sim_spectra
rand = np.random.RandomState(sim['seed'])
BGS_template_maker = BGStemplates(rand=rand, verbose=verbose)
# Generate the observing conditions table.
simdata = bgs_write_simdata(sim, ref_obsconditions, simdir, rand, overwrite=overwrite)
randseeds = rand.randint(0,2**14,len(simdata)).astype(int)
for exp, expdata in enumerate(simdata):
randseed = randseeds[exp]
# Generate the observing conditions dictionary.
obs = simdata2obsconditions(expdata)
# Generate the rest-frame templates. Currently not writing out the rest-frame
# templates but we could.
flux, wave, meta = bgs_make_templates(sim, rand, BGS_template_maker)
redshifts = np.asarray(meta['REDSHIFT'])
truefile = os.path.join(simdir, sim['suffix'], 'bgs-{}-{:03}-true.fits'.format(sim['suffix'], exp))
if overwrite or not os.path.isfile(truefile):
            write_templates(truefile, flux, wave, meta, overwrite=True)
spectrafile = os.path.join(simdir, sim['suffix'], 'bgs-{}-{:03}.fits'.format(sim['suffix'], exp))
if overwrite or not os.path.isfile(spectrafile):
sourcetypes = np.array(["bgs" for i in range(sim['nspec'])])
sim_spectra(wave, flux, 'bgs', spectrafile, redshift=redshifts, obsconditions=obs,
                        sourcetype=sourcetypes, seed=randseed, expid=exp)
else:
print('File {} exists...skipping.'.format(spectrafile))
def bgs_redshifts(sim, simdir, overwrite=False):
"""Fit for the redshifts.
"""
from redrock.external.desi import rrdesi
for ii in range(sim['nsim']):
zbestfile = os.path.join(simdir, sim['suffix'], 'bgs-{}-{:03}-zbest.fits'.format(sim['suffix'], ii))
spectrafile = os.path.join(simdir, sim['suffix'], 'bgs-{}-{:03}.fits'.format(sim['suffix'], ii))
if overwrite or not os.path.isfile(zbestfile):
rrdesi(options=['--zbest', zbestfile, '--mp', str(nproc), spectrafile])
else:
print('File {} exists...skipping.'.format(zbestfile))
def bgs_gather_results(sim, simdir, overwrite=False):
"""Gather all the pieces so we can make plots.
"""
from desispec.io.spectra import read_spectra
import fitsio
nspec = sim['nspec']
nall = nspec * sim['nsim']
resultfile = os.path.join(simdir, sim['suffix'], 'bgs-{}-results.fits'.format(sim['suffix']))
if not os.path.isfile(resultfile) or overwrite:
pass
else:
log.info('File {} exists...skipping.'.format(resultfile))
return
cols = [
('EXPTIME', 'f4'),
('AIRMASS', 'f4'),
('MOONFRAC', 'f4'),
('MOONSEP', 'f4'),
('MOONALT', 'f4'),
('SNR_B', 'f4'),
('SNR_R', 'f4'),
('SNR_Z', 'f4'),
('TARGETID', 'i8'),
('TEMPLATEID', 'i4'),
('RMAG', 'f4'),
('GR', 'f4'),
('D4000', 'f4'),
('EWHBETA', 'f4'),
('ZTRUE', 'f4'),
('Z', 'f4'),
('ZERR', 'f4'),
('ZWARN', 'f4')]
result = Table(np.zeros(nall, dtype=cols))
result['EXPTIME'].unit = 's'
result['MOONSEP'].unit = 'deg'
result['MOONALT'].unit = 'deg'
# Read the simulation parameters data table.
simdatafile = os.path.join(simdir, sim['suffix'], 'bgs-{}-simdata.fits'.format(sim['suffix']))
simdata = Table.read(simdatafile)
for ii, simdata1 in enumerate(simdata):
# Copy over some data.
result['EXPTIME'][nspec*ii:nspec*(ii+1)] = simdata1['EXPTIME']
result['AIRMASS'][nspec*ii:nspec*(ii+1)] = simdata1['AIRMASS']
result['MOONFRAC'][nspec*ii:nspec*(ii+1)] = simdata1['MOONFRAC']
result['MOONSEP'][nspec*ii:nspec*(ii+1)] = simdata1['MOONSEP']
result['MOONALT'][nspec*ii:nspec*(ii+1)] = simdata1['MOONALT']
# Read the metadata table.
truefile = os.path.join(simdir, sim['suffix'], 'bgs-{}-{:03}-true.fits'.format(sim['suffix'], ii))
if os.path.isfile(truefile):
log.info('Reading {}'.format(truefile))
meta = Table.read(truefile)
#result['TARGETID'][nspec*ib:nspec*(ii+1)] = truth['TARGETID']
result['TEMPLATEID'][nspec*ii:nspec*(ii+1)] = meta['TEMPLATEID']
result['RMAG'][nspec*ii:nspec*(ii+1)] = 22.5 - 2.5 * np.log10(meta['FLUX_R'])
result['GR'][nspec*ii:nspec*(ii+1)] = -2.5 * np.log10(meta['FLUX_G'] / meta['FLUX_R'])
result['D4000'][nspec*ii:nspec*(ii+1)] = meta['D4000']
result['EWHBETA'][nspec*ii:nspec*(ii+1)] = meta['EWHBETA']
result['ZTRUE'][nspec*ii:nspec*(ii+1)] = meta['REDSHIFT']
# Read the zbest file.
zbestfile = os.path.join(simdir, sim['suffix'], 'bgs-{}-{:03}-zbest.fits'.format(sim['suffix'], ii))
if os.path.isfile(zbestfile):
log.info('Reading {}'.format(zbestfile))
#zbest = fitsio.read(zbestfile, 'ZBEST')
#from astropy.table import Table
zbest = Table.read(zbestfile,'ZBEST')
# Assume the tables are row-ordered!
result['Z'][nspec*ii:nspec*(ii+1)] = zbest['Z']
result['ZERR'][nspec*ii:nspec*(ii+1)] = zbest['ZERR']
result['ZWARN'][nspec*ii:nspec*(ii+1)] = zbest['ZWARN']
# Finally, read the spectra to get the S/N.
spectrafile = os.path.join(simdir, sim['suffix'], 'bgs-{}-{:03}.fits'.format(sim['suffix'], ii))
if os.path.isfile(spectrafile):
log.info('Reading {}'.format(spectrafile))
spec = read_spectra(spectrafile)
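            # Estimate each object's S/N as the median of flux * sqrt(ivar)
            # over positive-flux pixels within +/-50 Angstrom of the
            # camera's mean wavelength.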
for band in ('b','r','z'):
for iobj in range(nspec):
these = np.where((spec.wave[band] > np.mean(spec.wave[band])-50) *
(spec.wave[band] < np.mean(spec.wave[band])+50) *
(spec.flux[band][iobj, :] > 0))[0]
result['SNR_{}'.format(band.upper())][nspec*ii+iobj] = (
np.median( spec.flux[band][iobj, these] * np.sqrt(spec.ivar[band][iobj, these]) )
)
log.info('Writing {}'.format(resultfile))
write_bintable(resultfile, result, extname='RESULTS', clobber=True)
def bgs_write_simdata(sim, obs_conds, simdir, obsrand, overwrite=False):
"""Build and write a metadata table with the simulation inputs.
Currently, the only quantities that can be varied are moonfrac,
moonsep, and exptime, but more choices can be added as needed.
"""
from desispec.io.util import makepath
simdatafile = os.path.join(simdir, sim['suffix'], 'bgs-{}-simdata.fits'.format(sim['suffix']))
makepath(simdatafile)
cols = [
('SEED', 'S20'),
('NSPEC', 'i4'),
('EXPTIME', 'f4'),
('AIRMASS', 'f4'),
('SEEING', 'f4'),
('MOONFRAC', 'f4'),
('MOONSEP', 'f4'),
('MOONALT', 'f4')]
simdata = Table(np.zeros(sim['nsim'], dtype=cols))
simdata['EXPTIME'].unit = 's'
simdata['SEEING'].unit = 'arcsec'
simdata['MOONSEP'].unit = 'deg'
simdata['MOONALT'].unit = 'deg'
simdata['SEED'] = sim['seed']
simdata['NSPEC'] = sim['nspec']
simdata['AIRMASS'] = obs_conds['AIRMASS']
simdata['SEEING'] = obs_conds['SEEING']
simdata['MOONALT'] = obs_conds['MOONALT']
if 'moonfracmin' in obs_conds.keys():
simdata['MOONFRAC'] = obsrand.uniform(obs_conds['moonfracmin'], obs_conds['moonfracmax'], sim['nsim'])
else:
simdata['MOONFRAC'] = obs_conds['MOONFRAC']
if 'moonsepmin' in obs_conds.keys():
simdata['MOONSEP'] = obsrand.uniform(obs_conds['moonsepmin'], obs_conds['moonsepmax'], sim['nsim'])
else:
simdata['MOONSEP'] = obs_conds['MOONSEP']
if 'exptimemin' in obs_conds.keys():
simdata['EXPTIME'] = obsrand.uniform(obs_conds['exptimemin'], obs_conds['exptimemax'], sim['nsim'])
else:
simdata['EXPTIME'] = obs_conds['EXPTIME']
if overwrite or not os.path.isfile(simdatafile):
print('Writing {}'.format(simdatafile))
write_bintable(simdatafile, simdata, extname='SIMDATA', clobber=overwrite)
return simdata
def simdata2obsconditions(simdata):
obs = dict(AIRMASS=simdata['AIRMASS'],
EXPTIME=simdata['EXPTIME'],
MOONALT=simdata['MOONALT'],
MOONFRAC=simdata['MOONFRAC'],
MOONSEP=simdata['MOONSEP'],
SEEING=simdata['SEEING'])
return obs
def bgs_make_templates(sim, rand, BGSmaker):
"""Generate the actual templates. If using the mock data then iterate
until we build the desired number of models after applying targeting cuts,
otherwise use specified priors on magnitude and redshift.
"""
redshift = rand.uniform(sim['zmin'], sim['zmax'], size=sim['nspec'])
rmag = rand.uniform(sim['rmagmin'], sim['rmagmax'], size=sim['nspec'])
flux, wave, meta = BGSmaker.bgs_templates.make_templates(
nmodel=sim['nspec'], redshift=redshift, mag=rmag, seed=sim['seed'])
return flux, wave, meta
def write_templates(outfile, flux, wave, meta, overwrite=True):
import astropy.units as u
from astropy.io import fits
hx = fits.HDUList()
hdu_wave = fits.PrimaryHDU(wave)
hdu_wave.header['EXTNAME'] = 'WAVE'
hdu_wave.header['BUNIT'] = 'Angstrom'
hdu_wave.header['AIRORVAC'] = ('vac', 'Vacuum wavelengths')
hx.append(hdu_wave)
fluxunits = 1e-17 * u.erg / (u.s * u.cm**2 * u.Angstrom)
hdu_flux = fits.ImageHDU(flux)
hdu_flux.header['EXTNAME'] = 'FLUX'
hdu_flux.header['BUNIT'] = str(fluxunits)
hx.append(hdu_flux)
hdu_meta = fits.table_to_hdu(meta)
hdu_meta.header['EXTNAME'] = 'METADATA'
hx.append(hdu_meta)
print('Writing {}'.format(outfile))
    hx.writeto(outfile, overwrite=overwrite)
class BGStree(object):
"""Build a KD Tree."""
def __init__(self):
from speclite import filters
from scipy.spatial import cKDTree as KDTree
from desisim.io import read_basis_templates
self.bgs_meta = read_basis_templates(objtype='BGS', onlymeta=True)
self.bgs_tree = KDTree(self._bgs())
def _bgs(self):
"""Quantities we care about: redshift (z), M_0.1r, and 0.1(g-r).
"""
zobj = self.bgs_meta['Z'].data
mabs = self.bgs_meta['SDSS_UGRIZ_ABSMAG_Z01'].data
rmabs = mabs[:, 2]
gr = mabs[:, 1] - mabs[:, 2]
return np.vstack((zobj, rmabs, gr)).T
def query(self, objtype, matrix, subtype=''):
"""Return the nearest template number based on the KD Tree.
Args:
objtype (str): object type
            matrix (numpy.ndarray): (N, M) array (N=number of objects,
                M=number of properties per object), one row per object,
                matching the layout built by the corresponding method
                (e.g., self._bgs).
subtype (str, optional): subtype (only for white dwarfs)
Returns:
dist: distance to nearest template
indx: index of nearest template
"""
if objtype.upper() == 'BGS':
dist, indx = self.bgs_tree.query(matrix)
else:
            log.warning('Unrecognized OBJTYPE {}!'.format(objtype))
            raise ValueError('Unrecognized OBJTYPE {}'.format(objtype))
return dist, indx
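# Illustrative query against the tree above (values made up); the matrix
# columns must match BGStree._bgs, i.e. [z, M_0.1r, 0.1(g-r)]:
#   tree = BGStree()
#   props = np.array([[0.2, -20.5, 0.8],
#                     [0.1, -19.0, 0.6]])
#   dist, indx = tree.query('BGS', props)
#   # indx[i] is the row of the nearest basis template for object i.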
class BGStemplates(object):
"""Generate spectra.
"""
def __init__(self, wavemin=None, wavemax=None, dw=0.2,
rand=None, verbose=False):
from desimodel.io import load_throughput
self.tree = BGStree()
# Build a default (buffered) wavelength vector.
if wavemin is None:
wavemin = load_throughput('b').wavemin - 10.0
if wavemax is None:
wavemax = load_throughput('z').wavemax + 10.0
self.wavemin = wavemin
self.wavemax = wavemax
self.dw = dw
self.wave = np.arange(round(wavemin, 1), wavemax, dw)
self.rand = rand
self.verbose = verbose
# Initialize the templates once:
from desisim.templates import BGS
self.bgs_templates = BGS(wave=self.wave)#, normfilter='sdss2010-r') # Need to generalize this!
self.bgs_templates.normline = None # no emission lines!
def bgs(self, data, index=None, mockformat='durham_mxxl_hdf5'):
"""Generate spectra for BGS.
Currently only the MXXL (durham_mxxl_hdf5) mock is supported. DATA
needs to have Z, SDSS_absmag_r01, SDSS_01gr, VDISP, and SEED, which are
assigned in mock.io.read_durham_mxxl_hdf5. See also BGSKDTree.bgs().
"""
from desisim.io import empty_metatable
objtype = 'BGS'
if index is None:
index = np.arange(len(data['Z']))
input_meta = empty_metatable(nmodel=len(index), objtype=objtype)
for inkey, datakey in zip(('SEED', 'MAG', 'REDSHIFT', 'VDISP'),
('SEED', 'MAG', 'Z', 'VDISP')):
input_meta[inkey] = data[datakey][index]
if mockformat.lower() == 'durham_mxxl_hdf5':
alldata = np.vstack((data['Z'][index],
data['SDSS_absmag_r01'][index],
data['SDSS_01gr'][index])).T
_, templateid = self.tree.query(objtype, alldata)
else:
raise ValueError('Unrecognized mockformat {}!'.format(mockformat))
input_meta['TEMPLATEID'] = templateid
flux, _, meta = self.bgs_templates.make_templates(input_meta=input_meta,
nocolorcuts=True, novdisp=False,
verbose=self.verbose)
return flux, meta
|
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests for devappserver2.admin.xmpp_request_handler."""
import unittest
import google
from lxml import doctestcompare
import mox
import webapp2
from google.appengine.tools.devappserver2 import dispatcher
from google.appengine.tools.devappserver2.admin import xmpp_request_handler
class CompareXml(mox.Comparator):
"""Compare XML using only semantic differences e.g. ignore attribute order."""
def __init__(self, xml):
self._xml = xml
def equals(self, rhs):
checker = doctestcompare.LXMLOutputChecker()
return checker.check_output(self._xml, rhs, 0)
class TestFormData(unittest.TestCase):
"""Tests for xmpp_request_handler._FormData."""
def test(self):
form_data = xmpp_request_handler._FormData()
form_data.add_text('message', u'\N{White Smiling Face}', 'plain')
form_data.add_text('stanza', '<p>This is\na\ntest!</p>', 'xml')
boundary, content = form_data.get_boundary_and_content()
self.assertMultiLineEqual(
'--{boundary}\r\n'
'Content-Type: text/plain; charset="UTF-8"\r\n'
'Content-Disposition: form-data; name="message"\r\n'
'\r\n'
'\xe2\x98\xba\r\n'
'--{boundary}\r\n'
'Content-Type: text/xml; charset="UTF-8"\r\n'
'Content-Disposition: form-data; name="stanza"\r\n'
'\r\n'
'<p>This is\na\ntest!</p>\r\n'
'--{boundary}--\r\n'.format(boundary=boundary),
content
)
class TestXmppRequestHandler(unittest.TestCase):
"""Tests for xmpp_request_handler.XmppRequestHandler."""
def setUp(self):
self.mox = mox.Mox()
def tearDown(self):
self.mox.UnsetStubs()
def test_send(self):
self.mox.StubOutWithMock(xmpp_request_handler.XmppRequestHandler,
'dispatcher')
handler = xmpp_request_handler.XmppRequestHandler()
handler.dispatcher = self.mox.CreateMock(dispatcher.Dispatcher)
handler.dispatcher.add_request(
method='POST',
relative_url='url',
headers=[('Content-Type',
mox.Regex('multipart/form-data; boundary=".*?"'))],
body=mox.IsA(str),
source_ip='0.1.0.10',
fake_login=True)
data = xmpp_request_handler._FormData()
self.mox.ReplayAll()
handler._send('url', data)
self.mox.VerifyAll()
def test_chat(self):
self.mox.StubOutWithMock(xmpp_request_handler.XmppRequestHandler, '_send')
request = webapp2.Request.blank('/xmpp', POST={'message_type': 'chat',
'to': 'foo@example.com',
'from': 'baz@example.com',
'chat': 'Chat content'})
response = webapp2.Response()
handler = xmpp_request_handler.XmppRequestHandler(request, response)
data = xmpp_request_handler._FormData()
data.add_text('from', 'baz@example.com', 'plain')
data.add_text('to', 'foo@example.com', 'plain')
data.add_text('body', 'Chat content', 'plain')
data.add_text(
'stanza',
CompareXml(
'<ns0:message from="baz@example.com" to="foo@example.com" '
'type="chat" xmlns:ns0="jabber:client">'
'<ns0:body>Chat content</ns0:body>'
'</ns0:message>'),
'xml')
handler._send('/_ah/xmpp/message/chat/', data).AndReturn(
dispatcher.ResponseTuple('404 Not Found', [], 'Response'))
self.mox.ReplayAll()
handler.post()
self.mox.VerifyAll()
self.assertEqual('404 Not Found', response.status)
def test_presence_available(self):
self.mox.StubOutWithMock(xmpp_request_handler.XmppRequestHandler, '_send')
request = webapp2.Request.blank('/xmpp', POST={'message_type': 'presence',
'to': 'foo@example.com',
'from': 'baz@example.com',
'presence': 'available'})
response = webapp2.Response()
handler = xmpp_request_handler.XmppRequestHandler(request, response)
data = xmpp_request_handler._FormData()
data.add_text('from', 'baz@example.com', 'plain')
data.add_text('to', 'foo@example.com', 'plain')
data.add_text(
'stanza',
CompareXml(
'<ns0:presence from="baz@example.com" to="foo@example.com" '
'xmlns:ns0="jabber:client" />'),
'xml')
handler._send('/_ah/xmpp/presence/available/', data).AndReturn(
dispatcher.ResponseTuple('404 Not Found', [], 'Response'))
self.mox.ReplayAll()
handler.post()
self.mox.VerifyAll()
self.assertEqual('404 Not Found', response.status)
def test_presence_unavailable(self):
self.mox.StubOutWithMock(xmpp_request_handler.XmppRequestHandler, '_send')
request = webapp2.Request.blank('/xmpp', POST={'message_type': 'presence',
'to': 'foo@example.com',
'from': 'baz@example.com',
'presence': 'unavailable'})
response = webapp2.Response()
handler = xmpp_request_handler.XmppRequestHandler(request, response)
data = xmpp_request_handler._FormData()
data.add_text('from', 'baz@example.com', 'plain')
data.add_text('to', 'foo@example.com', 'plain')
data.add_text(
'stanza',
CompareXml(
'<ns0:presence from="baz@example.com" to="foo@example.com" '
'type="unavailable" xmlns:ns0="jabber:client" />'),
'xml')
handler._send('/_ah/xmpp/presence/unavailable/', data).AndReturn(
dispatcher.ResponseTuple('404 Not Found', [], 'Response'))
self.mox.ReplayAll()
handler.post()
self.mox.VerifyAll()
self.assertEqual('404 Not Found', response.status)
def test_subscribe(self):
self.mox.StubOutWithMock(xmpp_request_handler.XmppRequestHandler, '_send')
request = webapp2.Request.blank('/xmpp',
POST={'message_type': 'subscribe',
'to': 'foo@example.com',
'from': 'baz@example.com',
'subscription_type': 'subscribe'})
response = webapp2.Response()
handler = xmpp_request_handler.XmppRequestHandler(request, response)
data = xmpp_request_handler._FormData()
data.add_text('from', 'baz@example.com', 'plain')
data.add_text('to', 'foo@example.com', 'plain')
data.add_text(
'stanza',
CompareXml(
'<ns0:presence from="baz@example.com" to="foo@example.com" '
'type="subscribe" xmlns:ns0="jabber:client" />'),
'xml')
handler._send('/_ah/xmpp/subscription/subscribe/', data).AndReturn(
dispatcher.ResponseTuple('404 Not Found', [], 'Response'))
self.mox.ReplayAll()
handler.post()
self.mox.VerifyAll()
self.assertEqual('404 Not Found', response.status)
if __name__ == '__main__':
unittest.main()
|
|
# Copyright (c) 2010 OpenStack Foundation
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Scheduler Service
"""
from oslo_config import cfg
import oslo_messaging as messaging
from oslo_utils import excutils
from oslo_utils import importutils
from cinder import context
from cinder import db
from cinder import exception
from cinder import flow_utils
from cinder.i18n import _, _LE
from cinder import manager
from cinder.openstack.common import log as logging
from cinder import quota
from cinder import rpc
from cinder.scheduler.flows import create_volume
from cinder.volume import rpcapi as volume_rpcapi
scheduler_driver_opt = cfg.StrOpt('scheduler_driver',
default='cinder.scheduler.filter_scheduler.'
'FilterScheduler',
help='Default scheduler driver to use')
CONF = cfg.CONF
CONF.register_opt(scheduler_driver_opt)
QUOTAS = quota.QUOTAS
LOG = logging.getLogger(__name__)
class SchedulerManager(manager.Manager):
"""Chooses a host to create volumes."""
RPC_API_VERSION = '1.7'
target = messaging.Target(version=RPC_API_VERSION)
def __init__(self, scheduler_driver=None, service_name=None,
*args, **kwargs):
if not scheduler_driver:
scheduler_driver = CONF.scheduler_driver
if scheduler_driver in ['cinder.scheduler.chance.ChanceScheduler',
'cinder.scheduler.simple.SimpleScheduler']:
scheduler_driver = ('cinder.scheduler.filter_scheduler.'
'FilterScheduler')
LOG.deprecated(_('ChanceScheduler and SimpleScheduler have been '
'deprecated due to lack of support for advanced '
'features like: volume types, volume encryption,'
' QoS etc. These two schedulers can be fully '
'replaced by FilterScheduler with certain '
'combination of filters and weighers.'))
self.driver = importutils.import_object(scheduler_driver)
super(SchedulerManager, self).__init__(*args, **kwargs)
def init_host(self):
ctxt = context.get_admin_context()
self.request_service_capabilities(ctxt)
def update_service_capabilities(self, context, service_name=None,
host=None, capabilities=None, **kwargs):
"""Process a capability update from a service node."""
if capabilities is None:
capabilities = {}
self.driver.update_service_capabilities(service_name,
host,
capabilities)
def create_consistencygroup(self, context, topic,
group_id,
request_spec_list=None,
filter_properties_list=None):
try:
self.driver.schedule_create_consistencygroup(
context, group_id,
request_spec_list,
filter_properties_list)
except exception.NoValidHost:
msg = (_("Could not find a host for consistency group "
"%(group_id)s.") %
{'group_id': group_id})
LOG.error(msg)
db.consistencygroup_update(context, group_id,
{'status': 'error'})
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Failed to create consistency group "
"%(group_id)s."),
{'group_id': group_id})
db.consistencygroup_update(context, group_id,
{'status': 'error'})
def create_volume(self, context, topic, volume_id, snapshot_id=None,
image_id=None, request_spec=None,
filter_properties=None):
try:
flow_engine = create_volume.get_flow(context,
db, self.driver,
request_spec,
filter_properties,
volume_id,
snapshot_id,
image_id)
except Exception:
LOG.exception(_LE("Failed to create scheduler "
"manager volume flow"))
raise exception.CinderException(
_("Failed to create scheduler manager volume flow"))
with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
flow_engine.run()
def request_service_capabilities(self, context):
volume_rpcapi.VolumeAPI().publish_service_capabilities(context)
def migrate_volume_to_host(self, context, topic, volume_id, host,
force_host_copy, request_spec,
filter_properties=None):
"""Ensure that the host exists and can accept the volume."""
def _migrate_volume_set_error(self, context, ex, request_spec):
volume_state = {'volume_state': {'migration_status': None}}
self._set_volume_state_and_notify('migrate_volume_to_host',
volume_state,
context, ex, request_spec)
try:
tgt_host = self.driver.host_passes_filters(context, host,
request_spec,
filter_properties)
except exception.NoValidHost as ex:
_migrate_volume_set_error(self, context, ex, request_spec)
except Exception as ex:
with excutils.save_and_reraise_exception():
_migrate_volume_set_error(self, context, ex, request_spec)
else:
volume_ref = db.volume_get(context, volume_id)
volume_rpcapi.VolumeAPI().migrate_volume(context, volume_ref,
tgt_host,
force_host_copy)
def retype(self, context, topic, volume_id,
request_spec, filter_properties=None):
"""Schedule the modification of a volume's type.
:param context: the request context
:param topic: the topic listened on
:param volume_id: the ID of the volume to retype
:param request_spec: parameters for this retype request
:param filter_properties: parameters to filter by
"""
def _retype_volume_set_error(self, context, ex, request_spec,
volume_ref, msg, reservations):
if reservations:
QUOTAS.rollback(context, reservations)
if (volume_ref['instance_uuid'] is None and
volume_ref['attached_host'] is None):
orig_status = 'available'
else:
orig_status = 'in-use'
volume_state = {'volume_state': {'status': orig_status}}
self._set_volume_state_and_notify('retype', volume_state,
context, ex, request_spec, msg)
volume_ref = db.volume_get(context, volume_id)
reservations = request_spec.get('quota_reservations')
new_type = request_spec.get('volume_type')
if new_type is None:
msg = _('New volume type not specified in request_spec.')
ex = exception.ParameterNotFound(param='volume_type')
_retype_volume_set_error(self, context, ex, request_spec,
volume_ref, msg, reservations)
# Default migration policy is 'never'
migration_policy = request_spec.get('migration_policy')
if not migration_policy:
migration_policy = 'never'
try:
tgt_host = self.driver.find_retype_host(context, request_spec,
filter_properties,
migration_policy)
except exception.NoValidHost as ex:
msg = (_("Could not find a host for volume %(volume_id)s with "
"type %(type_id)s.") %
{'type_id': new_type['id'], 'volume_id': volume_id})
_retype_volume_set_error(self, context, ex, request_spec,
volume_ref, msg, reservations)
except Exception as ex:
with excutils.save_and_reraise_exception():
_retype_volume_set_error(self, context, ex, request_spec,
volume_ref, None, reservations)
else:
volume_rpcapi.VolumeAPI().retype(context, volume_ref,
new_type['id'], tgt_host,
migration_policy, reservations)
def manage_existing(self, context, topic, volume_id,
request_spec, filter_properties=None):
"""Ensure that the host exists and can accept the volume."""
def _manage_existing_set_error(self, context, ex, request_spec):
volume_state = {'volume_state': {'status': 'error'}}
self._set_volume_state_and_notify('manage_existing', volume_state,
context, ex, request_spec)
volume_ref = db.volume_get(context, volume_id)
try:
self.driver.host_passes_filters(context,
volume_ref['host'],
request_spec,
filter_properties)
except exception.NoValidHost as ex:
_manage_existing_set_error(self, context, ex, request_spec)
except Exception as ex:
with excutils.save_and_reraise_exception():
_manage_existing_set_error(self, context, ex, request_spec)
else:
volume_rpcapi.VolumeAPI().manage_existing(context, volume_ref,
request_spec.get('ref'))
def get_pools(self, context, filters=None):
"""Get active pools from scheduler's cache."""
return self.driver.get_pools(context, filters)
def _set_volume_state_and_notify(self, method, updates, context, ex,
request_spec, msg=None):
# TODO(harlowja): move into a task that just does this later.
if not msg:
msg = (_("Failed to schedule_%(method)s: %(ex)s") %
{'method': method, 'ex': ex})
LOG.error(msg)
volume_state = updates['volume_state']
properties = request_spec.get('volume_properties', {})
volume_id = request_spec.get('volume_id', None)
if volume_id:
db.volume_update(context, volume_id, volume_state)
payload = dict(request_spec=request_spec,
volume_properties=properties,
volume_id=volume_id,
state=volume_state,
method=method,
reason=ex)
rpc.get_notifier("scheduler").error(context,
'scheduler.' + method,
payload)
|
|
#!/usr/bin/env python3
#
# Copyright (c) 2018 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
"""
Script to generate a linker script organizing application memory partitions
Applications may declare build-time memory domain partitions with
K_APPMEM_PARTITION_DEFINE, and assign globals to them using K_APP_DMEM
or K_APP_BMEM macros. For each of these partitions, we need to
route all their data into appropriately-sized memory areas which meet the
size/alignment constraints of the memory protection hardware.
This linker script is created very early in the build process, before
the build attempts to link the kernel binary, as the linker script this
tool generates is a necessary pre-condition for kernel linking. We extract
the set of memory partitions to generate by looking for variables which
have been assigned to input sections that follow a defined naming convention.
We also allow entire libraries to be pulled in to assign their globals
to a particular memory partition via command line directives.
This script takes as inputs:
- The base directory to look for compiled objects
- key/value pairs mapping static library files to what partitions their globals
should end up in.
The output is a linker script fragment containing the definition of the
app shared memory section, which is further divided, for each partition
found, into data and BSS for each partition.
"""
import sys
import argparse
import json
import os
import re
from collections import OrderedDict
from elftools.elf.elffile import ELFFile
from elftools.elf.sections import SymbolTableSection
import elftools.common.exceptions
SZ = 'size'
SRC = 'sources'
LIB = 'libraries'
# This script will create sections and linker variables to place the
# application shared memory partitions.
# These are later read by the macros defined in app_memdomain.h for
# initialization purposes when USERSPACE is enabled.
data_template = """
/* Auto-generated code, do not modify */
SMEM_PARTITION_ALIGN(z_data_smem_{0}_bss_end - z_data_smem_{0}_part_start);
z_data_smem_{0}_part_start = .;
KEEP(*(data_smem_{0}_data*))
"""
library_data_template = """
*{0}:*(.data .data.* .sdata .sdata.*)
"""
bss_template = """
z_data_smem_{0}_bss_start = .;
KEEP(*(data_smem_{0}_bss*))
"""
library_bss_template = """
*{0}:*(.bss .bss.* .sbss .sbss.* COMMON COMMON.*)
"""
footer_template = """
z_data_smem_{0}_bss_end = .;
SMEM_PARTITION_ALIGN(z_data_smem_{0}_bss_end - z_data_smem_{0}_part_start);
z_data_smem_{0}_part_end = .;
"""
linker_start_seq = """
SECTION_PROLOGUE(_APP_SMEM{1}_SECTION_NAME,,)
{{
APP_SHARED_ALIGN;
_app_smem{0}_start = .;
"""
linker_end_seq = """
APP_SHARED_ALIGN;
_app_smem{0}_end = .;
}} GROUP_DATA_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
"""
empty_app_smem = """
SECTION_PROLOGUE(_APP_SMEM{1}_SECTION_NAME,,)
{{
#ifdef EMPTY_APP_SHARED_ALIGN
EMPTY_APP_SHARED_ALIGN;
#endif
_app_smem{0}_start = .;
_app_smem{0}_end = .;
}} GROUP_DATA_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
"""
size_cal_string = """
z_data_smem_{0}_part_size = z_data_smem_{0}_part_end - z_data_smem_{0}_part_start;
z_data_smem_{0}_bss_size = z_data_smem_{0}_bss_end - z_data_smem_{0}_bss_start;
"""
section_regex = re.compile(r'data_smem_([A-Za-z0-9_]*)_(data|bss)*')
elf_part_size_regex = re.compile(r'z_data_smem_(.*)_part_size')
def find_obj_file_partitions(filename, partitions):
with open(filename, 'rb') as f:
try:
full_lib = ELFFile(f)
except elftools.common.exceptions.ELFError as e:
exit(f"Error: {filename}: {e}")
if not full_lib:
sys.exit("Error parsing file: " + filename)
sections = [x for x in full_lib.iter_sections()]
for section in sections:
m = section_regex.match(section.name)
if not m:
continue
partition_name = m.groups()[0]
if partition_name not in partitions:
partitions[partition_name] = {SZ: section.header.sh_size}
if args.verbose:
partitions[partition_name][SRC] = filename
else:
partitions[partition_name][SZ] += section.header.sh_size
return partitions
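# For reference, after scanning objects the `partitions` dict has the shape
# (sizes illustrative):
#   {'part0': {'size': 334}, 'part1': {'size': 64, 'sources': 'foo.obj'}}
# where the 'sources' entry is only tracked when --verbose is given.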
def parse_obj_files(partitions):
# Iterate over all object files to find partitions
for dirpath, _, files in os.walk(args.directory):
for filename in files:
if re.match(r".*\.obj$", filename):
fullname = os.path.join(dirpath, filename)
fsize = os.path.getsize(fullname)
if fsize != 0:
find_obj_file_partitions(fullname, partitions)
def parse_compile_command_file(partitions):
# Iterate over all entries to find object files.
# Thereafter process each object file to find partitions
object_pattern = re.compile(r'-o\s+(\S*)')
with open(args.compile_commands_file, 'rb') as f:
commands = json.load(f)
for command in commands:
build_dir = command.get('directory')
compile_command = command.get('command')
compile_arg = object_pattern.search(compile_command)
obj_file = None if compile_arg is None else compile_arg.group(1)
if obj_file:
fullname = os.path.join(build_dir, obj_file)
# Because of issue #40635, then not all objects referenced by
# the compile_commands.json file may be available, therefore
# only include existing files.
if os.path.exists(fullname):
find_obj_file_partitions(fullname, partitions)
def parse_elf_file(partitions):
with open(args.elf, 'rb') as f:
try:
elffile = ELFFile(f)
except elftools.common.exceptions.ELFError as e:
exit(f"Error: {args.elf}: {e}")
symbol_tbls = [s for s in elffile.iter_sections()
if isinstance(s, SymbolTableSection)]
for section in symbol_tbls:
for symbol in section.iter_symbols():
if symbol['st_shndx'] != "SHN_ABS":
continue
x = elf_part_size_regex.match(symbol.name)
if not x:
continue
partition_name = x.groups()[0]
size = symbol['st_value']
if partition_name not in partitions:
partitions[partition_name] = {SZ: size}
if args.verbose:
partitions[partition_name][SRC] = args.elf
else:
partitions[partition_name][SZ] += size
def generate_final_linker(linker_file, partitions, lnkr_sect=""):
string = ""
if len(partitions) > 0:
string = linker_start_seq.format(lnkr_sect, lnkr_sect.upper())
size_string = ''
for partition, item in partitions.items():
string += data_template.format(partition)
if LIB in item:
for lib in item[LIB]:
string += library_data_template.format(lib)
string += bss_template.format(partition, lnkr_sect)
if LIB in item:
for lib in item[LIB]:
string += library_bss_template.format(lib)
string += footer_template.format(partition)
size_string += size_cal_string.format(partition)
string += linker_end_seq.format(lnkr_sect)
string += size_string
else:
string = empty_app_smem.format(lnkr_sect, lnkr_sect.upper())
with open(linker_file, "w") as fw:
fw.write(string)
def parse_args():
global args
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("-d", "--directory", required=False, default=None,
help="Root build directory")
parser.add_argument("-e", "--elf", required=False, default=None,
help="ELF file")
parser.add_argument("-f", "--compile-commands-file", required=False,
default=None, help="CMake compile commands file")
parser.add_argument("-o", "--output", required=False,
help="Output ld file")
parser.add_argument("-v", "--verbose", action="count", default=0,
help="Verbose Output")
parser.add_argument("-l", "--library", nargs=2, action="append", default=[],
metavar=("LIBRARY", "PARTITION"),
help="Include globals for a particular library or object filename into a designated partition")
parser.add_argument("--pinoutput", required=False,
help="Output ld file for pinned sections")
parser.add_argument("--pinpartitions", action="store", required=False, default="",
help="Comma separated names of partitions to be pinned in physical memory")
args = parser.parse_args()
def main():
parse_args()
partitions = {}
if args.directory is not None:
parse_obj_files(partitions)
if args.compile_commands_file is not None:
parse_compile_command_file(partitions)
elif args.elf is not None:
parse_elf_file(partitions)
else:
return
for lib, ptn in args.library:
if ptn not in partitions:
partitions[ptn] = {}
if LIB not in partitions[ptn]:
partitions[ptn][LIB] = [lib]
else:
partitions[ptn][LIB].append(lib)
if args.pinoutput:
pin_part_names = args.pinpartitions.split(',')
generic_partitions = {key: value for key, value in partitions.items()
if key not in pin_part_names}
pinned_partitions = {key: value for key, value in partitions.items()
if key in pin_part_names}
else:
generic_partitions = partitions
# Sample partitions.items() list before sorting:
# [ ('part1', {'size': 64}), ('part3', {'size': 64}, ...
# ('part0', {'size': 334}) ]
decreasing_tuples = sorted(generic_partitions.items(),
key=lambda x: (x[1][SZ], x[0]), reverse=True)
partsorted = OrderedDict(decreasing_tuples)
generate_final_linker(args.output, partsorted)
if args.verbose:
print("Partitions retrieved:")
for key in partsorted:
print(" {0}: size {1}: {2}".format(key,
partsorted[key][SZ],
partsorted[key][SRC]))
if args.pinoutput:
decreasing_tuples = sorted(pinned_partitions.items(),
key=lambda x: (x[1][SZ], x[0]), reverse=True)
partsorted = OrderedDict(decreasing_tuples)
generate_final_linker(args.pinoutput, partsorted, lnkr_sect="_pinned")
if args.verbose:
print("Pinned partitions retrieved:")
for key in partsorted:
print(" {0}: size {1}: {2}".format(key,
partsorted[key][SZ],
partsorted[key][SRC]))
if __name__ == '__main__':
main()
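# Illustrative invocation (file and library names are hypothetical):
#   python gen_app_partitions.py -d build/zephyr -o app_smem.ld \
#       -l libfoo.a part0 --pinoutput app_smem_pinned.ld --pinpartitions part1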
|
|
# coding=utf-8
# Copyright 2022 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FSVI networks."""
# pylint: disable=g-bare-generic
# pylint: disable=g-doc-args
# pylint: disable=g-doc-return-or-yield
# pylint: disable=g-importing-member
# pylint: disable=g-no-space-after-docstring-summary
# pylint: disable=g-short-docstring-punctuation
# pylint: disable=logging-format-interpolation
# pylint: disable=logging-fstring-interpolation
# pylint: disable=missing-function-docstring
from functools import partial
from typing import Callable, Tuple
import haiku as hk
import jax
from jax import jit
import jax.numpy as jnp
from uncertainty_baselines.models.resnet50_fsvi import resnet50_fsvi
ACTIVATION_DICT = {"tanh": jnp.tanh, "relu": jax.nn.relu}
class Model:
"""Model."""
def __init__(
self,
output_dim: int,
activation_fn: str = "relu",
stochastic_parameters: bool = False,
linear_model: bool = False,
dropout: bool = False,
dropout_rate: float = 0.0,
):
"""Wrapper of resnet50_fsvi
Args:
output_dim: the output dimension
activation_fn: the type of activation function, e.g. "relu", "tanh"
stochastic_parameters: if True, we keep a variational distribution of
parameters.
linear_model: if True, only put variational distribution on the last layer.
dropout: if True, apply dropout.
dropout_rate: dropout rate if we apply dropout.
"""
self.output_dim = output_dim
self.linear_model = linear_model
self.dropout = dropout
self.dropout_rate = dropout_rate
self.activation_fn = ACTIVATION_DICT[activation_fn]
self.stochastic_parameters = stochastic_parameters
self.forward = hk.transform_with_state(self.make_forward_fn())
@property
def apply_fn(self) -> Callable:
return self.forward.apply
def make_forward_fn(self) -> Callable:
raise NotImplementedError
@partial(
jit, static_argnums=(
0,
5,
))
def predict_f(
self,
params: hk.Params,
state: hk.State,
inputs: jnp.ndarray,
rng_key: jnp.ndarray,
is_training: bool,
) -> jnp.ndarray:
"""Forward pass of model that returns pre-softmax output
Args:
params: parameters of model.
state: state of model, e.g. the mean and std used in batch normalization.
inputs: the input data.
rng_key: jax random key.
is_training: whether the model is in training mode.
Returns:
jax.numpy.ndarray, the pre-softmax output of the model
"""
return self.forward.apply(
params,
state,
rng_key,
inputs,
rng_key,
stochastic=True,
is_training=is_training,
)[0]
@partial(
jit, static_argnums=(
0,
5,
))
def predict_y(
self,
params: hk.Params,
state: hk.State,
inputs: jnp.ndarray,
rng_key: jnp.ndarray,
is_training: bool,
) -> jnp.ndarray:
"""Forward pass of model that returns post-softmax output"""
return jax.nn.softmax(
self.predict_f(params, state, inputs, rng_key, is_training))
def predict_y_multisample(self, params, state, inputs, rng_key, n_samples,
is_training):
"""Monte-Carlo estimate of the post-softmax output using `n_samples` samples."""
return mc_sampling(
fn=partial(
self.predict_y, params, state, inputs, is_training=is_training),
n_samples=n_samples,
rng_key=rng_key,
)
@partial(
jit, static_argnums=(
0,
5,
6,
))
def predict_f_multisample_jitted(
self,
params,
state,
inputs,
rng_key,
n_samples: int,
is_training: bool,
):
"""Jitted version of Monte-Carlo estimate of the pre-softmax output using `n_samples` samples."""
rng_keys = jax.random.split(rng_key, n_samples)
# pylint: disable=g-long-lambda
predict_multisample_fn = lambda rng_key: self.predict_f(
params,
state,
inputs,
rng_key,
is_training,
)
# pylint: enable=g-long-lambda
predict_multisample_fn_vmapped = jax.vmap(
predict_multisample_fn, in_axes=0, out_axes=0)
preds_samples = predict_multisample_fn_vmapped(rng_keys)
preds_mean = preds_samples.mean(axis=0)
preds_var = preds_samples.std(axis=0)**2
return preds_samples, preds_mean, preds_var
@partial(
jit, static_argnums=(
0,
5,
6,
))
def predict_y_multisample_jitted(self, params, state, inputs, rng_key,
n_samples, is_training):
"""Jitted version of Monte-Carlo estimate of the post-softmax output using `n_samples` samples."""
rng_keys = jax.random.split(rng_key, n_samples)
# pylint: disable=g-long-lambda
predict_multisample_fn = lambda rng_key: self.predict_y(
params, state, inputs, rng_key, is_training)
# pylint: enable=g-long-lambda
predict_multisample_fn_vmapped = jax.vmap(
predict_multisample_fn, in_axes=0, out_axes=0)
preds_samples = predict_multisample_fn_vmapped(rng_keys)
preds_mean = preds_samples.mean(0)
preds_var = preds_samples.std(0)**2
return preds_samples, preds_mean, preds_var
class CNN(Model):
"""CNN."""
def __init__(
self,
output_dim: int,
activation_fn: str = "relu",
stochastic_parameters: bool = False,
linear_model: bool = False,
dropout: bool = False,
dropout_rate: float = 0.0,
uniform_init_minval: float = -20.0,
uniform_init_maxval: float = -18.0,
w_init: str = "uniform",
b_init: str = "uniform",
):
self.uniform_init_minval = uniform_init_minval
self.uniform_init_maxval = uniform_init_maxval
self.w_init = w_init
self.b_init = b_init
super().__init__(
output_dim=output_dim,
activation_fn=activation_fn,
stochastic_parameters=stochastic_parameters,
linear_model=linear_model,
dropout=dropout,
dropout_rate=dropout_rate,
)
def make_forward_fn(self) -> Callable:
def forward_fn(inputs, rng_key, stochastic, is_training):
net = resnet50_fsvi(
output_dim=self.output_dim,
stochastic_parameters=self.stochastic_parameters,
dropout=self.dropout,
dropout_rate=self.dropout_rate,
linear_model=self.linear_model,
uniform_init_minval=self.uniform_init_minval,
uniform_init_maxval=self.uniform_init_maxval,
w_init=self.w_init,
b_init=self.b_init,
)
return net(inputs, rng_key, stochastic, is_training)
return forward_fn
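# Minimal usage sketch for CNN (input shape and sizes are illustrative):
#   model = CNN(output_dim=10, stochastic_parameters=True)
#   rng = jax.random.PRNGKey(0)
#   x = jnp.zeros((2, 224, 224, 3))
#   params, state = model.forward.init(rng, x, rng, stochastic=True,
#                                      is_training=True)
#   probs = model.predict_y(params, state, x, rng, True)  # shape (2, 10)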
def mc_sampling(
fn: Callable, n_samples: int,
rng_key: jnp.ndarray) -> Tuple[jnp.ndarray, jnp.ndarray, jnp.ndarray]:
"""Performs Monte Carlo sampling and returns the samples, the mean of samples
and the variance of samples
Args:
fn: a deterministic function that takes in a random key and returns one MC
sample.
n_samples: number of MC samples.
rng_key: jax random key.
Returns:
jax.numpy.ndarray, an array of shape (n_samples, ) + `output_shape`, where
`output_shape` is the shape
of output of `fn`
jax.numpy.ndarray, an array of shape (output_shape,)
jax.numpy.ndarray, an array of shape (output_shape,)
"""
list_of_pred_samples = []
for _ in range(n_samples):
rng_key, subkey = jax.random.split(rng_key)
output = fn(subkey)
list_of_pred_samples.append(jnp.expand_dims(output, 0))
preds_samples = jnp.concatenate(list_of_pred_samples, 0)
preds_mean = preds_samples.mean(axis=0)
preds_var = preds_samples.std(axis=0)**2
return preds_samples, preds_mean, preds_var
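# Illustrative use of mc_sampling with a toy stochastic function:
#   key = jax.random.PRNGKey(0)
#   fn = lambda k: jax.random.normal(k, (3,))
#   samples, mean, var = mc_sampling(fn, n_samples=10, rng_key=key)
#   # samples.shape == (10, 3); mean and var both have shape (3,)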
|
|
"""SCons.Tool.icl
Tool-specific initialization for the Intel C/C++ compiler.
Supports Linux and Windows compilers, v7 and up.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import division
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import math, sys, os.path, glob, string, re
is_windows = sys.platform == 'win32'
is_win64 = is_windows and (os.environ['PROCESSOR_ARCHITECTURE'] == 'AMD64' or
('PROCESSOR_ARCHITEW6432' in os.environ and
os.environ['PROCESSOR_ARCHITEW6432'] == 'AMD64'))
is_linux = sys.platform.startswith('linux')
is_mac = sys.platform == 'darwin'
if is_windows:
import SCons.Tool.msvc
elif is_linux:
import SCons.Tool.gcc
elif is_mac:
import SCons.Tool.gcc
import SCons.Util
import SCons.Warnings
# Exceptions for this tool
class IntelCError(SCons.Errors.InternalError):
pass
class MissingRegistryError(IntelCError): # missing registry entry
pass
class MissingDirError(IntelCError): # dir not found
pass
class NoRegistryModuleError(IntelCError): # can't read registry at all
pass
def uniquify(s):
"""Return a sequence containing only one copy of each unique element from input sequence s.
Does not preserve order.
Input sequence must be hashable (i.e. must be usable as a dictionary key)."""
u = {}
for x in s:
u[x] = 1
return list(u.keys())
def linux_ver_normalize(vstr):
"""Normalize a Linux compiler version number.
Intel changed from "80" to "9.0" in 2005, so we assume if the number
is greater than 60 it's an old-style number and otherwise new-style.
Always returns an old-style float like 80 or 90 for compatibility with Windows.
Shades of Y2K!"""
# Check for version number like 9.1.026: return 91.026
# XXX needs to be updated for 2011+ versions (like 2011.11.344 which is compiler v12.1.5)
m = re.match(r'([0-9]+)\.([0-9]+)\.([0-9]+)', vstr)
if m:
vmaj,vmin,build = m.groups()
        return float(vmaj) * 10. + float(vmin) + float(build) / 1000.
else:
f = float(vstr)
if is_windows:
return f
else:
if f < 60: return f * 10.0
else: return f
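# Worked examples of the normalization above (values follow from the code):
#   linux_ver_normalize('9.1.026') -> 91.026  (9*10 + 1 + 26/1000.)
#   on Linux, '9.0' -> 90.0 and '80' -> 80.0; on Windows the plain float
#   is returned unchanged.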
def check_abi(abi):
"""Check for valid ABI (application binary interface) name,
and map into canonical one"""
if not abi:
return None
abi = abi.lower()
# valid_abis maps input name to canonical name
if is_windows:
valid_abis = {'ia32' : 'ia32',
'x86' : 'ia32',
'ia64' : 'ia64',
'em64t' : 'em64t',
'amd64' : 'em64t'}
if is_linux:
valid_abis = {'ia32' : 'ia32',
'x86' : 'ia32',
'x86_64' : 'x86_64',
'em64t' : 'x86_64',
'amd64' : 'x86_64'}
if is_mac:
valid_abis = {'ia32' : 'ia32',
'x86' : 'ia32',
'x86_64' : 'x86_64',
'em64t' : 'x86_64'}
try:
abi = valid_abis[abi]
except KeyError:
raise SCons.Errors.UserError("Intel compiler: Invalid ABI %s, valid values are %s"% \
(abi, list(valid_abis.keys())))
return abi
def vercmp(a, b):
"""Compare strings as floats,
but Intel changed Linux naming convention at 9.0"""
return cmp(linux_ver_normalize(b), linux_ver_normalize(a))
def get_version_from_list(v, vlist):
"""See if we can match v (string) in vlist (list of strings)
Linux has to match in a fuzzy way."""
if is_windows:
# Simple case, just find it in the list
if v in vlist: return v
else: return None
else:
# Fuzzy match: normalize version number first, but still return
# original non-normalized form.
fuzz = 0.001
for vi in vlist:
if math.fabs(linux_ver_normalize(vi) - linux_ver_normalize(v)) < fuzz:
return vi
# Not found
return None
def get_intel_registry_value(valuename, version=None, abi=None):
"""
Return a value from the Intel compiler registry tree. (Windows only)
"""
# Open the key:
if is_win64:
K = 'Software\\Wow6432Node\\Intel\\Compilers\\C++\\' + version + '\\'+abi.upper()
else:
K = 'Software\\Intel\\Compilers\\C++\\' + version + '\\'+abi.upper()
try:
k = SCons.Util.RegOpenKeyEx(SCons.Util.HKEY_LOCAL_MACHINE, K)
except SCons.Util.RegError:
# For version 13 and later, check UUID subkeys for valuename
if is_win64:
K = 'Software\\Wow6432Node\\Intel\\Suites\\' + version + "\\Defaults\\C++\\" + abi.upper()
else:
K = 'Software\\Intel\\Suites\\' + version + "\\Defaults\\C++\\" + abi.upper()
try:
k = SCons.Util.RegOpenKeyEx(SCons.Util.HKEY_LOCAL_MACHINE, K)
uuid = SCons.Util.RegQueryValueEx(k, 'SubKey')[0]
if is_win64:
K = 'Software\\Wow6432Node\\Intel\\Suites\\' + version + "\\" + uuid + "\\C++"
else:
K = 'Software\\Intel\\Suites\\' + version + "\\" + uuid + "\\C++"
k = SCons.Util.RegOpenKeyEx(SCons.Util.HKEY_LOCAL_MACHINE, K)
try:
v = SCons.Util.RegQueryValueEx(k, valuename)[0]
return v # or v.encode('iso-8859-1', 'replace') to remove unicode?
except SCons.Util.RegError:
if abi.upper() == 'EM64T':
abi = 'em64t_native'
if is_win64:
K = 'Software\\Wow6432Node\\Intel\\Suites\\' + version + "\\" + uuid + "\\C++\\" + abi.upper()
else:
K = 'Software\\Intel\\Suites\\' + version + "\\" + uuid + "\\C++\\" + abi.upper()
k = SCons.Util.RegOpenKeyEx(SCons.Util.HKEY_LOCAL_MACHINE, K)
try:
v = SCons.Util.RegQueryValueEx(k, valuename)[0]
return v # or v.encode('iso-8859-1', 'replace') to remove unicode?
except SCons.Util.RegError:
raise MissingRegistryError("%s was not found in the registry, for Intel compiler version %s, abi='%s'"%(K, version,abi))
except SCons.Util.RegError:
raise MissingRegistryError("%s was not found in the registry, for Intel compiler version %s, abi='%s'"%(K, version,abi))
except WindowsError:
raise MissingRegistryError("%s was not found in the registry, for Intel compiler version %s, abi='%s'"%(K, version,abi))
# Get the value:
try:
v = SCons.Util.RegQueryValueEx(k, valuename)[0]
return v # or v.encode('iso-8859-1', 'replace') to remove unicode?
except SCons.Util.RegError:
raise MissingRegistryError("%s\\%s was not found in the registry."%(K, valuename))
def get_all_compiler_versions():
"""Returns a sorted list of strings, like "70" or "80" or "9.0"
with most recent compiler version first.
"""
versions=[]
if is_windows:
if is_win64:
keyname = 'Software\\WoW6432Node\\Intel\\Compilers\\C++'
else:
keyname = 'Software\\Intel\\Compilers\\C++'
try:
k = SCons.Util.RegOpenKeyEx(SCons.Util.HKEY_LOCAL_MACHINE,
keyname)
except WindowsError:
# For version 13 or later, check for default instance UUID
if is_win64:
keyname = 'Software\\WoW6432Node\\Intel\\Suites'
else:
keyname = 'Software\\Intel\\Suites'
try:
k = SCons.Util.RegOpenKeyEx(SCons.Util.HKEY_LOCAL_MACHINE,
keyname)
except WindowsError:
return []
i = 0
versions = []
try:
while i < 100:
subkey = SCons.Util.RegEnumKey(k, i) # raises EnvironmentError
# Check that this refers to an existing dir.
# This is not 100% perfect but should catch common
# installation issues like when the compiler was installed
# and then the install directory deleted or moved (rather
# than uninstalling properly), so the registry values
# are still there.
if subkey == 'Defaults': # Ignore default instances
i = i + 1
continue
ok = False
for try_abi in ('IA32', 'IA32e', 'IA64', 'EM64T'):
try:
d = get_intel_registry_value('ProductDir', subkey, try_abi)
except MissingRegistryError:
continue # not found in reg, keep going
if os.path.exists(d): ok = True
if ok:
versions.append(subkey)
else:
try:
# Registry points to nonexistent dir. Ignore this
# version.
value = get_intel_registry_value('ProductDir', subkey, 'IA32')
except MissingRegistryError, e:
# Registry key is left dangling (potentially
# after uninstalling).
print \
"scons: *** Ignoring the registry key for the Intel compiler version %s.\n" \
"scons: *** It seems that the compiler was uninstalled and that the registry\n" \
"scons: *** was not cleaned up properly.\n" % subkey
else:
print "scons: *** Ignoring "+str(value)
i = i + 1
except EnvironmentError:
# no more subkeys
pass
elif is_linux or is_mac:
for d in glob.glob('/opt/intel_cc_*'):
# Typical dir here is /opt/intel_cc_80.
m = re.search(r'cc_(.*)$', d)
if m:
versions.append(m.group(1))
for d in glob.glob('/opt/intel/cc*/*'):
# Typical dir here is /opt/intel/cc/9.0 for IA32,
# /opt/intel/cce/9.0 for EMT64 (AMD64)
m = re.search(r'([0-9][0-9.]*)$', d)
if m:
versions.append(m.group(1))
for d in glob.glob('/opt/intel/Compiler/*'):
# Typical dir here is /opt/intel/Compiler/11.1
m = re.search(r'([0-9][0-9.]*)$', d)
if m:
versions.append(m.group(1))
for d in glob.glob('/opt/intel/composerxe-*'):
# Typical dir here is /opt/intel/composerxe-2011.4.184
m = re.search(r'([0-9][0-9.]*)$', d)
if m:
versions.append(m.group(1))
for d in glob.glob('/opt/intel/composer_xe_*'):
# Typical dir here is /opt/intel/composer_xe_2011_sp1.11.344
# The _sp1 is useless, the installers are named 2011.9.x, 2011.10.x, 2011.11.x
m = re.search(r'([0-9]{0,4})(?:_sp\d*)?\.([0-9][0-9.]*)$', d)
if m:
versions.append("%s.%s"%(m.group(1), m.group(2)))
    def keyfunc(v):
        """Given a dot-separated version string, return a list of ints representing it."""
        return [int(x) for x in v.split('.')]
# split into ints, sort, then remove dups
return sorted(uniquify(versions), key=keyfunc, reverse=True)
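# Example of the ordering produced above: keyfunc('11.1') == [11, 1], so
# sorting ['9.1', '11.1', '10.0'] with this key and reverse=True yields
# ['11.1', '10.0', '9.1'] (newest first), with duplicates already removed.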
def get_intel_compiler_top(version, abi):
"""
Return the main path to the top-level dir of the Intel compiler,
using the given version.
The compiler will be in <top>/bin/icl.exe (icc on linux),
the include dir is <top>/include, etc.
"""
if is_windows:
if not SCons.Util.can_read_reg:
raise NoRegistryModuleError("No Windows registry module was found")
top = get_intel_registry_value('ProductDir', version, abi)
archdir={'x86_64': 'intel64',
'amd64' : 'intel64',
'em64t' : 'intel64',
'x86' : 'ia32',
'i386' : 'ia32',
'ia32' : 'ia32'
}[abi] # for v11 and greater
# pre-11, icl was in Bin. 11 and later, it's in Bin/<abi> apparently.
if not os.path.exists(os.path.join(top, "Bin", "icl.exe")) \
and not os.path.exists(os.path.join(top, "Bin", abi, "icl.exe")) \
and not os.path.exists(os.path.join(top, "Bin", archdir, "icl.exe")):
raise MissingDirError("Can't find Intel compiler in %s"%(top))
elif is_mac or is_linux:
def find_in_2008style_dir(version):
# first dir is new (>=9.0) style, second is old (8.0) style.
dirs=('/opt/intel/cc/%s', '/opt/intel_cc_%s')
if abi == 'x86_64':
dirs=('/opt/intel/cce/%s',) # 'e' stands for 'em64t', aka x86_64 aka amd64
top=None
for d in dirs:
if os.path.exists(os.path.join(d%version, "bin", "icc")):
top = d%version
break
return top
def find_in_2010style_dir(version):
dirs=('/opt/intel/Compiler/%s/*'%version)
# typically /opt/intel/Compiler/11.1/064 (then bin/intel64/icc)
dirs=glob.glob(dirs)
# find highest sub-version number by reverse sorting and picking first existing one.
dirs.sort()
dirs.reverse()
top=None
for d in dirs:
if (os.path.exists(os.path.join(d, "bin", "ia32", "icc")) or
os.path.exists(os.path.join(d, "bin", "intel64", "icc"))):
top = d
break
return top
def find_in_2011style_dir(version):
# The 2011 (compiler v12) dirs are inconsistent, so just redo the search from
# get_all_compiler_versions and look for a match (search the newest form first)
top=None
for d in glob.glob('/opt/intel/composer_xe_*'):
# Typical dir here is /opt/intel/composer_xe_2011_sp1.11.344
# The _sp1 is useless, the installers are named 2011.9.x, 2011.10.x, 2011.11.x
m = re.search(r'([0-9]{0,4})(?:_sp\d*)?\.([0-9][0-9.]*)$', d)
if m:
cur_ver = "%s.%s"%(m.group(1), m.group(2))
if cur_ver == version and \
(os.path.exists(os.path.join(d, "bin", "ia32", "icc")) or
os.path.exists(os.path.join(d, "bin", "intel64", "icc"))):
top = d
break
if not top:
for d in glob.glob('/opt/intel/composerxe-*'):
# Typical dir here is /opt/intel/composerxe-2011.4.184
m = re.search(r'([0-9][0-9.]*)$', d)
if m and m.group(1) == version and \
(os.path.exists(os.path.join(d, "bin", "ia32", "icc")) or
os.path.exists(os.path.join(d, "bin", "intel64", "icc"))):
top = d
break
return top
top = find_in_2011style_dir(version) or find_in_2010style_dir(version) or find_in_2008style_dir(version)
# print "INTELC: top=",top
if not top:
raise MissingDirError("Can't find version %s Intel compiler in %s (abi='%s')"%(version,top, abi))
return top
def generate(env, version=None, abi=None, topdir=None, verbose=0):
"""Add Builders and construction variables for Intel C/C++ compiler
to an Environment.
args:
version: (string) compiler version to use, like "80"
abi: (string) 'win32' or whatever Itanium version wants
topdir: (string) compiler top dir, like
"c:\Program Files\Intel\Compiler70"
If topdir is used, version and abi are ignored.
verbose: (int) if >0, prints compiler version used.
"""
if not (is_mac or is_linux or is_windows):
# can't handle this platform
return
if is_windows:
SCons.Tool.msvc.generate(env)
elif is_linux:
SCons.Tool.gcc.generate(env)
elif is_mac:
SCons.Tool.gcc.generate(env)
# if version is unspecified, use latest
vlist = get_all_compiler_versions()
if not version:
if vlist:
version = vlist[0]
else:
# User may have specified '90' but we need to get actual dirname '9.0'.
# get_version_from_list does that mapping.
v = get_version_from_list(version, vlist)
if not v:
raise SCons.Errors.UserError("Invalid Intel compiler version %s: "%version + \
"installed versions are %s"%(', '.join(vlist)))
version = v
# if abi is unspecified, use ia32
# alternatives are ia64 for Itanium, or amd64 or em64t or x86_64 (all synonyms here)
abi = check_abi(abi)
if abi is None:
if is_mac or is_linux:
# Check if we are on 64-bit linux, default to 64 then.
uname_m = os.uname()[4]
if uname_m == 'x86_64':
abi = 'x86_64'
else:
abi = 'ia32'
else:
if is_win64:
abi = 'em64t'
else:
abi = 'ia32'
if version and not topdir:
try:
topdir = get_intel_compiler_top(version, abi)
except (SCons.Util.RegError, IntelCError):
topdir = None
if not topdir:
# Normally this is an error, but it might not be if the compiler is
# on $PATH and the user is importing their env.
class ICLTopDirWarning(SCons.Warnings.Warning):
pass
if (is_mac or is_linux) and not env.Detect('icc') or \
is_windows and not env.Detect('icl'):
SCons.Warnings.enableWarningClass(ICLTopDirWarning)
SCons.Warnings.warn(ICLTopDirWarning,
"Failed to find Intel compiler for version='%s', abi='%s'"%
(str(version), str(abi)))
else:
# should be cleaned up to say what this other version is
# since in this case we have some other Intel compiler installed
SCons.Warnings.enableWarningClass(ICLTopDirWarning)
SCons.Warnings.warn(ICLTopDirWarning,
"Can't find Intel compiler top dir for version='%s', abi='%s'"%
(str(version), str(abi)))
if topdir:
archdir={'x86_64': 'intel64',
'amd64' : 'intel64',
'em64t' : 'intel64',
'x86' : 'ia32',
'i386' : 'ia32',
'ia32' : 'ia32'
}[abi] # for v11 and greater
if os.path.exists(os.path.join(topdir, 'bin', archdir)):
bindir="bin/%s"%archdir
libdir="lib/%s"%archdir
else:
bindir="bin"
libdir="lib"
if verbose:
print "Intel C compiler: using version %s (%g), abi %s, in '%s/%s'"%\
(repr(version), linux_ver_normalize(version),abi,topdir,bindir)
if is_linux:
# Show the actual compiler version by running the compiler.
os.system('%s/%s/icc --version'%(topdir,bindir))
if is_mac:
# Show the actual compiler version by running the compiler.
os.system('%s/%s/icc --version'%(topdir,bindir))
env['INTEL_C_COMPILER_TOP'] = topdir
if is_linux:
paths={'INCLUDE' : 'include',
'LIB' : libdir,
'PATH' : bindir,
'LD_LIBRARY_PATH' : libdir}
for p in paths.keys():
env.PrependENVPath(p, os.path.join(topdir, paths[p]))
if is_mac:
paths={'INCLUDE' : 'include',
'LIB' : libdir,
'PATH' : bindir,
'LD_LIBRARY_PATH' : libdir}
for p in paths.keys():
env.PrependENVPath(p, os.path.join(topdir, paths[p]))
if is_windows:
# env key reg valname default subdir of top
paths=(('INCLUDE', 'IncludeDir', 'Include'),
('LIB' , 'LibDir', 'Lib'),
('PATH' , 'BinDir', 'Bin'))
# We are supposed to ignore version if topdir is set, so set
        # it to the empty string if it's not already set.
if version is None:
version = ''
# Each path has a registry entry, use that or default to subdir
for p in paths:
try:
path=get_intel_registry_value(p[1], version, abi)
# These paths may have $(ICInstallDir)
# which needs to be substituted with the topdir.
path=path.replace('$(ICInstallDir)', topdir + os.sep)
except IntelCError:
# Couldn't get it from registry: use default subdir of topdir
env.PrependENVPath(p[0], os.path.join(topdir, p[2]))
else:
env.PrependENVPath(p[0], path.split(os.pathsep))
# print "ICL %s: %s, final=%s"%(p[0], path, str(env['ENV'][p[0]]))
if is_windows:
env['CC'] = 'icl'
env['CXX'] = 'icl'
env['LINK'] = 'xilink'
else:
env['CC'] = 'icc'
env['CXX'] = 'icpc'
# Don't reset LINK here;
# use smart_link which should already be here from link.py.
#env['LINK'] = '$CC'
env['AR'] = 'xiar'
env['LD'] = 'xild' # not used by default
# This is not the exact (detailed) compiler version,
# just the major version as determined above or specified
# by the user. It is a float like 80 or 90, in normalized form for Linux
# (i.e. even for Linux 9.0 compiler, still returns 90 rather than 9.0)
if version:
env['INTEL_C_COMPILER_VERSION']=linux_ver_normalize(version)
if is_windows:
# Look for license file dir
# in system environment, registry, and default location.
envlicdir = os.environ.get("INTEL_LICENSE_FILE", '')
        K = r'SOFTWARE\Intel\Licenses'
try:
k = SCons.Util.RegOpenKeyEx(SCons.Util.HKEY_LOCAL_MACHINE, K)
reglicdir = SCons.Util.RegQueryValueEx(k, "w_cpp")[0]
except (AttributeError, SCons.Util.RegError):
reglicdir = ""
defaultlicdir = r'C:\Program Files\Common Files\Intel\Licenses'
licdir = None
for ld in [envlicdir, reglicdir]:
# If the string contains an '@', then assume it's a network
# license (port@system) and good by definition.
if ld and (ld.find('@') != -1 or os.path.exists(ld)):
licdir = ld
break
if not licdir:
licdir = defaultlicdir
if not os.path.exists(licdir):
class ICLLicenseDirWarning(SCons.Warnings.Warning):
pass
SCons.Warnings.enableWarningClass(ICLLicenseDirWarning)
SCons.Warnings.warn(ICLLicenseDirWarning,
"Intel license dir was not found."
" Tried using the INTEL_LICENSE_FILE environment variable (%s), the registry (%s) and the default path (%s)."
" Using the default path as a last resort."
% (envlicdir, reglicdir, defaultlicdir))
env['ENV']['INTEL_LICENSE_FILE'] = licdir
def exists(env):
if not (is_mac or is_linux or is_windows):
# can't handle this platform
return 0
try:
versions = get_all_compiler_versions()
except (SCons.Util.RegError, IntelCError):
versions = None
detected = versions is not None and len(versions) > 0
if not detected:
# try env.Detect, maybe that will work
if is_windows:
return env.Detect('icl')
elif is_linux:
return env.Detect('icc')
elif is_mac:
return env.Detect('icc')
return detected
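# A minimal usage sketch (illustrative, not part of the tool itself):
# an SConstruct would typically request this tool by name, optionally
# pinning a compiler version via a Tool() object; '9.0' below is an
# assumed example version.
#
#   ic = Tool('intelc', version='9.0')
#   env = Environment(tools=['default', ic])
#   env.Program('hello', 'hello.c')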
# end of file
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
|
# -*- coding: utf-8 -*-
"""
github3.api
~~~~~~~~~~~
This module provides the core GitHub3 API interface.
"""
from urlparse import urlparse, parse_qs
import requests
from decorator import decorator
from .packages import omnijson as json
from .packages.link_header import parse_link_value
from .models import *
from .helpers import is_collection, to_python, to_api, get_scope
from .config import settings
PAGING_SIZE = 100
class GithubCore(object):
_rate_limit = None
_rate_limit_remaining = None
def __init__(self):
self.session = requests.session()
self.session.params = {'per_page': PAGING_SIZE}
@staticmethod
def _resource_serialize(o):
"""Returns JSON serialization of given object."""
return json.dumps(o)
@staticmethod
def _resource_deserialize(s):
"""Returns dict deserialization of a given JSON string."""
try:
return json.loads(s)
except ValueError:
raise ResponseError('The API Response was not valid.')
@staticmethod
def _generate_url(endpoint):
"""Generates proper endpoint URL."""
        if is_collection(endpoint):
            resource = map(str, endpoint)
            resource = '/'.join(resource)
else:
resource = endpoint
return (settings.base_url + resource)
def _requests_post_hook(self, r):
"""Post-processing for HTTP response objects."""
        self._rate_limit = int(r.headers.get('x-ratelimit-limit', -1))
        self._rate_limit_remaining = int(
            r.headers.get('x-ratelimit-remaining', -1))
return r
def _http_resource(self, verb, endpoint, params=None, check_status=True, **etc):
url = self._generate_url(endpoint)
args = (verb, url)
if params:
kwargs = {'params': params}
kwargs.update(etc)
else:
kwargs = etc
r = self.session.request(*args, **kwargs)
r = self._requests_post_hook(r)
if check_status:
r.raise_for_status()
return r
def _get_resource(self, resource, obj, **kwargs):
r = self._http_resource('GET', resource, params=kwargs)
item = self._resource_deserialize(r.content)
return obj.new_from_dict(item, gh=self)
def _patch_resource(self, resource, data, **kwargs):
r = self._http_resource('PATCH', resource, data=data, params=kwargs)
msg = self._resource_deserialize(r.content)
return msg
@staticmethod
def _total_pages_from_header(link_header):
if link_header is None:
return link_header
page_info = {}
for link in link_header.split(','):
uri, meta = map(str.strip, link.split(';'))
# Strip <>'s
uri = uri[1:-1]
# Get query params from header.
q = parse_qs(urlparse(uri).query)
meta = meta[5:-1]
page_info[meta] = q
try:
return int(page_info['last']['page'].pop())
except KeyError:
return True
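    # Illustrative input for _total_pages_from_header (a sketch using a
    # typical GitHub pagination header):
    #
    #   '<https://api.github.com/user/repos?page=2&per_page=100>; rel="next", '
    #   '<https://api.github.com/user/repos?page=5&per_page=100>; rel="last"'
    #
    # The rel="last" URI carries page=5, so the method returns 5. When no
    # 'last' relation is present it returns True, which _get_resources
    # below treats as "single page".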
def _get_resources(self, resource, obj, limit=None, **kwargs):
if limit is not None:
assert limit > 0
moar = True
is_truncated = (limit > PAGING_SIZE) or (limit is None)
r_count = 0
page = 1
while moar:
if not is_truncated:
kwargs['per_page'] = limit
moar = False
else:
kwargs['page'] = page
if limit:
if (limit - r_count) < PAGING_SIZE:
kwargs['per_page'] = (limit - r_count)
moar = False
r = self._http_resource('GET', resource, params=kwargs)
max_page = self._total_pages_from_header(r.headers['link'])
if (max_page is True) or (max_page is None):
moar = False
d_items = self._resource_deserialize(r.content)
for item in d_items:
if (r_count < limit) or (limit is None):
r_count += 1
yield obj.new_from_dict(item, gh=self)
else:
moar = False
page += 1
def _to_map(self, obj, iterable):
"""Maps given dict iterable to a given Resource object."""
a = list()
for it in iterable:
            a.append(obj.new_from_dict(it, gh=self))
return a
def _get_url(self, resource):
if is_collection(resource):
resource = map(str, resource)
resource = '/'.join(resource)
return resource
class Github(GithubCore):
"""docstring for Github"""
def __init__(self):
super(Github, self).__init__()
self.is_authenticated = False
def get_user(self, username):
"""Get a single user."""
return self._get_resource(('users', username), User)
def get_me(self):
"""Get the authenticated user."""
return self._get_resource(('user'), CurrentUser)
def get_repo(self, username, reponame):
"""Get the given repo."""
return self._get_resource(('repos', username, reponame), Repo)
def get_org(self, login):
"""Get organization."""
return self._get_resource(('orgs', login), Org)
class ResponseError(Exception):
"""The API Response was unexpected."""
|
|
from rest_framework import serializers
import amo
from addons.models import Category, Preview
from amo.helpers import absolutify
from amo.urlresolvers import reverse
from amo.utils import find_language
from constants.applications import DEVICE_TYPES
from versions.models import Version
import mkt
from mkt.api.fields import ESTranslationSerializerField
from mkt.submit.serializers import SimplePreviewSerializer
from mkt.webapps.models import Geodata, Webapp
from mkt.webapps.utils import (
dehydrate_content_ratings, dehydrate_descriptors, dehydrate_interactives,
filter_content_ratings_by_region)
from mkt.webapps.api import AppSerializer, SimpleAppSerializer
class ESAppSerializer(AppSerializer):
# Fields specific to search.
absolute_url = serializers.SerializerMethodField('get_absolute_url')
is_offline = serializers.BooleanField()
reviewed = serializers.DateField()
# Override previews, because we don't need the full PreviewSerializer.
previews = SimplePreviewSerializer(many=True, source='all_previews')
# Override those, because we want a different source. Also, related fields
# will call self.queryset early if they are not read_only, so force that.
categories = serializers.SlugRelatedField(read_only=True,
many=True, slug_field='slug', source='all_categories')
payment_account = serializers.HyperlinkedRelatedField(read_only=True,
view_name='payment-account-detail', source='payment_account')
manifest_url = serializers.CharField(source='manifest_url')
# Override translations, because we want a different field.
banner_message = ESTranslationSerializerField(
source='geodata.banner_message')
description = ESTranslationSerializerField()
homepage = ESTranslationSerializerField()
name = ESTranslationSerializerField()
release_notes = ESTranslationSerializerField(
source='current_version.releasenotes')
support_email = ESTranslationSerializerField()
support_url = ESTranslationSerializerField()
class Meta(AppSerializer.Meta):
fields = AppSerializer.Meta.fields + ['absolute_url', 'is_offline',
'reviewed']
def __init__(self, *args, **kwargs):
super(ESAppSerializer, self).__init__(*args, **kwargs)
        # Remove fields that we don't have in ES or don't want / need to
        # support in search results at the moment.
self.fields.pop('tags', None)
self.fields.pop('upsold', None)
# Set all fields as read_only just in case.
for field_name in self.fields:
self.fields[field_name].read_only = True
@property
def data(self):
"""
Returns the serialized data on the serializer.
"""
if self._data is None:
if self.many:
self._data = [self.to_native(item) for item in self.object]
else:
self._data = self.to_native(self.object)
return self._data
def field_to_native(self, obj, field_name):
# DRF's field_to_native calls .all(), which we want to avoid, so we
# provide a simplified version that doesn't and just iterates on the
# object list.
return [self.to_native(item) for item in obj.object_list]
def to_native(self, obj):
request = self.context['request']
if request and request.method == 'GET' and 'lang' in request.GET:
# We want to know if the user specifically requested a language,
# in order to return only the relevant translations when that
# happens.
self.requested_language = find_language(request.GET['lang'].lower())
app = self.create_fake_app(obj._source)
return super(ESAppSerializer, self).to_native(app)
def create_fake_app(self, data):
"""Create a fake instance of Webapp and related models from ES data."""
is_packaged = data['app_type'] != amo.ADDON_WEBAPP_HOSTED
is_privileged = data['app_type'] == amo.ADDON_WEBAPP_PRIVILEGED
obj = Webapp(id=data['id'], app_slug=data['app_slug'],
is_packaged=is_packaged, type=amo.ADDON_WEBAPP,
icon_type='image/png')
# Set relations and attributes we need on those relations.
# The properties set on latest_version and current_version differ
# because we are only setting what the serializer is going to need.
# In particular, latest_version.is_privileged needs to be set because
# it's used by obj.app_type_id.
obj.listed_authors = []
obj._current_version = Version()
obj._current_version.addon = obj
obj._current_version._developer_name = data['author']
obj._current_version.supported_locales = data['supported_locales']
obj._current_version.version = data['current_version']
obj._latest_version = Version()
obj._latest_version.is_privileged = is_privileged
obj._geodata = Geodata()
obj.all_categories = [Category(slug=cat) for cat in data['category']]
obj.all_previews = [Preview(id=p['id'], modified=p['modified'],
filetype=p['filetype']) for p in data['previews']]
obj._device_types = [DEVICE_TYPES[d] for d in data['device']]
# Set base attributes on the "fake" app using the data from ES.
        # It doesn't mean they'll get exposed in the serializer output;
        # that depends on the fields/exclude attributes in Meta.
for field_name in ('created', 'modified', 'default_locale',
'is_escalated', 'is_offline', 'manifest_url',
'premium_type', 'regions', 'reviewed', 'status',
'weekly_downloads'):
setattr(obj, field_name, data.get(field_name))
# Attach translations for all translated attributes.
for field_name in ('name', 'description', 'homepage', 'support_email',
'support_url'):
ESTranslationSerializerField.attach_translations(obj,
data, field_name)
ESTranslationSerializerField.attach_translations(obj._geodata,
data, 'banner_message')
ESTranslationSerializerField.attach_translations(obj._current_version,
data, 'release_notes', target_name='releasenotes')
# Set attributes that have a different name in ES.
obj.public_stats = data['has_public_stats']
# Avoid a query for payment_account if the app is not premium.
if not obj.is_premium():
obj.payment_account = None
        # Override obj.get_regions() with a static list of regions generated
# from the region_exclusions stored in ES.
obj.get_regions = obj.get_regions(obj.get_region_ids(restofworld=True,
excluded=data['region_exclusions']))
# Some methods below will need the raw data from ES, put it on obj.
obj.es_data = data
return obj
def get_content_ratings(self, obj):
return filter_content_ratings_by_region({
'ratings': dehydrate_content_ratings(
obj.es_data.get('content_ratings', {})),
'descriptors': dehydrate_descriptors(
obj.es_data.get('content_descriptors', {})),
'interactive_elements': dehydrate_interactives(
obj.es_data.get('interactive_elements', [])),
'regions': mkt.regions.REGION_TO_RATINGS_BODY()
}, region=self.context['request'].REGION.slug)
def get_versions(self, obj):
return dict((v['version'], v['resource_uri'])
for v in obj.es_data['versions'])
def get_ratings_aggregates(self, obj):
return obj.es_data.get('ratings', {})
def get_upsell(self, obj):
upsell = obj.es_data.get('upsell', False)
if upsell:
region_id = self.context['request'].REGION.id
exclusions = upsell.get('region_exclusions')
if exclusions is not None and region_id not in exclusions:
upsell['resource_uri'] = reverse('app-detail',
kwargs={'pk': upsell['id']})
else:
upsell = False
return upsell
def get_absolute_url(self, obj):
return absolutify(obj.get_absolute_url())
class SimpleESAppSerializer(ESAppSerializer):
class Meta(SimpleAppSerializer.Meta):
pass
class SuggestionsESAppSerializer(ESAppSerializer):
class Meta(ESAppSerializer.Meta):
fields = ['name', 'description', 'absolute_url', 'icons']
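# A minimal usage sketch (illustrative, not part of this module): given
# a list of Elasticsearch hits (each exposing a raw `_source` dict) and
# a DRF request in the serializer context, the serializers above build
# fake Webapp instances and render them without hitting the database.
# `hits` and `request` are assumed to exist.
#
#   serializer = SimpleESAppSerializer(hits, many=True,
#                                      context={'request': request})
#   payload = serializer.data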
|
|
import numpy as np
from itertools import cycle
from model import *
from scaletracker import ScaleTracker
from flask import session
# Global variables
EXPONENTS = {1: 0.5, 2: 0.87, 3: 1.25, 4: 1.62, 5: 1.8, 6: 2}
PRE_DEFINED_PARTS = set(['bass', 'synth_pad', 'synth_pluck'])
class Sequencer(object):
"""Abstract sequencer class, not meant to be instantiated"""
def __init__(self, color_id, inst_id, weight=0, steps=64, def_val=False):
self.steps = steps
self.color_id = color_id
self.inst_id = inst_id
self.weight = weight
self.seq_grid = np.full((steps, 5), def_val)
self.def_val = def_val
def __repr__(self):
return '< Sequencer: {} >'.format(self.inst_id)
def clear_grid(self):
"""Clears all sequence data"""
self.seq_grid = np.full((self.steps, 5), self.def_val)
def get_rhythm(self):
""" Select a rhythm from the database by weight """
# Gets all rhythms for the appropriate instrument and color
rhythms = Rhythm.query.filter(Rhythm.inst_id == self.inst_id,
Rhythm.color_id == self.color_id).all()
rhythms.sort(key=lambda x: x.weight, reverse=True)
# Return rhythm if there is only one in list
if len(rhythms) == 1:
return rhythms[0]
# Pick a rhythm by weight.
# Note: all categories of rhythms have one with a weight of 0, so this
# method will always return something
for rhythm in rhythms:
if self.weight >= rhythm.weight:
return rhythm
def get_sound(self):
""" Select a sound from the database by weight """
# Gets all sounds for the appropriate instrument and color
sounds = Synth.query.filter(Synth.inst_id == self.inst_id,
Synth.color_id == self.color_id).all()
        # Return sound if there is only one in list
if len(sounds) == 1:
return sounds[0]
# Pick a sound by weight
for sound in sounds:
if self.weight >= sound.weight:
return sound
def get_seq_display(self, steps):
""" Saves list of indexes for this seq to the session """
result = []
for step in steps:
if int(step) > 16:
return result
else:
result.append(int(step))
return result
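    # Illustrative behaviour of get_seq_display (a sketch, not a test):
    # only step indexes up to 16 are kept for the front-end display, and
    # the scan stops at the first step beyond that range, e.g.
    #
    #   seq.get_seq_display(['0', '4', '12', '17', '20'])  ->  [0, 4, 12]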
class MelodicSequencer(Sequencer):
"""Pattern-based Sequencer for melodic elements"""
def __init__(self, color_id, inst_id, weight=0):
        super(MelodicSequencer, self).__init__(color_id,
                                               inst_id,
                                               weight,
                                               steps=64,
                                               def_val=' ')
def __repr__(self):
return '< Melodic Sequencer: {} >'.format(self.inst_id)
def generate_sequence(self, seed1, seed2, seed3):
""" Selects a rhythm, then generates a melodic sequence """
rhythm = self.get_rhythm()
# Get pattern data from rhythm object
steps = rhythm.step_ind.split(',')
durations = rhythm.step_dur.split(',')
notes = rhythm.step_note.split(',')
octaves = rhythm.step_octave.split(',')
# Note On/Off
for i in xrange(len(steps)):
self.seq_grid[int(steps[i]), 0] = 'True'
# Durations
for i in xrange(len(steps)):
self.seq_grid[int(steps[i]), 3] = durations[i]
# Note values - will optimize later
if self.inst_id == 'bass':
for i in xrange(len(notes)):
self.seq_grid[int(steps[i]), 2] = scaletracker.bass[int(notes[i])]
elif self.inst_id == 'synth_pluck' or self.inst_id == 'synth_pad':
for i in xrange(len(notes)):
self.seq_grid[int(steps[i]), 2] = scaletracker.arp[int(notes[i])]
else:
for i in xrange(len(notes)):
self.seq_grid[int(steps[i]), 2] = scaletracker.scale[int(notes[i])]
# Octaves
for i in xrange(len(steps)):
self.seq_grid[int(steps[i]), 4] = octaves[i]
# Bend down (currently not utilized)
# Save indexes to session
session[self.inst_id] = self.get_seq_display(steps)
def get_events(self, start):
""" Returns a list of event data """
result = []
label = "{}Part{}".format(self.inst_id, start)
for i in xrange(64):
# Only evaluate line if note is on
if self.seq_grid[i, 0] == 'True':
note = self.seq_grid[i, 2]
dur = self.seq_grid[i, 3]
octave = self.seq_grid[i, 4]
time = "16n*{}".format(i+start)
result.append({'note': note+octave, 'time': time, 'dur': dur})
return (label, result)
def get_part(self):
""" Returns the Tone.js Part for this sequencer """
return ("{0}Part0 = new Tone.Part(function(time, event){{ {0}."
"triggerAttackRelease(event.note, event.dur, time) }}, "
"response['{0}Part0']).start(0);"
"{0}Part64 = new Tone.Part(function (time, event){{ {0}."
"triggerAttackRelease(event.note, event.dur, time) }}, "
"response['{0}Part64']).start(0);"
"{0}Part128 = new Tone.Part(function(time, event){{ {0}."
"triggerAttackRelease(event.note, event.dur, time) }}, "
"response['{0}Part128']).start(0);"
"{0}Part192 = new Tone.Part(function(time, event){{ {0}."
"triggerAttackRelease(event.note, event.dur, time) }}, "
"response['{0}Part192']).start(0);".format(self.inst_id))
def get_instance(self):
""" Returns code to instantiate Tone.js object and its settings """
result = []
# Get the synth sound
sound = self.get_sound()
# Create the instrument
if self.inst_id not in PRE_DEFINED_PARTS:
result.append('var {} = new {};'
.format(self.inst_id, sound.setting))
# result.append('{}.volume.value = -5;'.format(self.inst_id))
result.append('var {0}Part0, {0}Part64, {0}Part128, {0}Part192;'
.format(self.inst_id))
return result
class AtonalSequencer(Sequencer):
"""Pattern-based Sequencer for atonal elements"""
def __repr__(self):
return '< Atonal Sequencer: {} >'.format(self.inst_id)
def generate_sequence(self, seed1, seed2, seed3):
""" Selects a rhythm, then generates a sequence """
rhythm = self.get_rhythm()
# Get pattern data from rhythm object
steps = rhythm.step_ind.split(',')
accents = rhythm.step_dur.split(',')
glitches = rhythm.step_note.split(',')
# Note On/Off
for i in xrange(len(steps)):
self.seq_grid[int(steps[i]), 0] = True
# Accent On/Off
for i in xrange(len(accents)):
self.seq_grid[int(accents[i]), 1] = True
# Glitch On/Off
for i in xrange(len(glitches)):
self.seq_grid[int(glitches[i]), 2] = True
# Bend up (currently not utilized)
# Bend down (currently not utilized)
# Save indexes to session
session[self.inst_id] = self.get_seq_display(steps)
def get_events(self, start):
""" Returns list of event data for Tone.js """
result = []
label = "{}Part{}".format(self.inst_id, start)
for i in xrange(64):
# Only evaluate line if note is on
if self.seq_grid[i, 0]:
on, accent, glitch, bend_up, bend_down = self.seq_grid[i]
time = "16n*{}".format(i+start)
result.append({'note': '0', 'time': time, 'vel': '1'})
return (label, result)
def get_part(self):
""" Returns Tone.js Part for this sequencer """
sound = self.get_sound()
return ("{0} = new Tone.Sampler('{1}', function() {{{0}Part0 = new "
"Tone.Part(function(time, event){{{0}.triggerAttack(event.note,"
" time, event.vel)}}, response['{0}Part0']); {0}Part0.start(0);}}"
").toMaster();"
"{0} = new Tone.Sampler('{1}', function() {{{0}Part64 = new "
"Tone.Part(function(time, event){{{0}.triggerAttack(event.note,"
" time, event.vel)}}, response['{0}Part64']); {0}Part64.start(0);}}"
").toMaster();"
"{0} = new Tone.Sampler('{1}', function() {{{0}Part128 = new "
"Tone.Part(function(time, event){{{0}.triggerAttack(event.note,"
" time,event.vel)}},response['{0}Part128']); {0}Part128.start(0);}}"
").toMaster();"
"{0} = new Tone.Sampler('{1}', function() {{{0}Part192 = new "
"Tone.Part(function(time, event){{{0}.triggerAttack(event.note,"
" time,event.vel)}},response['{0}Part192']); {0}Part192.start(0);}}"
").toMaster();".format(self.inst_id, sound.setting))
def get_instance(self):
""" Returns code to instantiate Tone.js object and its settings """
return ["var {0}, {0}Part0, {0}Part64, {0}Part128, {0}Part192;"
.format(self.inst_id)]
class MelodicSeedSequencer(MelodicSequencer):
"""Seed based Sequencer for melodic elements"""
#Note: not currently utilized
def __repr__(self):
return '< Melodic Seed Sequencer: {} >'.format(self.name)
class AtonalSeedSequencer(AtonalSequencer):
"""Seed based Sequencer for atonal elements"""
def __init__(self, color_id, inst_id, weight=0):
        super(AtonalSeedSequencer, self).__init__(color_id,
                                                  inst_id,
                                                  weight,
                                                  steps=64,
                                                  def_val=False)
def __repr__(self):
return '< Atonal Seed Sequencer: {} >'.format(self.inst_id)
def generate_sequence2(self, seed_1, seed_2, seed_3, offset=0):
"""Generate sequence from provided seeds"""
# Note On/Off
for i in xrange(offset, self.steps, seed_1):
# Set note to 'On'
self.seq_grid[i, 0] = True
# Accent On/Off
for i in xrange(offset, self.steps, seed_2):
# Set accents to 'On'
self.seq_grid[i, 1] = True
# Glitch On/Off
for i in xrange(offset, self.steps, seed_3):
            # Set glitches to 'On'
self.seq_grid[i, 2] = True
# Bend up (currently not utilized)
# Bend down (currently not utilized)
def generate_sequence(self, seed_1, seed_2, seed_3, offset=0):
""" Generates alternate sequence by using seeds to repeat notes """
seq = ([True] * seed_1) + ([False] * seed_2)
loop = cycle(seq)
for i in xrange(self.steps):
self.seq_grid[i, 0] = loop.next()
for i in xrange(offset, self.steps, 2):
self.seq_grid[i, 1] = True
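    # Illustrative result of the cycle-based generator above (a sketch):
    # with seed_1=3 and seed_2=2 the note row repeats the pattern
    # [True, True, True, False, False] across all 64 steps, so the first
    # ten entries of seq_grid[:, 0] are T T T F F T T T F F, with accents
    # on every second step starting at the offset.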
#### Helper functions ##########################################################
def build_sequencers():
""" Builds all sequencers for song """
# Dictionary of sequencers to build
build_list = {'MelodicSequencer': ['bass',
'synth_pluck',
'synth_pad'],
'AtonalSequencer': ['kick_drum',
'snare_drum',
'open_hihat',
'closed_hihat'],
'AtonalSeedSequencer': ['ride_cymbal']}
sequencers = []
# Instantiate sequencer objects
# Note: keeping these separate in case different parameters are needed.
if build_list.get('MelodicSequencer'):
for seq_type in build_list['MelodicSequencer']:
sequencers.append(MelodicSequencer(session['color'],
seq_type,
session['weight_avg']))
if build_list.get('AtonalSequencer'):
for seq_type in build_list['AtonalSequencer']:
sequencers.append(AtonalSequencer(session['color'],
seq_type,
session['weight_avg']))
if build_list.get('AtonalSeedSequencer'):
for seq_type in build_list['AtonalSeedSequencer']:
sequencers.append(AtonalSeedSequencer(session['color'],
seq_type,
session['weight_avg']))
return sequencers
def write_sequences(sequencers, start):
""" Returns JavaScript code from all sequencers """
result = []
for sequencer in sequencers:
sequencer.generate_sequence(session['seed_1'],
session['seed_2'],
session['seed_3'])
result.append(sequencer.get_events(start))
return result
def get_music(vert, initial_key, sequencers):
""" Returns code to play sequences from all Melodic Sequencers """
result = []
# Get JavaScript to play sequences from melodic sequencers,
# Loops 4 times; one for each chord in progression
for i in xrange(4):
# Get weight from img data, then get next vertex by weight
vert = vert.get_next_vert(session['weights'][i])
# Get new key; (looks at the 'chord' attribute of the vert object, which
# is the index to look up in the 'prog' attribute of initial_key)
key_val = initial_key.prog.split(',')[vert.chord]
# Set new key and mode
scaletracker.set_scale(key_val, vert.mode_id)
# Save info to session for display
session['vert'+str(i)] = ("{} {}"
.format(scaletracker.key,
vert.mode_id))
# write JS
result.extend(write_sequences(sequencers, i*64,))
return result
def get_tempo():
""" Returns initial tempo. Higher contrast, higher tempo """
base_tempos = {'red': [120, 20],
'blue': [120, 35],
'green': [140, 15]}
base = base_tempos[session['color']]
data = sorted(session['weights'])
multiplier = data[-1] - data[0]
return base[0] + (base[1] * multiplier)
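# Worked example for get_tempo (a sketch with illustrative session
# values): for color 'red' and weights [0.8, 0.2, 0.5, 0.4], the sorted
# contrast is 0.8 - 0.2 = 0.6, giving 120 + (20 * 0.6) = 132 BPM.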
def get_instances(sequencers):
""" Returns code to instantiate Tone.js objects """
result = []
for sequencer in sequencers:
result.extend(sequencer.get_instance())
return result
def get_parts(sequencers):
""" Returns Tone.js parts for each sequencer """
result = []
for sequencer in sequencers:
result.append(sequencer.get_part())
return result
def build_part_list(sequencers):
""" Makes part list for front-end """
result = []
for sequencer in sequencers:
result.append(sequencer.inst_id)
return result
#### Active Code ###############################################################
# Scaletracker must be instantiated in this file to be read by all sequencers
scaletracker = ScaleTracker()
if __name__ == "__main__":
""" Imports DB functions to test in interactive mode """
# from flask_sqlalchemy import SQLAlchemy
from server import app
from model import *
connect_to_db(app)
print "Connected to DB."
|
|
# -*- coding: utf-8 -*-
"""
logbook._fallback
~~~~~~~~~~~~~~~~~
Fallback implementations in case speedups is not around.
:copyright: (c) 2010 by Armin Ronacher, Georg Brandl.
:license: BSD, see LICENSE for more details.
"""
from itertools import count
from logbook.helpers import get_iterator_next_method
from logbook.concurrency import (
thread_get_ident, greenlet_get_ident, thread_local, greenlet_local,
ThreadLock, GreenletRLock, is_gevent_enabled)
_missing = object()
_MAX_CONTEXT_OBJECT_CACHE = 256
def group_reflected_property(name, default, fallback=_missing):
"""Returns a property for a given name that falls back to the
value of the group if set. If there is no such group, the
provided default is used.
"""
def _get(self):
rv = getattr(self, '_' + name, _missing)
if rv is not _missing and rv != fallback:
return rv
if self.group is None:
return default
return getattr(self.group, name)
def _set(self, value):
setattr(self, '_' + name, value)
def _del(self):
delattr(self, '_' + name)
return property(_get, _set, _del)
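def _demo_group_reflected_property():
    # A minimal sketch (not part of logbook itself, never called here)
    # showing the fallback order: own value, then the group's value,
    # then the default.
    class _Group(object):
        level = 2
    class _Member(object):
        group = None
        level = group_reflected_property('level', default=0)
    m = _Member()
    assert m.level == 0   # no own value, no group -> default
    m.group = _Group()
    assert m.level == 2   # falls back to the group's value
    m.level = 5
    assert m.level == 5   # an explicitly set value wins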
class _StackBound(object):
def __init__(self, obj, push, pop):
self.__obj = obj
self.__push = push
self.__pop = pop
def __enter__(self):
self.__push()
return self.__obj
def __exit__(self, exc_type, exc_value, tb):
self.__pop()
class StackedObject(object):
"""Baseclass for all objects that provide stack manipulation
operations.
"""
def push_greenlet(self):
"""Pushes the stacked object to the greenlet stack."""
raise NotImplementedError()
def pop_greenlet(self):
"""Pops the stacked object from the greenlet stack."""
raise NotImplementedError()
def push_thread(self):
"""Pushes the stacked object to the thread stack."""
raise NotImplementedError()
def pop_thread(self):
"""Pops the stacked object from the thread stack."""
raise NotImplementedError()
def push_application(self):
"""Pushes the stacked object to the application stack."""
raise NotImplementedError()
def pop_application(self):
"""Pops the stacked object from the application stack."""
raise NotImplementedError()
def __enter__(self):
if is_gevent_enabled():
self.push_greenlet()
else:
self.push_thread()
return self
def __exit__(self, exc_type, exc_value, tb):
if is_gevent_enabled():
self.pop_greenlet()
else:
self.pop_thread()
def greenletbound(self, _cls=_StackBound):
"""Can be used in combination with the `with` statement to
execute code while the object is bound to the greenlet.
"""
return _cls(self, self.push_greenlet, self.pop_greenlet)
def threadbound(self, _cls=_StackBound):
"""Can be used in combination with the `with` statement to
execute code while the object is bound to the thread.
"""
return _cls(self, self.push_thread, self.pop_thread)
def applicationbound(self, _cls=_StackBound):
"""Can be used in combination with the `with` statement to
execute code while the object is bound to the application.
"""
return _cls(self, self.push_application, self.pop_application)
class ContextStackManager(object):
"""Helper class for context objects that manages a stack of
objects.
"""
def __init__(self):
self._global = []
self._thread_context_lock = ThreadLock()
self._thread_context = thread_local()
self._greenlet_context_lock = GreenletRLock()
self._greenlet_context = greenlet_local()
self._cache = {}
self._stackop = get_iterator_next_method(count())
def iter_context_objects(self):
"""Returns an iterator over all objects for the combined
application and context cache.
"""
use_gevent = is_gevent_enabled()
tid = greenlet_get_ident() if use_gevent else thread_get_ident()
objects = self._cache.get(tid)
if objects is None:
if len(self._cache) > _MAX_CONTEXT_OBJECT_CACHE:
self._cache.clear()
objects = self._global[:]
objects.extend(getattr(self._thread_context, 'stack', ()))
if use_gevent:
objects.extend(getattr(self._greenlet_context, 'stack', ()))
objects.sort(reverse=True)
objects = [x[1] for x in objects]
self._cache[tid] = objects
return iter(objects)
def push_greenlet(self, obj):
self._greenlet_context_lock.acquire()
try:
# remote chance to conflict with thread ids
self._cache.pop(greenlet_get_ident(), None)
item = (self._stackop(), obj)
stack = getattr(self._greenlet_context, 'stack', None)
if stack is None:
self._greenlet_context.stack = [item]
else:
stack.append(item)
finally:
self._greenlet_context_lock.release()
def pop_greenlet(self):
self._greenlet_context_lock.acquire()
try:
# remote chance to conflict with thread ids
self._cache.pop(greenlet_get_ident(), None)
stack = getattr(self._greenlet_context, 'stack', None)
assert stack, 'no objects on stack'
return stack.pop()[1]
finally:
self._greenlet_context_lock.release()
def push_thread(self, obj):
self._thread_context_lock.acquire()
try:
self._cache.pop(thread_get_ident(), None)
item = (self._stackop(), obj)
stack = getattr(self._thread_context, 'stack', None)
if stack is None:
self._thread_context.stack = [item]
else:
stack.append(item)
finally:
self._thread_context_lock.release()
def pop_thread(self):
self._thread_context_lock.acquire()
try:
self._cache.pop(thread_get_ident(), None)
stack = getattr(self._thread_context, 'stack', None)
assert stack, 'no objects on stack'
return stack.pop()[1]
finally:
self._thread_context_lock.release()
def push_application(self, obj):
self._global.append((self._stackop(), obj))
self._cache.clear()
def pop_application(self):
assert self._global, 'no objects on application stack'
popped = self._global.pop()[1]
self._cache.clear()
return popped
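if __name__ == '__main__':
    # A minimal sketch of the LIFO ordering exposed by
    # iter_context_objects(): later pushes come first, and thread-level
    # objects stack on top of application-level ones.
    manager = ContextStackManager()
    manager.push_application('app-handler')
    manager.push_thread('thread-handler')
    assert list(manager.iter_context_objects()) == ['thread-handler',
                                                    'app-handler']
    manager.pop_thread()
    assert list(manager.iter_context_objects()) == ['app-handler']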
|
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class dns_stats(base_resource) :
def __init__(self) :
self._clearstats = ""
self._dnstotqueries = 0
self._dnsqueriesrate = 0
self._dnstotmultiquery = 0
self._dnstotanswers = 0
self._dnsanswersrate = 0
self._dnstotserverresponse = 0
self._dnsserverresponserate = 0
self._dnstotrecupdate = 0
self._dnstotauthans = 0
self._dnstotserverquery = 0
self._dnsserverqueryrate = 0
self._dnstotcacheflush = 0
self._dnstotcacheentriesflush = 0
self._dnscurnoauthentries = 0
self._dnscurauthentries = 0
self._dnstotauthnonames = 0
self._dnstotunsupportedresponseclass = 0
self._dnstotinvalidqueryformat = 0
self._dnstotstrayanswer = 0
self._dnstotresponsebadlen = 0
self._dnstotreqrefusals = 0
self._dnserrnullattack = 0
self._dnstotunsupportedresponsetype = 0
self._dnstotunsupportedqueryclass = 0
self._dnstotnonauthnodatas = 0
self._dnstotnodataresps = 0
self._dnstotmultiquerydisableerror = 0
self._dnstotothererrors = 0
self._dns64totqueries = 0
self._dns64queriesrate = 0
self._dns64totanswers = 0
self._dns64answersrate = 0
self._dns64totrwanswers = 0
self._dns64rwanswersrate = 0
self._dns64totresponses = 0
self._dns64responsesrate = 0
self._dns64totgslbqueries = 0
self._dns64gslbqueriesrate = 0
self._dns64totgslbanswers = 0
self._dns64gslbanswersrate = 0
self._dns64tottcanswers = 0
self._dns64tcanswersrate = 0
self._dns64totsvraqueries = 0
self._dns64svraqueriesrate = 0
self._dns64totaaaabypass = 0
self._dns64aaaabypassrate = 0
self._dns64tottcpqueries = 0
self._dns64tcpqueriesrate = 0
self._dns64activepolicies = 0
self._dns64totnodataresp = 0
self._dns64nodataresprate = 0
self._dnstotnsrecqueries = 0
self._dnsnsrecqueriesrate = 0
self._dnstotsoarecqueries = 0
self._dnssoarecqueriesrate = 0
self._dnstotptrrecqueries = 0
self._dnsptrrecqueriesrate = 0
self._dnstotsrvrecqueries = 0
self._dnssrvrecqueriesrate = 0
self._dnstotaresponse = 0
self._dnsaresponserate = 0
self._dnstotcnameresponse = 0
self._dnscnameresponserate = 0
self._dnstotmxresponse = 0
self._dnsmxresponserate = 0
self._dnstotanyresponse = 0
self._dnsanyresponserate = 0
self._dnstotnsrecupdate = 0
self._dnstotsoarecupdate = 0
self._dnstotptrrecupdate = 0
self._dnstotsrvrecupdate = 0
self._dnstotaaaarecqueries = 0
self._dnsaaaarecqueriesrate = 0
self._dnstotarecqueries = 0
self._dnsarecqueriesrate = 0
self._dnstotcnamerecqueries = 0
self._dnscnamerecqueriesrate = 0
self._dnstotmxrecqueries = 0
self._dnsmxrecqueriesrate = 0
self._dnstotanyqueries = 0
self._dnsanyqueriesrate = 0
self._dnstotaaaaresponse = 0
self._dnsaaaaresponserate = 0
self._dnstotnsresponse = 0
self._dnsnsresponserate = 0
self._dnstotsoaresponse = 0
self._dnssoaresponserate = 0
self._dnstotptrresponse = 0
self._dnsptrresponserate = 0
self._dnstotsrvresponse = 0
self._dnssrvresponserate = 0
self._dnstotaaaarecupdate = 0
self._dnstotarecupdate = 0
self._dnstotmxrecupdate = 0
self._dnstotcnamerecupdate = 0
self._dnscuraaaarecord = 0
self._dnscurarecord = 0
self._dnscurmxrecord = 0
self._dnscurcnamerecord = 0
self._dnscurnsrecord = 0
self._dnscursoarecord = 0
self._dnscurptrrecord = 0
self._dnscursrvrecord = 0
self._dnstotaaaarecfailed = 0
self._dnstotarecfailed = 0
self._dnstotmxrecfailed = 0
self._dnstotptrrecfailed = 0
self._dnstotnsrecfailed = 0
self._dnstotcnamerecfailed = 0
self._dnstotsoarecfailed = 0
self._dnstotsrvrecfailed = 0
self._dnstotanyrecfailed = 0
self._dnstotunsupportedqueries = 0
@property
def clearstats(self) :
ur"""Clear the statsistics / counters.<br/>Possible values = basic, full.
"""
try :
return self._clearstats
except Exception as e:
raise e
@clearstats.setter
def clearstats(self, clearstats) :
ur"""Clear the statsistics / counters
"""
try :
self._clearstats = clearstats
except Exception as e:
raise e
@property
def dnstotcnameresponse(self) :
ur"""Total number of CNAME responses received.
"""
try :
return self._dnstotcnameresponse
except Exception as e:
raise e
@property
def dnsaresponserate(self) :
ur"""Rate (/s) counter for dnstotaresponse.
"""
try :
return self._dnsaresponserate
except Exception as e:
raise e
@property
def dns64rwanswersrate(self) :
ur"""Rate (/s) counter for dns64totrwanswers.
"""
try :
return self._dns64rwanswersrate
except Exception as e:
raise e
@property
def dnstotaaaarecupdate(self) :
ur"""Total number of AAAA record updates.
"""
try :
return self._dnstotaaaarecupdate
except Exception as e:
raise e
@property
def dnstotptrrecupdate(self) :
ur"""Total number of PTR record updates.
"""
try :
return self._dnstotptrrecupdate
except Exception as e:
raise e
@property
def dns64nodataresprate(self) :
ur"""Rate (/s) counter for dns64totnodataresp.
"""
try :
return self._dns64nodataresprate
except Exception as e:
raise e
@property
def dnserrnullattack(self) :
ur"""Total number of queries received where all the counts are 0.
"""
try :
return self._dnserrnullattack
except Exception as e:
raise e
@property
def dnstotunsupportedqueries(self) :
ur"""Total number of requests for which query type requested was unsupported.
"""
try :
return self._dnstotunsupportedqueries
except Exception as e:
raise e
@property
def dnstotauthnonames(self) :
ur"""Number of queries for which no record was found.
"""
try :
return self._dnstotauthnonames
except Exception as e:
raise e
@property
def dnstotmxrecupdate(self) :
ur"""Total number of MX record updates.
"""
try :
return self._dnstotmxrecupdate
except Exception as e:
raise e
@property
def dnstotanyqueries(self) :
ur"""Total number of ANY queries received.
"""
try :
return self._dnstotanyqueries
except Exception as e:
raise e
@property
def dns64totrwanswers(self) :
ur"""Total number of DNS64 answers served after rewriting the response.
"""
try :
return self._dns64totrwanswers
except Exception as e:
raise e
@property
def dnstotstrayanswer(self) :
ur"""Total number of stray answers.
"""
try :
return self._dnstotstrayanswer
except Exception as e:
raise e
@property
def dnstotptrrecfailed(self) :
ur"""Total number of times PTR record lookup failed.
"""
try :
return self._dnstotptrrecfailed
except Exception as e:
raise e
@property
def dns64activepolicies(self) :
ur"""Total number of active dns64 policies.
"""
try :
return self._dns64activepolicies
except Exception as e:
raise e
@property
def dnstotcnamerecupdate(self) :
ur"""Total number of CNAME record updates.
"""
try :
return self._dnstotcnamerecupdate
except Exception as e:
raise e
@property
def dnscursoarecord(self) :
ur"""Total number of SOA records.
"""
try :
return self._dnscursoarecord
except Exception as e:
raise e
@property
def dnstotcacheentriesflush(self) :
ur"""Total number of cache entries flushed.
"""
try :
return self._dnstotcacheentriesflush
except Exception as e:
raise e
@property
def dnsaaaaresponserate(self) :
ur"""Rate (/s) counter for dnstotaaaaresponse.
"""
try :
return self._dnsaaaaresponserate
except Exception as e:
raise e
@property
def dnsaaaarecqueriesrate(self) :
ur"""Rate (/s) counter for dnstotaaaarecqueries.
"""
try :
return self._dnsaaaarecqueriesrate
except Exception as e:
raise e
@property
def dnstotmxrecqueries(self) :
ur"""Total number of MX queries received.
"""
try :
return self._dnstotmxrecqueries
except Exception as e:
raise e
@property
def dnstotsoarecqueries(self) :
ur"""Total number of SOA queries received.
"""
try :
return self._dnstotsoarecqueries
except Exception as e:
raise e
@property
def dnscurarecord(self) :
ur"""Total number of A records.
"""
try :
return self._dnscurarecord
except Exception as e:
raise e
@property
def dnstotinvalidqueryformat(self) :
ur"""Total number of queries whose format was invalid.
"""
try :
return self._dnstotinvalidqueryformat
except Exception as e:
raise e
@property
def dnstotsrvrecfailed(self) :
ur"""Total number of times SRV record lookup failed.
"""
try :
return self._dnstotsrvrecfailed
except Exception as e:
raise e
@property
def dnsarecqueriesrate(self) :
ur"""Rate (/s) counter for dnstotarecqueries.
"""
try :
return self._dnsarecqueriesrate
except Exception as e:
raise e
@property
def dnstotsoaresponse(self) :
ur"""Total number of SOA responses received.
"""
try :
return self._dnstotsoaresponse
except Exception as e:
raise e
@property
def dnstotserverresponse(self) :
ur"""Total number of Server responses received.
"""
try :
return self._dnstotserverresponse
except Exception as e:
raise e
@property
def dnsanyqueriesrate(self) :
ur"""Rate (/s) counter for dnstotanyqueries.
"""
try :
return self._dnsanyqueriesrate
except Exception as e:
raise e
@property
def dnscurauthentries(self) :
ur"""Total number of authoritative entries.
"""
try :
return self._dnscurauthentries
except Exception as e:
raise e
@property
def dnstotmxresponse(self) :
ur"""Total number of MX responses received.
"""
try :
return self._dnstotmxresponse
except Exception as e:
raise e
@property
def dnstotptrrecqueries(self) :
ur"""Total number of PTR queries received.
"""
try :
return self._dnstotptrrecqueries
except Exception as e:
raise e
@property
def dnstotunsupportedqueryclass(self) :
ur"""Total number of queries for which query class was unsupported.
"""
try :
return self._dnstotunsupportedqueryclass
except Exception as e:
raise e
@property
def dnsptrresponserate(self) :
ur"""Rate (/s) counter for dnstotptrresponse.
"""
try :
return self._dnsptrresponserate
except Exception as e:
raise e
@property
def dnssoarecqueriesrate(self) :
ur"""Rate (/s) counter for dnstotsoarecqueries.
"""
try :
return self._dnssoarecqueriesrate
except Exception as e:
raise e
@property
def dns64totgslbanswers(self) :
ur"""Total number of DNS64 queries served.
"""
try :
return self._dns64totgslbanswers
except Exception as e:
raise e
@property
def dns64answersrate(self) :
ur"""Rate (/s) counter for dns64totanswers.
"""
try :
return self._dns64answersrate
except Exception as e:
raise e
@property
def dns64totqueries(self) :
ur"""Total number of DNS64 queries recieved.
"""
try :
return self._dns64totqueries
except Exception as e:
raise e
@property
def dnsptrrecqueriesrate(self) :
ur"""Rate (/s) counter for dnstotptrrecqueries.
"""
try :
return self._dnsptrrecqueriesrate
except Exception as e:
raise e
@property
def dnstotsoarecfailed(self) :
ur"""Total number of times SOA record lookup failed.
"""
try :
return self._dnstotsoarecfailed
except Exception as e:
raise e
@property
def dns64tottcpqueries(self) :
ur"""Total number of dns64 queries over TCP.
"""
try :
return self._dns64tottcpqueries
except Exception as e:
raise e
@property
def dnstotaaaarecqueries(self) :
ur"""Total number of AAAA queries received.
"""
try :
return self._dnstotaaaarecqueries
except Exception as e:
raise e
@property
def dns64responsesrate(self) :
ur"""Rate (/s) counter for dns64totresponses.
"""
try :
return self._dns64responsesrate
except Exception as e:
raise e
@property
def dnstotmxrecfailed(self) :
ur"""Total number of times MX record lookup failed.
"""
try :
return self._dnstotmxrecfailed
except Exception as e:
raise e
@property
def dns64tottcanswers(self) :
ur"""Total number of Answers served with TC bit set in DNS64 context.
"""
try :
return self._dns64tottcanswers
except Exception as e:
raise e
@property
def dnstotaaaarecfailed(self) :
ur"""Total number of times AAAA record lookup failed.
"""
try :
return self._dnstotaaaarecfailed
except Exception as e:
raise e
@property
def dnssrvresponserate(self) :
ur"""Rate (/s) counter for dnstotsrvresponse.
"""
try :
return self._dnssrvresponserate
except Exception as e:
raise e
@property
def dnsnsrecqueriesrate(self) :
ur"""Rate (/s) counter for dnstotnsrecqueries.
"""
try :
return self._dnsnsrecqueriesrate
except Exception as e:
raise e
@property
def dnstotserverquery(self) :
ur"""Total number of Server queries sent.
"""
try :
return self._dnstotserverquery
except Exception as e:
raise e
@property
def dnssoaresponserate(self) :
ur"""Rate (/s) counter for dnstotsoaresponse.
"""
try :
return self._dnssoaresponserate
except Exception as e:
raise e
@property
def dnstotmultiquery(self) :
ur"""Total number of Multi Query request received.
"""
try :
return self._dnstotmultiquery
except Exception as e:
raise e
@property
def dnscuraaaarecord(self) :
ur"""Total number of AAAA records.
"""
try :
return self._dnscuraaaarecord
except Exception as e:
raise e
@property
def dnsqueriesrate(self) :
ur"""Rate (/s) counter for dnstotqueries.
"""
try :
return self._dnsqueriesrate
except Exception as e:
raise e
@property
def dns64gslbqueriesrate(self) :
ur"""Rate (/s) counter for dns64totgslbqueries.
"""
try :
return self._dns64gslbqueriesrate
except Exception as e:
raise e
@property
def dnsanyresponserate(self) :
ur"""Rate (/s) counter for dnstotanyresponse.
"""
try :
return self._dnsanyresponserate
except Exception as e:
raise e
@property
def dnsanswersrate(self) :
ur"""Rate (/s) counter for dnstotanswers.
"""
try :
return self._dnsanswersrate
except Exception as e:
raise e
@property
def dnstotarecupdate(self) :
ur"""Total number of A record updates.
"""
try :
return self._dnstotarecupdate
except Exception as e:
raise e
@property
def dnscnameresponserate(self) :
ur"""Rate (/s) counter for dnstotcnameresponse.
"""
try :
return self._dnscnameresponserate
except Exception as e:
raise e
@property
def dnstotothererrors(self) :
ur"""Total number of other errors. .
"""
try :
return self._dnstotothererrors
except Exception as e:
raise e
@property
def dnstotnsrecfailed(self) :
ur"""Total number of times NS record lookup failed.
"""
try :
return self._dnstotnsrecfailed
except Exception as e:
raise e
@property
def dnscurcnamerecord(self) :
ur"""Total number of CNAME records.
"""
try :
return self._dnscurcnamerecord
except Exception as e:
raise e
@property
def dnscurnoauthentries(self) :
ur"""Total number of non-authoritative entries.
"""
try :
return self._dnscurnoauthentries
except Exception as e:
raise e
@property
def dnstotresponsebadlen(self) :
ur"""Number of DNS responses received with invalid resoure data length.
"""
try :
return self._dnstotresponsebadlen
except Exception as e:
raise e
@property
def dns64totaaaabypass(self) :
ur"""Total number of times AAAA query has been bypassed in DNS64 trnsaction.
"""
try :
return self._dns64totaaaabypass
except Exception as e:
raise e
@property
def dns64tcpqueriesrate(self) :
ur"""Rate (/s) counter for dns64tottcpqueries.
"""
try :
return self._dns64tcpqueriesrate
except Exception as e:
raise e
@property
def dnstotaaaaresponse(self) :
ur"""Total number of AAAA responses received.
"""
try :
return self._dnstotaaaaresponse
except Exception as e:
raise e
@property
def dns64gslbanswersrate(self) :
ur"""Rate (/s) counter for dns64totgslbanswers.
"""
try :
return self._dns64gslbanswersrate
except Exception as e:
raise e
@property
def dnstotunsupportedresponsetype(self) :
ur"""Total number of responses for which response type requested was unsupported.
"""
try :
return self._dnstotunsupportedresponsetype
except Exception as e:
raise e
@property
def dns64totsvraqueries(self) :
ur"""Total number of Queries sent by DNS64 module to backend.
"""
try :
return self._dns64totsvraqueries
except Exception as e:
raise e
@property
def dns64totresponses(self) :
ur"""Total number of responses recieved from backend in DNS64 context.
"""
try :
return self._dns64totresponses
except Exception as e:
raise e
@property
def dnstotnsrecqueries(self) :
ur"""Total number of NS queries received.
"""
try :
return self._dnstotnsrecqueries
except Exception as e:
raise e
@property
def dns64totanswers(self) :
ur"""Total number of DNS64 answers served.
"""
try :
return self._dns64totanswers
except Exception as e:
raise e
@property
def dnscursrvrecord(self) :
ur"""Total number of SRV records.
"""
try :
return self._dnscursrvrecord
except Exception as e:
raise e
@property
def dnscurptrrecord(self) :
ur"""Total number of PTR records.
"""
try :
return self._dnscurptrrecord
except Exception as e:
raise e
@property
def dnstotanyresponse(self) :
ur"""Total number of ANY responses received.
"""
try :
return self._dnstotanyresponse
except Exception as e:
raise e
@property
def dnstotanyrecfailed(self) :
ur"""Total number of times ANY query lookup failed.
"""
try :
return self._dnstotanyrecfailed
except Exception as e:
raise e
@property
def dns64aaaabypassrate(self) :
ur"""Rate (/s) counter for dns64totaaaabypass.
"""
try :
return self._dns64aaaabypassrate
except Exception as e:
raise e
@property
def dnstotnsresponse(self) :
ur"""Total number of NS responses received.
"""
try :
return self._dnstotnsresponse
except Exception as e:
raise e
@property
def dnssrvrecqueriesrate(self) :
ur"""Rate (/s) counter for dnstotsrvrecqueries.
"""
try :
return self._dnssrvrecqueriesrate
except Exception as e:
raise e
@property
def dnstotnsrecupdate(self) :
ur"""Total number of NS record updates.
"""
try :
return self._dnstotnsrecupdate
except Exception as e:
raise e
@property
def dnstotcnamerecqueries(self) :
ur"""Total number of CNAME queries received.
"""
try :
return self._dnstotcnamerecqueries
except Exception as e:
raise e
@property
def dnstotmultiquerydisableerror(self) :
ur"""Total number of times a multi query was disabled and received a multi query.
"""
try :
return self._dnstotmultiquerydisableerror
except Exception as e:
raise e
@property
def dnstotarecqueries(self) :
ur"""Total number of A queries received.
"""
try :
return self._dnstotarecqueries
except Exception as e:
raise e
@property
def dnsserverresponserate(self) :
ur"""Rate (/s) counter for dnstotserverresponse.
"""
try :
return self._dnsserverresponserate
except Exception as e:
raise e
@property
def dnsnsresponserate(self) :
ur"""Rate (/s) counter for dnstotnsresponse.
"""
try :
return self._dnsnsresponserate
except Exception as e:
raise e
@property
def dnstotanswers(self) :
ur"""Total number of DNS responses received.
"""
try :
return self._dnstotanswers
except Exception as e:
raise e
@property
def dnsmxrecqueriesrate(self) :
ur"""Rate (/s) counter for dnstotmxrecqueries.
"""
try :
return self._dnsmxrecqueriesrate
except Exception as e:
raise e
@property
def dnstotcnamerecfailed(self) :
ur"""Total number of times CNAME record lookup failed.
"""
try :
return self._dnstotcnamerecfailed
except Exception as e:
raise e
@property
def dnstotsrvrecqueries(self) :
ur"""Total number of SRV queries received.
"""
try :
return self._dnstotsrvrecqueries
except Exception as e:
raise e
@property
def dnstotaresponse(self) :
ur"""Total number of A responses received.
"""
try :
return self._dnstotaresponse
except Exception as e:
raise e
@property
def dnscnamerecqueriesrate(self) :
ur"""Rate (/s) counter for dnstotcnamerecqueries.
"""
try :
return self._dnscnamerecqueriesrate
except Exception as e:
raise e
@property
def dns64totnodataresp(self) :
ur"""Total number of responses recieved from backend with ancount 0.
"""
try :
return self._dns64totnodataresp
except Exception as e:
raise e
@property
def dnstotqueries(self) :
ur"""Total number of DNS queries received.
"""
try :
return self._dnstotqueries
except Exception as e:
raise e
@property
def dnstotsrvresponse(self) :
ur"""Total number of SRV responses received.
"""
try :
return self._dnstotsrvresponse
except Exception as e:
raise e
@property
def dnstotunsupportedresponseclass(self) :
ur"""Total number of responses for which response types were unsupported.
"""
try :
return self._dnstotunsupportedresponseclass
except Exception as e:
raise e
@property
def dnstotcacheflush(self) :
ur"""Total number of times cache was flushed.
"""
try :
return self._dnstotcacheflush
except Exception as e:
raise e
@property
def dnstotarecfailed(self) :
ur"""Total number of times A record lookup failed.
"""
try :
return self._dnstotarecfailed
except Exception as e:
raise e
@property
def dnstotsrvrecupdate(self) :
ur"""Total number of SRV record updates.
"""
try :
return self._dnstotsrvrecupdate
except Exception as e:
raise e
@property
def dns64svraqueriesrate(self) :
ur"""Rate (/s) counter for dns64totsvraqueries.
"""
try :
return self._dns64svraqueriesrate
except Exception as e:
raise e
@property
def dnsserverqueryrate(self) :
ur"""Rate (/s) counter for dnstotserverquery.
"""
try :
return self._dnsserverqueryrate
except Exception as e:
raise e
@property
def dnscurnsrecord(self) :
ur"""Total number of NS records.
"""
try :
return self._dnscurnsrecord
except Exception as e:
raise e
@property
def dnstotsoarecupdate(self) :
ur"""Total number of SOA record updates.
"""
try :
return self._dnstotsoarecupdate
except Exception as e:
raise e
@property
def dns64queriesrate(self) :
ur"""Rate (/s) counter for dns64totqueries.
"""
try :
return self._dns64queriesrate
except Exception as e:
raise e
@property
def dnstotnonauthnodatas(self) :
ur"""Total number of responses for which there was a format error.
"""
try :
return self._dnstotnonauthnodatas
except Exception as e:
raise e
@property
def dnstotauthans(self) :
ur"""Number of queries which were authoritatively answered.
"""
try :
return self._dnstotauthans
except Exception as e:
raise e
@property
def dnstotreqrefusals(self) :
ur"""Number of DNS requests refused.
"""
try :
return self._dnstotreqrefusals
except Exception as e:
raise e
@property
def dnscurmxrecord(self) :
ur"""Total number of MX records.
"""
try :
return self._dnscurmxrecord
except Exception as e:
raise e
@property
def dns64tcanswersrate(self) :
ur"""Rate (/s) counter for dns64tottcanswers.
"""
try :
return self._dns64tcanswersrate
except Exception as e:
raise e
@property
def dnstotrecupdate(self) :
ur"""Total number of record updates.
"""
try :
return self._dnstotrecupdate
except Exception as e:
raise e
@property
def dns64totgslbqueries(self) :
ur"""Total number of DNS64 queries for GSLB domain.
"""
try :
return self._dns64totgslbqueries
except Exception as e:
raise e
@property
def dnstotnodataresps(self) :
ur"""Number of DNS responses received without answer.
"""
try :
return self._dnstotnodataresps
except Exception as e:
raise e
@property
def dnstotptrresponse(self) :
ur"""Total number of PTR responses received.
"""
try :
return self._dnstotptrresponse
except Exception as e:
raise e
@property
def dnsmxresponserate(self) :
ur"""Rate (/s) counter for dnstotmxresponse.
"""
try :
return self._dnsmxresponserate
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
ur""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(dns_response, response, self.__class__.__name__.replace('_stats',''))
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.dns
except Exception as e :
raise e
def _get_object_name(self) :
ur""" Returns the value of object identifier argument
"""
try :
return 0
except Exception as e :
raise e
@classmethod
def get(cls, service, name="", option_="") :
ur""" Use this API to fetch the statistics of all dns_stats resources that are configured on netscaler.
"""
try :
obj = dns_stats()
if not name :
response = obj.stat_resources(service, option_)
return response
except Exception as e:
raise e
class Clearstats:
basic = "basic"
full = "full"
class dns_response(base_response) :
def __init__(self, length=1) :
self.dns = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.dns = [dns_stats() for _ in range(length)]
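if __name__ == '__main__':
    # A minimal usage sketch; the address and credentials are
    # illustrative, and the import path follows the package layout used
    # above.
    from nssrc.com.citrix.netscaler.nitro.service.nitro_service import nitro_service
    client = nitro_service("192.0.2.10", "http")
    client.login("nsroot", "nsroot")
    stats = dns_stats.get(client)
    print "Total DNS queries:", stats[0].dnstotqueries
    client.logout()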
|
|
# Copyright 2014 OneConvergence, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import httplib
import requests
import urlparse
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from gbpservice.neutron.services.grouppolicy.common import exceptions
SERVICE_CONTROLLER_OPTIONS = [
cfg.StrOpt('service_controller_ip',
help=_('One Convergence NVSD Service Controller IP Address')),
cfg.StrOpt('service_controller_port',
help=_('One Convergence NVSD Service Controller Port Number')),
cfg.StrOpt('request_retries',
help=_('One Convergence NVSD Service Controller API '
'request retries')),
cfg.StrOpt('request_timeout',
help=_('One Convergence NVSD Service Controller API '
'request timeout')),
cfg.StrOpt('api_version',
default='1.0',
help=_('One Convergence NVSD Service Controller API Version')),
]
cfg.CONF.register_opts(SERVICE_CONTROLLER_OPTIONS, "NVSD_SERVICE_CONTROLLER")
LOG = logging.getLogger(__name__)
NVSD_ENDPOINT = "/nvsd_connectivity_port"
NVSD_ENDPOINT_GROUP = "/nvsd_connectivity_portgroup"
NVSD_CONTRACT = "/nvsd_connectivity_contract"
NVSD_POLICY = "/nvsd_connectivity_policy"
NVSD_POLICY_ACTION = "/nvsd_connectivity_action"
NVSD_POLICY_CLASSIFIER = "/nvsd_connectivity_classifier"
NVSD_POLICY_RULE = "/nvsd_connectivity_rule"
NVSD_SERVICE = "/service"
ADMIN_URL = "&is_admin=true"
API_TENANT_USER = "?tenant_id=%s&user_id=%s"
class GroupPolicyException(exceptions.GroupPolicyException):
"""Base for policy driver exceptions returned to user."""
message = _("Unexpected response code %(status)s from NVSD "
"Service Controller for %(method)s to %(url)s")
class NVSDServiceController(object):
"""Encapsulates the One Convergence NVSD Service Controller details.
Uses python-requests library to perform API request to One Convergence
NVSD Service Controller.
"""
def __init__(self):
self._host = cfg.CONF.NVSD_SERVICE_CONTROLLER.service_controller_ip
self._port = cfg.CONF.NVSD_SERVICE_CONTROLLER.service_controller_port
self._retries = cfg.CONF.NVSD_SERVICE_CONTROLLER.request_retries
self._request_timeout = float(cfg.CONF.NVSD_SERVICE_CONTROLLER.
request_timeout)
self.service_api_url = 'http://' + self._host + ':' + str(self._port)
self.pool = requests.Session()
def do_request(self, method, url=None, headers=None, data=None,
timeout=10):
response = self.pool.request(method, url=url,
headers=headers, data=data,
timeout=timeout)
return response
def request(self, method, uri, context, body="",
content_type="application/json", filters={}):
"""Issue a request to NVSD Service Controller."""
headers = {"Content-Type": content_type}
api_version = "/v" + cfg.CONF.NVSD_SERVICE_CONTROLLER.api_version
uri = api_version + uri
if context.is_admin:
uri = uri + ADMIN_URL
if filters.get('tenant_id'):
uri = uri + "&filter_tenant_id=%s" % filters.get('tenant_id')[0]
url = urlparse.urljoin(self.service_api_url, uri)
response = None
try:
response = self.do_request(method, url=url, headers=headers,
data=body,
timeout=self._request_timeout)
LOG.debug("Request: %(method)s %(uri)s executed",
{'method': method, 'uri': self.service_api_url + uri})
except httplib.IncompleteRead as err:
response = err.partial
except Exception as err:
LOG.error(_("Request failed in NVSD Service Controller. "
"Error : %s"), err)
if response is None:
# Request was timed out.
LOG.error(_("Response is Null, Request for method : %(method)s to "
"%(uri)s Timed out"), {'method': method, 'uri': uri})
raise GroupPolicyException(status="TimedOut", method=method,
url=self.service_api_url + uri)
status = response.status_code
        # Not Found (404) is OK for DELETE; ignore it here.
if method == 'DELETE' and status == 404:
return
elif status not in (requests.codes.ok, requests.codes.created,
requests.codes.no_content):
LOG.error(_("Unexpected response code %(status)s from NVSD "
"Service Controller for %(method)s to %(url)s"),
{'status': status, 'method': method, 'url': url})
raise GroupPolicyException(status=status, method=method,
url=self.service_api_url + uri)
else:
LOG.debug("Success: %(method)s %(url)s status=%(status)s",
{'method': method, 'url': self.service_api_url + uri,
'status': status})
response.body = response.content
return response
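# Usage sketch (illustrative, not part of the driver): `ctx` stands for a
# Neutron-style context object with `is_admin`, `tenant_id` and `user`
# attributes.
#
#     controller = NVSDServiceController()
#     uri = NVSD_ENDPOINT + "?tenant_id=%s&user_id=%s" % (ctx.tenant_id,
#                                                         ctx.user)
#     ports = controller.request("GET", uri, ctx).json()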
class NVSDServiceApi(object):
"""Invokes One Convergence NVSD Service Controller API.
    Invokes the appropriate One Convergence NVSD Service Controller API for
    each OpenStack Group-Based Policy API operation. Maps the OpenStack
    Group Policy parameters to One Convergence NVSD API parameters.
"""
def __init__(self):
self.nvsd_service_controller = NVSDServiceController()
def create_policy_classifier(self, context, policy_classifier):
body = copy.deepcopy(policy_classifier)
body.update({"port": policy_classifier.get("port_range")})
tenant_id = context.tenant_id
uri = (NVSD_POLICY_CLASSIFIER + "?tenant_id=%s&user_id=%s" %
(tenant_id, context.user))
response = self.nvsd_service_controller.request("POST", uri, context,
jsonutils.dumps(body))
return response.json()
def get_policy_classifiers(self, context, tenant_id, filters={}):
uri = (NVSD_POLICY_CLASSIFIER + "?tenant_id=%s&user_id=%s" %
(tenant_id, context.user))
response = self.nvsd_service_controller.request("GET", uri, context)
return response.json()
def get_policy_classifier(self, context, classifier_id):
uri = (NVSD_POLICY_CLASSIFIER + "/%s?tenant_id=%s&user_id=%s" %
(classifier_id, context.tenant_id, context.user))
response = self.nvsd_service_controller.request("GET", uri, context)
return response.json()
def update_policy_classifier(self, context, classifier):
tenant_id = context.tenant_id
classifier_id = classifier.get('id')
body = copy.deepcopy(classifier)
body.update({"port": classifier.get("port_range")})
uri = (NVSD_POLICY_CLASSIFIER + "/%s?tenant_id=%s&user_id=%s" %
(classifier_id, tenant_id, context.user))
response = self.nvsd_service_controller.request("PUT", uri, context,
jsonutils.dumps(body))
return response.json()
def delete_policy_classifier(self, context, classifier_id):
tenant_id = context.tenant_id
uri = (NVSD_POLICY_CLASSIFIER + "/%s?tenant_id=%s&user_id=%s" %
(classifier_id, tenant_id, context.user))
self.nvsd_service_controller.request("DELETE", uri, context)
    def create_policy_rule(self, context, rule):
        # Remapping of GBP fields to NVSD fields is intentionally disabled
        # here; the rule is sent as-is. Kept for reference:
        #     body = copy.deepcopy(rule)
        #     body.update({'classifier': rule.get('policy_classifier_id'),
        #                  'actions': rule.get('policy_actions', []),
        #                  'policies_attached': []})
tenant_id = context.tenant_id
uri = (NVSD_POLICY_RULE + "?tenant_id=%s&user_id=%s" %
(tenant_id, context.user))
response = self.nvsd_service_controller.request("POST", uri, context,
jsonutils.dumps(rule))
return response.json()
def update_policy_rule(self, context, rule):
tenant_id = context.tenant_id
rule_id = rule.get('id')
body = copy.deepcopy(rule)
body.update({'classifier': rule.get('policy_classifier_id'),
'actions': rule.get('policy_actions', [])})
uri = (NVSD_POLICY_RULE + "/%s?tenant_id=%s&user_id=%s" %
(rule_id, tenant_id, context.user))
response = self.nvsd_service_controller.request("PUT", uri, context,
jsonutils.dumps(body))
return response.json()
def get_policy_rule(self, context, rule_id):
uri = (NVSD_POLICY_RULE + "/%s?tenant_id=%s&user_id=%s" %
(rule_id, context.tenant_id, context.user))
response = self.nvsd_service_controller.request("GET", uri, context)
return response.json()
def get_policy_rules(self, context, tenant_id, filters={}):
uri = (NVSD_POLICY_RULE + "?tenant_id=%s&user_id=%s" %
(tenant_id, context.user))
response = self.nvsd_service_controller.request("GET", uri, context)
return response.json()
def delete_policy_rule(self, context, rule_id):
uri = (NVSD_POLICY_RULE + "/%s?tenant_id=%s&user_id=%s" %
(rule_id, context.tenant_id, context.user))
self.nvsd_service_controller.request("DELETE", uri, context)
def create_policy_action(self, context, action):
body = copy.deepcopy(action)
action_type = action.get("action_type")
if action_type.lower() == "redirect":
body["action_type"] = "l2redirect"
tenant_id = context.tenant_id
uri = (NVSD_POLICY_ACTION + "?tenant_id=%s&user_id=%s" %
(tenant_id, context.user))
response = self.nvsd_service_controller.request("POST", uri, context,
jsonutils.dumps(body))
return response.json()
def update_policy_action(self, context, policy_action):
tenant_id = context.tenant_id
action_id = policy_action.get('id')
body = copy.deepcopy(policy_action)
action_type = policy_action.get("action_type")
if action_type.lower() == "redirect":
body["action_type"] = "l2redirect"
uri = (NVSD_POLICY_ACTION + "/%s?tenant_id=%s&user_id=%s" %
(action_id, tenant_id, context.user))
response = self.nvsd_service_controller.request("PUT", uri, context,
jsonutils.dumps(body))
return response.json()
def get_policy_action(self, context, action_id):
uri = (NVSD_POLICY_ACTION + "/%s?tenant_id=%s&user_id=%s" %
(action_id, context.tenant_id, context.user))
response = self.nvsd_service_controller.request("GET", uri)
return response.json()
def get_policy_actions(self, context, tenant_id, filters={}):
uri = (NVSD_POLICY_ACTION + "?tenant_id=%s&user_id=%s" %
(tenant_id, context.user))
response = self.nvsd_service_controller.request("GET", uri, context)
return response.json()
def delete_policy_action(self, context, action_id):
uri = (NVSD_POLICY_ACTION + "/%s?tenant_id=%s&user_id=%s" %
(action_id, context.tenant_id, context.user))
self.nvsd_service_controller.request("DELETE", uri, context)
def create_endpointgroup(self, context, endpointgroup):
uri = (NVSD_ENDPOINT_GROUP + "?tenant_id=%s&user_id=%s" %
(context.tenant_id, context.user))
response = self.nvsd_service_controller.request(
"POST", uri, context,
jsonutils.dumps(endpointgroup))
return response.json()
def get_endpointgroups(self, context, tenant_id, filters={}):
uri = (NVSD_ENDPOINT_GROUP + "?tenant_id=%s&user_id=%s" %
(tenant_id, context.user))
response = self.nvsd_service_controller.request("GET", uri, context)
return response.json()
def get_endpointgroup(self, context, endpointgroup_id):
uri = (NVSD_ENDPOINT_GROUP + "/%s?tenant_id=%s&user_id=%s" %
(endpointgroup_id, context.tenant_id, context.user))
response = self.nvsd_service_controller.request("GET", uri, context)
return response.json()
def update_endpointgroup(self, context, endpointgroup):
tenant_id = context.tenant_id
endpointgroup_id = endpointgroup.get('id')
uri = (NVSD_ENDPOINT_GROUP + "/%s?tenant_id=%s&user_id=%s" %
(endpointgroup_id, tenant_id, context.user))
response = self.nvsd_service_controller.request(
"PUT", uri, context,
jsonutils.dumps(endpointgroup))
return response.json()
def delete_endpointgroup(self, context, endpointgroup_id):
uri = (NVSD_ENDPOINT_GROUP + "/%s?tenant_id=%s&user_id=%s" %
(endpointgroup_id, context.tenant_id, context.user))
self.nvsd_service_controller.request("DELETE", uri, context)
def create_endpoint(self, context, endpoint):
body = copy.deepcopy(endpoint)
body.update({'connectivity_portgroup_id':
endpoint.get('policy_target_group_id')})
tenant_id = context.tenant_id
uri = (NVSD_ENDPOINT + "?tenant_id=%s&user_id=%s" %
(tenant_id, context.user))
response = self.nvsd_service_controller.request("POST", uri, context,
jsonutils.dumps(body))
return response.json()
def update_endpoint(self, context, endpoint):
tenant_id = context.tenant_id
endpoint_id = endpoint.get('id')
body = copy.deepcopy(endpoint)
body.update({'connectivity_portgroup_id':
endpoint.get('policy_target_group_id')})
uri = (NVSD_ENDPOINT + "/%s?tenant_id=%s&user_id=%s" %
(endpoint_id, tenant_id, context.user))
response = self.nvsd_service_controller.request("PUT", uri, context,
jsonutils.dumps(body))
return response.json()
def get_endpoint(self, context, endpoint_id):
uri = (NVSD_ENDPOINT + "/%s?tenant_id=%s&user_id=%s" %
(endpoint_id, context.tenant_id, context.user))
response = self.nvsd_service_controller.request("GET", uri, context)
return response.json()
def get_endpoints(self, context, tenant_id, filters={}):
uri = (NVSD_ENDPOINT + "?tenant_id=%s&user_id=%s" %
(tenant_id, context.user))
response = self.nvsd_service_controller.request("GET", uri, context)
return response.json()
def delete_endpoint(self, context, endpoint_id):
uri = (NVSD_ENDPOINT + "/%s?tenant_id=%s&user_id=%s" %
(endpoint_id, context.tenant_id, context.user))
self.nvsd_service_controller.request("DELETE", uri, context)
return
def create_contract(self, context, contract):
tenant_id = context.tenant_id
uri = (NVSD_CONTRACT + "?tenant_id=%s&user_id=%s" %
(tenant_id, context.user))
response = self.nvsd_service_controller.request(
"POST", uri, context, jsonutils.dumps(contract))
return response.json()
def update_contract(self, context, contract):
tenant_id = context.tenant_id
contract_id = contract.get('id')
uri = (NVSD_CONTRACT + "/%s?tenant_id=%s&user_id=%s" %
(contract_id, tenant_id, context.user))
response = self.nvsd_service_controller.request(
"PUT", uri, context, jsonutils.dumps(contract))
return response.json()
def get_contract(self, context, contract_id):
uri = (NVSD_CONTRACT + "/%s?tenant_id=%s&user_id=%s" %
(contract_id, context.tenant_id, context.user))
response = self.nvsd_service_controller.request("GET", uri, context)
return response.json()
def get_contracts(self, context, tenant_id, filters={}):
uri = (NVSD_CONTRACT + "?tenant_id=%s&user_id=%s" %
(tenant_id, context.user))
response = self.nvsd_service_controller.request("GET", uri, context)
return response.json()
def delete_contract(self, context, contract_id):
uri = (NVSD_CONTRACT + "/%s?tenant_id=%s&user_id=%s" %
(contract_id, context.tenant_id, context.user))
self.nvsd_service_controller.request("DELETE", uri, context)
def create_policy(self, context, policy):
tenant_id = context.tenant_id
uri = (NVSD_POLICY + "?tenant_id=%s&user_id=%s" %
(tenant_id, context.user))
response = self.nvsd_service_controller.request(
"POST", uri, context, jsonutils.dumps(policy))
return response.json()
def update_policy(self, context, policy):
tenant_id = context.tenant_id
policy_id = policy.get('id')
uri = (NVSD_POLICY + "/%s?tenant_id=%s&user_id=%s" %
(policy_id, tenant_id, context.user))
response = self.nvsd_service_controller.request(
"PUT", uri, context, jsonutils.dumps(policy))
return response.json()
def get_policy(self, context, policy_id):
uri = (NVSD_POLICY + "/%s?tenant_id=%s&user_id=%s" %
(policy_id, context.tenant_id, context.user))
response = self.nvsd_service_controller.request("GET", uri, context)
return response.json()
def get_policys(self, context, tenant_id, filters={}):
uri = (NVSD_POLICY + "?tenant_id=%s&user_id=%s" %
(tenant_id, context.user))
response = self.nvsd_service_controller.request("GET", uri, context)
return response.json()
def delete_policy(self, context, policy_id):
uri = (NVSD_POLICY + "/%s?tenant_id=%s&user_id=%s" %
(policy_id, context.tenant_id, context.user))
self.nvsd_service_controller.request("DELETE", uri, context)
def get_nvsd_service(self, context, service_id):
uri = (NVSD_SERVICE + "/%s?tenant_id=%s&user_id=%s" %
(service_id, context.tenant_id, context.user))
response = self.nvsd_service_controller.request("GET", uri, context)
return response.json()
|
|
# Copyright (C) 2015 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import itertools
from xmlrpc import client as xmlrpclib
import netaddr
from oslotest import base as test_base
from pytz import timezone
from oslo_serialization import msgpackutils
from oslo_utils import uuidutils
_TZ_FMT = '%Y-%m-%d %H:%M:%S %Z%z'
class Color(object):
def __init__(self, r, g, b):
self.r = r
self.g = g
self.b = b
class ColorHandler(object):
handles = (Color,)
identity = (
msgpackutils.HandlerRegistry.non_reserved_extension_range.min_value + 1
)
@staticmethod
def serialize(obj):
blob = '%s, %s, %s' % (obj.r, obj.g, obj.b)
blob = blob.encode('ascii')
return blob
@staticmethod
def deserialize(data):
chunks = [int(c.strip()) for c in data.split(b",")]
return Color(chunks[0], chunks[1], chunks[2])
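# Round-trip sketch: ColorHandler.serialize(Color(1, 2, 3)) yields b'1, 2, 3',
# and ColorHandler.deserialize(b'1, 2, 3') rebuilds an equivalent Color.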
class MySpecialSetHandler(object):
handles = (set,)
identity = msgpackutils.SetHandler.identity
def _dumps_loads(obj):
obj = msgpackutils.dumps(obj)
return msgpackutils.loads(obj)
class MsgPackUtilsTest(test_base.BaseTestCase):
def test_list(self):
self.assertEqual([1, 2, 3], _dumps_loads([1, 2, 3]))
def test_empty_list(self):
self.assertEqual([], _dumps_loads([]))
def test_tuple(self):
# Seems like we do lose whether it was a tuple or not...
#
# Maybe fixed someday:
#
# https://github.com/msgpack/msgpack-python/issues/98
self.assertEqual([1, 2, 3], _dumps_loads((1, 2, 3)))
def test_dict(self):
self.assertEqual(dict(a=1, b=2, c=3),
_dumps_loads(dict(a=1, b=2, c=3)))
def test_empty_dict(self):
self.assertEqual({}, _dumps_loads({}))
def test_complex_dict(self):
src = {
'now': datetime.datetime(1920, 2, 3, 4, 5, 6, 7),
'later': datetime.datetime(1921, 2, 3, 4, 5, 6, 9),
'a': 1,
'b': 2.0,
'c': [],
'd': set([1, 2, 3]),
'zzz': uuidutils.generate_uuid(),
'yyy': 'yyy',
'ddd': b'bbb',
'today': datetime.date.today(),
}
self.assertEqual(src, _dumps_loads(src))
def test_itercount(self):
it = itertools.count(1)
next(it)
next(it)
it2 = _dumps_loads(it)
self.assertEqual(next(it), next(it2))
it = itertools.count(0)
it2 = _dumps_loads(it)
self.assertEqual(next(it), next(it2))
def test_itercount_step(self):
it = itertools.count(1, 3)
it2 = _dumps_loads(it)
self.assertEqual(next(it), next(it2))
def test_set(self):
self.assertEqual(set([1, 2]), _dumps_loads(set([1, 2])))
def test_empty_set(self):
self.assertEqual(set([]), _dumps_loads(set([])))
def test_frozenset(self):
self.assertEqual(frozenset([1, 2]), _dumps_loads(frozenset([1, 2])))
def test_empty_frozenset(self):
self.assertEqual(frozenset([]), _dumps_loads(frozenset([])))
def test_datetime_preserve(self):
x = datetime.datetime(1920, 2, 3, 4, 5, 6, 7)
self.assertEqual(x, _dumps_loads(x))
def test_datetime(self):
x = xmlrpclib.DateTime()
x.decode("19710203T04:05:06")
self.assertEqual(x, _dumps_loads(x))
def test_ipaddr(self):
thing = {'ip_addr': netaddr.IPAddress('1.2.3.4')}
self.assertEqual(thing, _dumps_loads(thing))
def test_today(self):
today = datetime.date.today()
self.assertEqual(today, _dumps_loads(today))
def test_datetime_tz_clone(self):
eastern = timezone('US/Eastern')
now = datetime.datetime.now()
e_dt = eastern.localize(now)
e_dt2 = _dumps_loads(e_dt)
self.assertEqual(e_dt, e_dt2)
self.assertEqual(e_dt.strftime(_TZ_FMT), e_dt2.strftime(_TZ_FMT))
def test_datetime_tz_different(self):
eastern = timezone('US/Eastern')
pacific = timezone('US/Pacific')
now = datetime.datetime.now()
e_dt = eastern.localize(now)
p_dt = pacific.localize(now)
self.assertNotEqual(e_dt, p_dt)
self.assertNotEqual(e_dt.strftime(_TZ_FMT), p_dt.strftime(_TZ_FMT))
e_dt2 = _dumps_loads(e_dt)
p_dt2 = _dumps_loads(p_dt)
self.assertNotEqual(e_dt2, p_dt2)
self.assertNotEqual(e_dt2.strftime(_TZ_FMT), p_dt2.strftime(_TZ_FMT))
self.assertEqual(e_dt, e_dt2)
self.assertEqual(p_dt, p_dt2)
def test_copy_then_register(self):
registry = msgpackutils.default_registry
self.assertRaises(ValueError,
registry.register, MySpecialSetHandler(),
reserved=True, override=True)
registry = registry.copy(unfreeze=True)
registry.register(MySpecialSetHandler(),
reserved=True, override=True)
h = registry.match(set())
self.assertIsInstance(h, MySpecialSetHandler)
def test_bad_register(self):
registry = msgpackutils.default_registry
self.assertRaises(ValueError,
registry.register, MySpecialSetHandler(),
reserved=True, override=True)
self.assertRaises(ValueError,
registry.register, MySpecialSetHandler())
registry = registry.copy(unfreeze=True)
registry.register(ColorHandler())
self.assertRaises(ValueError,
registry.register, ColorHandler())
def test_custom_register(self):
registry = msgpackutils.default_registry.copy(unfreeze=True)
registry.register(ColorHandler())
c = Color(255, 254, 253)
c_b = msgpackutils.dumps(c, registry=registry)
c = msgpackutils.loads(c_b, registry=registry)
self.assertEqual(255, c.r)
self.assertEqual(254, c.g)
self.assertEqual(253, c.b)
def test_object(self):
self.assertRaises(ValueError, msgpackutils.dumps, object())
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Reads register values from the inferior, and provides a
standardized interface to registers like "sp" and "pc".
"""
import collections
import ctypes
import re
import sys
from types import ModuleType
import gdb
import pwndbg.arch
import pwndbg.config
import pwndbg.events
import pwndbg.memoize
import pwndbg.proc
import pwndbg.remote
import pwndbg.typeinfo
class RegisterSet:
#: Program counter register
pc = None
#: Stack pointer register
stack = None
#: Frame pointer register
frame = None
#: Return address register
retaddr = None
#: Flags register (eflags, cpsr)
flags = None
    #: List of native-size general-purpose registers
gpr = None
#: List of miscellaneous, valid registers
misc = None
    #: Register-based arguments for the most common ABI
    args = None
#: Return value register
retval = None
#: Common registers which should be displayed in the register context
common = None
#: All valid registers
all = None
def __init__(self,
pc='pc',
stack='sp',
frame=None,
retaddr=tuple(),
flags=dict(),
gpr=tuple(),
misc=tuple(),
args=tuple(),
retval=None):
self.pc = pc
self.stack = stack
self.frame = frame
self.retaddr = retaddr
self.flags = flags
self.gpr = gpr
self.misc = misc
self.args = args
self.retval = retval
# In 'common', we don't want to lose the ordering of:
self.common = []
for reg in gpr + (frame, stack, pc) + tuple(flags):
if reg and reg not in self.common:
self.common.append(reg)
self.all = set(i for i in misc) | set(flags) | set(self.retaddr) | set(self.common)
self.all -= {None}
def __iter__(self):
for r in self.all:
yield r
arm_cpsr_flags = collections.OrderedDict([
('N', 31), ('Z', 30), ('C', 29), ('V', 28), ('Q', 27), ('J', 24), ('T', 5), ('E', 9), ('A', 8), ('I', 7), ('F', 6)])
arm_xpsr_flags = collections.OrderedDict([
('N', 31), ('Z', 30), ('C', 29), ('V', 28), ('Q', 27), ('T', 24)])
arm = RegisterSet( retaddr = ('lr',),
flags = {'cpsr': arm_cpsr_flags},
gpr = ('r0', 'r1', 'r2', 'r3', 'r4', 'r5', 'r6', 'r7', 'r8', 'r9', 'r10', 'r11', 'r12'),
args = ('r0','r1','r2','r3'),
retval = 'r0')
# ARM Cortex-M
armcm = RegisterSet( retaddr = ('lr',),
flags = {'xpsr': arm_xpsr_flags},
gpr = ('r0', 'r1', 'r2', 'r3', 'r4', 'r5', 'r6', 'r7', 'r8', 'r9', 'r10', 'r11', 'r12'),
args = ('r0','r1','r2','r3'),
retval = 'r0')
# FIXME AArch64 does not have a CPSR register
aarch64 = RegisterSet( retaddr = ('lr',),
flags = {'cpsr':{}},
frame = 'x29',
gpr = ('x0', 'x1', 'x2', 'x3', 'x4', 'x5', 'x6', 'x7', 'x8', 'x9',
'x10', 'x11', 'x12', 'x13', 'x14', 'x15', 'x16', 'x17', 'x18', 'x19',
'x20', 'x21', 'x22', 'x23', 'x24', 'x25', 'x26', 'x27', 'x28'),
misc = ('w0', 'w1', 'w2', 'w3', 'w4', 'w5', 'w6', 'w7', 'w8', 'w9',
'w10', 'w11', 'w12', 'w13', 'w14', 'w15', 'w16', 'w17', 'w18', 'w19',
'w20', 'w21', 'w22', 'w23', 'w24', 'w25', 'w26', 'w27', 'w28'),
args = ('x0','x1','x2','x3'),
retval = 'x0')
x86flags = {'eflags': collections.OrderedDict([
('CF', 0),
('PF', 2),
('AF', 4),
('ZF', 6),
('SF', 7),
('IF', 9),
('DF', 10),
('OF', 11),
])}
amd64 = RegisterSet(pc = 'rip',
stack = 'rsp',
frame = 'rbp',
flags = x86flags,
gpr = ('rax','rbx','rcx','rdx','rdi','rsi',
'r8', 'r9', 'r10','r11','r12',
'r13','r14','r15'),
misc = ('cs','ss','ds','es','fs','gs',
'fsbase', 'gsbase',
'ax','ah','al',
'bx','bh','bl',
'cx','ch','cl',
'dx','dh','dl',
'dil','sil','spl','bpl',
'di','si','bp','sp','ip'),
args = ('rdi','rsi','rdx','rcx','r8','r9'),
retval = 'rax')
i386 = RegisterSet( pc = 'eip',
stack = 'esp',
frame = 'ebp',
flags = x86flags,
gpr = ('eax','ebx','ecx','edx','edi','esi'),
misc = ('cs','ss','ds','es','fs','gs',
'fsbase', 'gsbase',
'ax','ah','al',
'bx','bh','bl',
'cx','ch','cl',
'dx','dh','dl',
'di','si','bp','sp','ip'),
retval = 'eax')
# http://math-atlas.sourceforge.net/devel/assembly/elfspec_ppc.pdf
# r0 Volatile register which may be modified during function linkage
# r1 Stack frame pointer, always valid
# r2 System-reserved register (points at GOT)
# r3-r4 Volatile registers used for parameter passing and return values
# r5-r10 Volatile registers used for parameter passing
# r11-r12 Volatile registers which may be modified during function linkage
# r13 Small data area pointer register (points to TLS)
# r14-r30 Registers used for local variables
# r31 Used for local variables or "environment pointers"
powerpc = RegisterSet( retaddr = ('lr','r0'),
flags = {'msr':{},'xer':{}},
gpr = ('r3', 'r4', 'r5', 'r6', 'r7', 'r8', 'r9',
'r10', 'r11', 'r12', 'r13', 'r14', 'r15', 'r16', 'r17', 'r18', 'r19',
'r20', 'r21', 'r22', 'r23', 'r24', 'r25', 'r26', 'r27', 'r28', 'r29',
'r30', 'r31'),
misc = ('cr','lr','r2'),
args = ('r3', 'r4', 'r5', 'r6', 'r7', 'r8', 'r9', 'r10'),
retval = 'r3')
# http://people.cs.clemson.edu/~mark/sparc/sparc_arch_desc.txt
# http://people.cs.clemson.edu/~mark/subroutines/sparc.html
# https://www.utdallas.edu/~edsha/security/sparcoverflow.htm
#
# http://people.cs.clemson.edu/~mark/sparc/assembly.txt
# ____________________________________
# %g0 == %r0 (always zero) \
# %g1 == %r1 | g stands for global
# ... |
# %g7 == %r7 |
# ____________________________________/
# %o0 == %r8 \
# ... | o stands for output (note: not 0)
# %o6 == %r14 == %sp (stack ptr) |
# %o7 == %r15 == for return address |
# ____________________________________/
# %l0 == %r16 \
# ... | l stands for local (note: not 1)
# %l7 == %r23 |
# ____________________________________/
# %i0 == %r24 \
# ... | i stands for input
# %i6 == %r30 == %fp (frame ptr) |
# %i7 == %r31 == for return address |
# ____________________________________/
sparc = RegisterSet(stack = 'sp',
frame = 'fp',
retaddr = ('i7',),
flags = {'psr':{}},
gpr = ('g1', 'g2', 'g3', 'g4', 'g5', 'g6', 'g7',
'o0', 'o1', 'o2', 'o3', 'o4', 'o5', 'o7',
'l0', 'l1', 'l2', 'l3', 'l4', 'l5', 'l6', 'l7',
'i0', 'i1', 'i2', 'i3', 'i4', 'i5'),
args = ('i0','i1','i2','i3','i4','i5'),
retval = 'o0')
# http://logos.cs.uic.edu/366/notes/mips%20quick%20tutorial.htm
# r0 => zero
# r1 => temporary
# r2-r3 => values
# r4-r7 => arguments
# r8-r15 => temporary
# r16-r23 => saved values
# r24-r25 => temporary
# r26-r27 => interrupt/trap handler
# r28 => global pointer
# r29 => stack pointer
# r30 => frame pointer
# r31 => return address
mips = RegisterSet( frame = 'fp',
retaddr = ('ra',),
gpr = ('v0','v1','a0','a1','a2','a3',
't0', 't1', 't2', 't3', 't4', 't5', 't6', 't7', 't8', 't9',
's0', 's1', 's2', 's3', 's4', 's5', 's6', 's7', 's8'),
args = ('a0','a1','a2','a3'),
retval = 'v0')
arch_to_regs = {
'i386': i386,
'i8086': i386,
'x86-64': amd64,
'mips': mips,
'sparc': sparc,
'arm': arm,
'armcm': armcm,
'aarch64': aarch64,
'powerpc': powerpc,
}
@pwndbg.proc.OnlyWhenRunning
def gdb77_get_register(name):
return gdb.parse_and_eval('$' + name)
@pwndbg.proc.OnlyWhenRunning
def gdb79_get_register(name):
return gdb.selected_frame().read_register(name)
try:
gdb.Frame.read_register
get_register = gdb79_get_register
except AttributeError:
get_register = gdb77_get_register
# We need to manually make some ptrace calls to get fs/gs bases on Intel
PTRACE_ARCH_PRCTL = 30
ARCH_GET_FS = 0x1003
ARCH_GET_GS = 0x1004
class module(ModuleType):
last = {}
@pwndbg.memoize.reset_on_stop
@pwndbg.memoize.reset_on_prompt
def __getattr__(self, attr):
attr = attr.lstrip('$')
try:
# Seriously, gdb? Only accepts uint32.
if 'eflags' in attr or 'cpsr' in attr:
value = gdb77_get_register(attr)
value = value.cast(pwndbg.typeinfo.uint32)
else:
if attr.lower() == 'xpsr':
attr = 'xPSR'
value = get_register(attr)
size = pwndbg.typeinfo.unsigned.get(value.type.sizeof, pwndbg.typeinfo.ulong)
value = value.cast(size)
if attr.lower() == 'pc' and pwndbg.arch.current == 'i8086':
value += self.cs * 16
value = int(value)
return value & pwndbg.arch.ptrmask
except (ValueError, gdb.error):
return None
@pwndbg.memoize.reset_on_stop
@pwndbg.memoize.reset_on_prompt
def __getitem__(self, item):
if not isinstance(item, str):
print("Unknown register type: %r" % (item))
import pdb
import traceback
traceback.print_stack()
pdb.set_trace()
return None
# e.g. if we're looking for register "$rax", turn it into "rax"
item = item.lstrip('$')
item = getattr(self, item.lower())
if isinstance(item, int):
return int(item) & pwndbg.arch.ptrmask
return item
def __iter__(self):
regs = set(arch_to_regs[pwndbg.arch.current]) | {'pc', 'sp'}
for item in regs:
yield item
@property
def current(self):
return arch_to_regs[pwndbg.arch.current]
@property
def gpr(self):
return arch_to_regs[pwndbg.arch.current].gpr
@property
def common(self):
return arch_to_regs[pwndbg.arch.current].common
@property
def frame(self):
return arch_to_regs[pwndbg.arch.current].frame
@property
def retaddr(self):
return arch_to_regs[pwndbg.arch.current].retaddr
@property
def flags(self):
return arch_to_regs[pwndbg.arch.current].flags
@property
def stack(self):
return arch_to_regs[pwndbg.arch.current].stack
@property
def retval(self):
return arch_to_regs[pwndbg.arch.current].retval
@property
def all(self):
regs = arch_to_regs[pwndbg.arch.current]
retval = []
for regset in (regs.pc, regs.stack, regs.frame, regs.retaddr, regs.flags, regs.gpr, regs.misc):
if regset is None:
continue
elif isinstance(regset, (list, tuple)):
retval.extend(regset)
elif isinstance(regset, dict):
retval.extend(regset.keys())
else:
retval.append(regset)
return retval
def fix(self, expression):
for regname in set(self.all + ['sp','pc']):
expression = re.sub(r'\$?\b%s\b' % regname, r'$'+regname, expression)
return expression
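    # Sketch: on x86-64, fix('rsp+8') returns '$rsp+8', making bare register
    # names usable inside gdb expressions.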
def items(self):
for regname in self.all:
yield regname, self[regname]
arch_to_regs = arch_to_regs
@property
def changed(self):
delta = []
for reg, value in self.previous.items():
if self[reg] != value:
delta.append(reg)
return delta
@property
@pwndbg.memoize.reset_on_stop
def fsbase(self):
return self._fs_gs_helper(ARCH_GET_FS)
@property
@pwndbg.memoize.reset_on_stop
def gsbase(self):
return self._fs_gs_helper(ARCH_GET_GS)
@pwndbg.memoize.reset_on_stop
def _fs_gs_helper(self, which):
"""Supports fetching based on segmented addressing, a la fs:[0x30].
Requires ptrace'ing the child directly."""
# We can't really do anything if the process is remote.
if pwndbg.remote.is_remote(): return 0
# Use the lightweight process ID
pid, lwpid, tid = gdb.selected_thread().ptid
# Get the register
ppvoid = ctypes.POINTER(ctypes.c_void_p)
value = ppvoid(ctypes.c_void_p())
value.contents.value = 0
libc = ctypes.CDLL('libc.so.6')
result = libc.ptrace(PTRACE_ARCH_PRCTL,
lwpid,
value,
which)
if result == 0:
return (value.contents.value or 0) & pwndbg.arch.ptrmask
return 0
def __repr__(self):
return ('<module pwndbg.regs>')
# To prevent garbage collection
tether = sys.modules[__name__]
sys.modules[__name__] = module(__name__, '')
@pwndbg.events.cont
@pwndbg.events.stop
def update_last():
M = sys.modules[__name__]
M.previous = M.last
M.last = {k:M[k] for k in M.common}
if pwndbg.config.show_retaddr_reg:
M.last.update({k:M[k] for k in M.retaddr})
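# Usage sketch (inside a GDB session with pwndbg loaded; illustrative):
#
#     import pwndbg.regs
#     pwndbg.regs.pc          # program counter, architecture-independent
#     pwndbg.regs['sp']       # item-style lookup of the stack pointer
#     pwndbg.regs.changed     # registers that changed since the last stop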
|
|
# -*- Mode: Python; tab-width: 4 -*-
# Id: asynchat.py,v 2.26 2000/09/07 22:29:26 rushing Exp
# Author: Sam Rushing <rushing@nightmare.com>
# ======================================================================
# Copyright 1996 by Sam Rushing
#
# All Rights Reserved
#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of Sam
# Rushing not be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior
# permission.
#
# SAM RUSHING DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
# NO EVENT SHALL SAM RUSHING BE LIABLE FOR ANY SPECIAL, INDIRECT OR
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# ======================================================================
r"""A class supporting chat-style (command/response) protocols.
This class adds support for 'chat' style protocols - where one side
sends a 'command', and the other sends a response (examples would be
the common internet protocols - smtp, nntp, ftp, etc..).
The handle_read() method looks at the input stream for the current
'terminator' (usually '\r\n' for single-line responses, '\r\n.\r\n'
for multi-line output), calling self.found_terminator() on its
receipt.
for example:
Say you build an async nntp client using this class. At the start
of the connection, you'll have self.terminator set to '\r\n', in
order to process the single-line greeting. Just before issuing a
'LIST' command you'll set it to '\r\n.\r\n'. The output of the LIST
command will be accumulated (using your own 'collect_incoming_data'
method) up to the terminator, and then control will be returned to
you - by calling your self.found_terminator() method.
"""
import socket
import asyncore
from collections import deque
class async_chat (asyncore.dispatcher):
"""This is an abstract class. You must derive from this class, and add
the two methods collect_incoming_data() and found_terminator()"""
# these are overridable defaults
ac_in_buffer_size = 65536
ac_out_buffer_size = 65536
# we don't want to enable the use of encoding by default, because that is a
# sign of an application bug that we don't want to pass silently
use_encoding = 0
encoding = 'latin-1'
def __init__ (self, sock=None, map=None):
# for string terminator matching
self.ac_in_buffer = b''
# we use a list here rather than io.BytesIO for a few reasons...
# del lst[:] is faster than bio.truncate(0)
# lst = [] is faster than bio.truncate(0)
self.incoming = []
# we toss the use of the "simple producer" and replace it with
# a pure deque, which the original fifo was a wrapping of
self.producer_fifo = deque()
asyncore.dispatcher.__init__ (self, sock, map)
def collect_incoming_data(self, data):
raise NotImplementedError("must be implemented in subclass")
def _collect_incoming_data(self, data):
self.incoming.append(data)
def _get_data(self):
d = b''.join(self.incoming)
del self.incoming[:]
return d
def found_terminator(self):
raise NotImplementedError("must be implemented in subclass")
def set_terminator (self, term):
"Set the input delimiter. Can be a fixed string of any length, an integer, or None"
if isinstance(term, str) and self.use_encoding:
term = bytes(term, self.encoding)
self.terminator = term
def get_terminator (self):
return self.terminator
# grab some more data from the socket,
# throw it to the collector method,
# check for the terminator,
# if found, transition to the next state.
def handle_read (self):
try:
data = self.recv (self.ac_in_buffer_size)
except OSError as why:
self.handle_error()
return
if isinstance(data, str) and self.use_encoding:
            data = bytes(data, self.encoding)
self.ac_in_buffer = self.ac_in_buffer + data
# Continue to search for self.terminator in self.ac_in_buffer,
# while calling self.collect_incoming_data. The while loop
# is necessary because we might read several data+terminator
# combos with a single recv(4096).
while self.ac_in_buffer:
lb = len(self.ac_in_buffer)
terminator = self.get_terminator()
if not terminator:
# no terminator, collect it all
self.collect_incoming_data (self.ac_in_buffer)
self.ac_in_buffer = b''
elif isinstance(terminator, int):
# numeric terminator
n = terminator
if lb < n:
self.collect_incoming_data (self.ac_in_buffer)
self.ac_in_buffer = b''
self.terminator = self.terminator - lb
else:
self.collect_incoming_data (self.ac_in_buffer[:n])
self.ac_in_buffer = self.ac_in_buffer[n:]
self.terminator = 0
self.found_terminator()
else:
# 3 cases:
# 1) end of buffer matches terminator exactly:
# collect data, transition
# 2) end of buffer matches some prefix:
# collect data to the prefix
# 3) end of buffer does not match any prefix:
# collect data
terminator_len = len(terminator)
index = self.ac_in_buffer.find(terminator)
if index != -1:
# we found the terminator
if index > 0:
# don't bother reporting the empty string (source of subtle bugs)
self.collect_incoming_data (self.ac_in_buffer[:index])
self.ac_in_buffer = self.ac_in_buffer[index+terminator_len:]
# This does the Right Thing if the terminator is changed here.
self.found_terminator()
else:
# check for a prefix of the terminator
index = find_prefix_at_end (self.ac_in_buffer, terminator)
if index:
if index != lb:
# we found a prefix, collect up to the prefix
self.collect_incoming_data (self.ac_in_buffer[:-index])
self.ac_in_buffer = self.ac_in_buffer[-index:]
break
else:
# no prefix, collect it all
self.collect_incoming_data (self.ac_in_buffer)
self.ac_in_buffer = b''
def handle_write (self):
self.initiate_send()
def handle_close (self):
self.close()
def push (self, data):
sabs = self.ac_out_buffer_size
if len(data) > sabs:
for i in range(0, len(data), sabs):
self.producer_fifo.append(data[i:i+sabs])
else:
self.producer_fifo.append(data)
self.initiate_send()
def push_with_producer (self, producer):
self.producer_fifo.append(producer)
self.initiate_send()
def readable (self):
"predicate for inclusion in the readable for select()"
# cannot use the old predicate, it violates the claim of the
# set_terminator method.
# return (len(self.ac_in_buffer) <= self.ac_in_buffer_size)
return 1
def writable (self):
"predicate for inclusion in the writable for select()"
return self.producer_fifo or (not self.connected)
def close_when_done (self):
"automatically close this channel once the outgoing queue is empty"
self.producer_fifo.append(None)
def initiate_send(self):
while self.producer_fifo and self.connected:
first = self.producer_fifo[0]
# handle empty string/buffer or None entry
            if not first:
                del self.producer_fifo[0]
                if first is None:
                    self.handle_close()
                    return
# handle classic producer behavior
obs = self.ac_out_buffer_size
try:
data = first[:obs]
except TypeError:
data = first.more()
if data:
self.producer_fifo.appendleft(data)
else:
del self.producer_fifo[0]
continue
if isinstance(data, str) and self.use_encoding:
data = bytes(data, self.encoding)
# send the data
try:
num_sent = self.send(data)
except OSError:
self.handle_error()
return
if num_sent:
if num_sent < len(data) or obs < len(first):
self.producer_fifo[0] = first[num_sent:]
else:
del self.producer_fifo[0]
# we tried to send some actual data
return
def discard_buffers (self):
# Emergencies only!
self.ac_in_buffer = b''
del self.incoming[:]
self.producer_fifo.clear()
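# Illustrative sketch (not part of the original module): the module docstring
# describes the collect_incoming_data()/found_terminator() contract; a minimal
# line-oriented subclass looks like this. The class name and the handle_line()
# hook are assumptions for the example.
class example_line_chat(async_chat):
    r"""Collect b'\r\n'-terminated lines and hand each one to handle_line()."""

    def __init__(self, sock=None, map=None):
        async_chat.__init__(self, sock, map)
        self.set_terminator(b'\r\n')

    def collect_incoming_data(self, data):
        self._collect_incoming_data(data)

    def found_terminator(self):
        # a complete line (terminator stripped) is now buffered
        self.handle_line(self._get_data())

    def handle_line(self, line):
        pass  # override in a real application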
class simple_producer:
def __init__ (self, data, buffer_size=512):
self.data = data
self.buffer_size = buffer_size
def more (self):
if len (self.data) > self.buffer_size:
result = self.data[:self.buffer_size]
self.data = self.data[self.buffer_size:]
return result
else:
result = self.data
self.data = b''
return result
class fifo:
def __init__ (self, list=None):
if not list:
self.list = deque()
else:
self.list = deque(list)
def __len__ (self):
return len(self.list)
def is_empty (self):
return not self.list
def first (self):
return self.list[0]
def push (self, data):
self.list.append(data)
def pop (self):
if self.list:
return (1, self.list.popleft())
else:
return (0, None)
# Given 'haystack', see if any prefix of 'needle' is at its end. This
# assumes an exact match has already been checked. Return the number of
# characters matched.
# for example:
# f_p_a_e ("qwerty\r", "\r\n") => 1
# f_p_a_e ("qwertydkjf", "\r\n") => 0
# f_p_a_e ("qwerty\r\n", "\r\n") => <undefined>
# this could maybe be made faster with a computed regex?
# [answer: no; circa Python-2.0, Jan 2001]
# new python: 28961/s
# old python: 18307/s
# re: 12820/s
# regex: 14035/s
def find_prefix_at_end (haystack, needle):
l = len(needle) - 1
while l and not haystack.endswith(needle[:l]):
l -= 1
return l
|
|
from django.contrib.auth.models import Permission
from django.test import TestCase
from django.urls import reverse
from wagtail.admin.admin_url_finder import AdminURLFinder
from wagtail.core.models import Page, Site
from wagtail.tests.utils import WagtailTestUtils
class TestSiteIndexView(TestCase, WagtailTestUtils):
def setUp(self):
self.login()
self.home_page = Page.objects.get(id=2)
def get(self, params={}):
return self.client.get(reverse('wagtailsites:index'), params)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailadmin/generic/index.html')
def test_pagination(self):
pages = ['0', '1', '-1', '9999', 'Not a page']
for page in pages:
response = self.get({'p': page})
self.assertEqual(response.status_code, 200)
class TestSiteCreateView(TestCase, WagtailTestUtils):
def setUp(self):
self.login()
self.home_page = Page.objects.get(id=2)
self.localhost = Site.objects.all()[0]
def get(self, params={}):
return self.client.get(reverse('wagtailsites:add'), params)
def post(self, post_data={}):
return self.client.post(reverse('wagtailsites:add'), post_data)
def create_site(self, hostname='testsite', port=80, is_default_site=False, root_page=None):
root_page = root_page or self.home_page
Site.objects.create(
hostname=hostname,
port=port,
is_default_site=is_default_site,
root_page=root_page)
def test_default_fixtures_present(self):
# we should have loaded with a single site
self.assertEqual(self.localhost.hostname, 'localhost')
self.assertEqual(self.localhost.port, 80)
self.assertIs(self.localhost.is_default_site, True)
self.assertEqual(self.localhost.root_page, self.home_page)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailsites/create.html')
def test_create(self):
response = self.post({
'hostname': "testsite",
'port': "80",
'root_page': str(self.home_page.id),
})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailsites:index'))
# Check that the site was created
self.assertEqual(Site.objects.filter(hostname='testsite').count(), 1)
def test_duplicate_defaults_not_allowed(self):
response = self.post({
'hostname': "also_default",
'port': "80",
'is_default_site': "on",
'root_page': str(self.home_page.id),
})
# Should return the form with errors
self.assertEqual(response.status_code, 200)
self.assertIs(bool(response.context['form'].errors), True)
# Check that the site was not created
sites = Site.objects.filter(hostname='also_default')
self.assertEqual(sites.count(), 0)
def test_duplicate_hostnames_on_different_ports_allowed(self):
response = self.post({
'hostname': "localhost",
'port': "8000",
'root_page': str(self.home_page.id),
})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailsites:index'))
# Check that the site was created
self.assertEqual(Site.objects.filter(hostname='localhost').count(), 2)
def test_duplicate_hostnames_on_same_port_not_allowed(self):
# Confirm there's one localhost already
self.assertEqual(Site.objects.filter(hostname='localhost').count(), 1)
response = self.post({
'hostname': "localhost",
'port': "80",
'root_page': str(self.home_page.id),
})
# Should return the form with errors
self.assertEqual(response.status_code, 200)
self.assertIs(bool(response.context['form'].errors), True)
# Check that the site was not created, still only one localhost entry
self.assertEqual(Site.objects.filter(hostname='localhost').count(), 1)
class TestSiteEditView(TestCase, WagtailTestUtils):
def setUp(self):
self.user = self.login()
self.home_page = Page.objects.get(id=2)
self.localhost = Site.objects.all()[0]
def get(self, params={}, site_id=None):
return self.client.get(reverse('wagtailsites:edit', args=(site_id or self.localhost.id, )), params)
def post(self, post_data={}, site_id=None):
site_id = site_id or self.localhost.id
site = Site.objects.get(id=site_id)
post_defaults = {
'hostname': site.hostname,
'port': site.port,
'root_page': site.root_page.id,
}
for k, v in post_defaults.items():
post_data[k] = post_data.get(k, v)
if 'default' in post_data:
if post_data['default']: # only include the is_default_site key if we want to set it
post_data['is_default_site'] = 'on'
elif site.is_default_site:
post_data['is_default_site'] = 'on'
return self.client.post(reverse('wagtailsites:edit', args=(site_id,)), post_data)
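    # e.g. self.post({'default': True}, site_id=site.id) submits the site's
    # existing hostname/port/root_page plus is_default_site='on'.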
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailsites/edit.html')
url_finder = AdminURLFinder(self.user)
expected_url = '/admin/sites/%d/' % self.localhost.id
self.assertEqual(url_finder.get_edit_url(self.localhost), expected_url)
    def test_nonexistent_redirect(self):
self.assertEqual(self.get(site_id=100000).status_code, 404)
def test_edit(self):
edited_hostname = 'edited'
response = self.post({
'hostname': edited_hostname,
})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailsites:index'))
# Check that the site was edited
self.assertEqual(Site.objects.get(id=self.localhost.id).hostname, edited_hostname)
def test_changing_the_default_site_workflow(self):
# First create a second, non-default, site
second_site = Site.objects.create(
hostname="not_yet_default",
port=80,
is_default_site=False,
root_page=self.home_page)
# Make the original default no longer default
response = self.post(
{
'default': False,
},
site_id=self.localhost.id
)
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailsites:index'))
# Check that the site is no longer default
self.assertIs(Site.objects.get(id=self.localhost.id).is_default_site, False)
# Now make the second site default
response = self.post(
{
'default': True,
},
site_id=second_site.id
)
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailsites:index'))
# Check that the second site is now set as default
self.assertIs(Site.objects.get(id=second_site.id).is_default_site, True)
def test_making_a_second_site_the_default_not_allowed(self):
second_site = Site.objects.create(
hostname="also_default",
port=80,
is_default_site=False,
root_page=self.home_page)
response = self.post(
{
'default': True,
},
site_id=second_site.id
)
# Should return the form with errors
self.assertEqual(response.status_code, 200)
self.assertIs(bool(response.context['form'].errors), True)
        # Check that the site was not edited
self.assertIs(Site.objects.get(id=second_site.id).is_default_site, False)
class TestSiteDeleteView(TestCase, WagtailTestUtils):
def setUp(self):
self.login()
self.home_page = Page.objects.get(id=2)
self.localhost = Site.objects.all()[0]
def get(self, params={}, site_id=None):
return self.client.get(reverse('wagtailsites:delete', args=(site_id or self.localhost.id, )), params)
def post(self, post_data={}, site_id=None):
return self.client.post(reverse('wagtailsites:delete', args=(site_id or self.localhost.id, )), post_data)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailadmin/generic/confirm_delete.html')
    def test_nonexistent_redirect(self):
self.assertEqual(self.get(site_id=100000).status_code, 404)
def test_posting_deletes_site(self):
response = self.post()
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailsites:index'))
        # Check that the site was deleted
with self.assertRaises(Site.DoesNotExist):
Site.objects.get(id=self.localhost.id)
class TestLimitedPermissions(TestCase, WagtailTestUtils):
def setUp(self):
# Create a user
user = self.create_user(username='test', password='password')
user.user_permissions.add(
Permission.objects.get(codename='access_admin'),
Permission.objects.get(codename='add_site'),
Permission.objects.get(codename='change_site'),
Permission.objects.get(codename='delete_site')
)
# Login
self.login(username='test', password='password')
self.home_page = Page.objects.get(id=2)
self.localhost = Site.objects.all()[0]
def test_get_index(self):
response = self.client.get(reverse('wagtailsites:index'))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailadmin/generic/index.html')
def test_get_create_view(self):
response = self.client.get(reverse('wagtailsites:add'))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailsites/create.html')
def test_create(self):
response = self.client.post(reverse('wagtailsites:add'), {
'hostname': "testsite",
'port': "80",
'root_page': str(self.home_page.id),
})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailsites:index'))
# Check that the site was created
self.assertEqual(Site.objects.filter(hostname='testsite').count(), 1)
def test_get_edit_view(self):
edit_url = reverse('wagtailsites:edit', args=(self.localhost.id,))
response = self.client.get(edit_url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailsites/edit.html')
def test_edit(self):
edit_url = reverse('wagtailsites:edit', args=(self.localhost.id,))
edited_hostname = 'edited'
response = self.client.post(edit_url, {
'hostname': edited_hostname,
'port': 80,
'root_page': self.home_page.id,
})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailsites:index'))
# Check that the site was edited
self.assertEqual(Site.objects.get(id=self.localhost.id).hostname, edited_hostname)
def test_get_delete_view(self):
delete_url = reverse('wagtailsites:delete', args=(self.localhost.id,))
response = self.client.get(delete_url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailadmin/generic/confirm_delete.html')
def test_delete(self):
delete_url = reverse('wagtailsites:delete', args=(self.localhost.id,))
response = self.client.post(delete_url)
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailsites:index'))
        # Check that the site was deleted
with self.assertRaises(Site.DoesNotExist):
Site.objects.get(id=self.localhost.id)
|
|
"""Required models to handle and store generated reports."""
import base64
import pickle
from datetime import date
from decimal import Decimal, getcontext
from timeit import default_timer as timer
from django import forms
from django.conf import settings
from django.core.mail import send_mail
from django.db import models
from django.template import Context
from django.template.loader import get_template
from . import settings as app_settings
from .managers import SchedulerManager
from .utils import my_import, str_to_date
ONMYDESK_FILE_HANDLER = getattr(settings, 'ONMYDESK_FILE_HANDLER', None)
class ReportNotSavedException(Exception):
"""Exception used when a report is not saved."""
pass
def output_file_handler(filepath):
"""Return the output filepath (handled or not by an external function).
    This function tries to find a handler function in `settings.ONMYDESK_FILE_HANDLER`.
    The handler must receive a filepath and return a new filepath (or URL, e.g.) to be
    stored in the report register. It's useful for post-processing report results
    (moving them to other directories or to the cloud).
    :param str filepath: File path of the output generated by the report.
    :returns: File path to output (processed or not by an external handler)
:rtype: str
"""
function_handler = app_settings.ONMYDESK_FILE_HANDLER
if not function_handler:
return filepath
handler = my_import(function_handler)
return handler(filepath)
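# Illustrative sketch (not part of onmydesk): a handler that could be wired up
# in the Django settings as
# ONMYDESK_FILE_HANDLER = 'myproject.report_handlers.move_to_media'.
# The function name and destination are assumptions for the example.
def _example_move_to_media(filepath):
    """Move a generated report into MEDIA_ROOT and return its new location."""
    import os
    import shutil
    dest = os.path.join(settings.MEDIA_ROOT, os.path.basename(filepath))
    shutil.move(filepath, dest)
    return dest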
class Report(models.Model):
"""Report model to store generated reports."""
STATUS_PENDING = 'pending'
STATUS_PROCESSING = 'processing'
STATUS_PROCESSED = 'processed'
STATUS_ERROR = 'error'
STATUS_CHOICES = (
(STATUS_PENDING, 'Pending'),
(STATUS_PROCESSING, 'Processing'),
(STATUS_PROCESSED, 'Processed'),
(STATUS_ERROR, 'Error'),
)
status = models.CharField(max_length=20, choices=STATUS_CHOICES, default=STATUS_PENDING)
process_time = models.DecimalField(verbose_name='Process time (secs)', max_digits=10,
decimal_places=4, null=True, blank=True)
params = models.BinaryField(verbose_name='Report params', null=True, blank=True)
report = models.CharField(max_length=255)
results = models.CharField(max_length=255, null=True, blank=True)
insert_date = models.DateTimeField('Creation Date', auto_now_add=True)
update_date = models.DateTimeField('Update Date', auto_now=True)
created_by = models.ForeignKey(
settings.AUTH_USER_MODEL, on_delete=models.CASCADE, null=True)
def __str__(self):
"""Return string representation of object."""
if not self.report:
return 'Report object'
report_class = my_import(self.report)
return '{}{}'.format(
report_class.name,
' #{}'.format(self.id) if self.id else '')
def set_params(self, params):
"""Set params to be used when report is processed.
:param dict params: Dictionary with params to be used to process report.
"""
self.params = base64.b64encode(pickle.dumps(params))
def get_params(self):
"""Return param to be used to process report.
:return: Report params
"""
if self.params:
return pickle.loads(base64.b64decode(self.params))
return None
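    # Round-trip sketch: set_params() pickles and base64-encodes the dict for
    # storage in the BinaryField; get_params() reverses it:
    #
    #     report.set_params({'start_date': '2015-01-01'})
    #     report.get_params()  # -> {'start_date': '2015-01-01'}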
def process(self):
"""Process this report.
        After processing, the outputs are stored in `results`.
        To access them it is recommended to use :func:`results_as_list`.
"""
if not self.id:
raise ReportNotSavedException()
self.status = Report.STATUS_PROCESSING
self.save(update_fields=['status'])
report_class = my_import(self.report)
report = report_class(params=self.get_params())
try:
getcontext().prec = 5
start = Decimal(timer())
report.process()
self.process_time = Decimal(timer()) - start
results = []
for filepath in report.output_filepaths:
results.append(output_file_handler(filepath))
self.results = ';'.join(results)
            self.status = Report.STATUS_PROCESSED
            self.save(update_fields=['status', 'process_time', 'results'])
except Exception as e:
self.status = Report.STATUS_ERROR
self.save(update_fields=['status'])
raise e
@property
def result_links(self):
"""Return a list with links to access report results.
:returns: List of links to access results
:rtype: list
"""
link_handler = app_settings.ONMYDESK_DOWNLOAD_LINK_HANDLER
        if not link_handler:
            return self.results_as_list
handler = my_import(link_handler)
return [handler(i) for i in self.results_as_list]
@property
def results_as_list(self):
"""Return a list of output results stored in this model.
:returns: List of results
:rtype: list
"""
if not self.results:
return []
return self.results.split(';')
class Scheduler(models.Model):
"""Model used to schedule reports.
    It is used to schedule reports to be generated with some
    periodicity (every Monday, Monday to Friday, etc.).
"""
objects = SchedulerManager()
PER_MON_FRI = 'mon_fri'
PER_MON_SUN = 'mon_sun'
PER_SUN = 'sun'
PER_MON = 'mon'
PER_TUE = 'tue'
PER_WED = 'wed'
PER_THU = 'thu'
PER_FRI = 'fri'
PER_SAT = 'sat'
PERIODICITIES = (
(PER_MON_FRI, 'Monday to Friday'),
(PER_MON_SUN, 'Monday to Sunday'),
(PER_SUN, 'Every Sunday'),
(PER_MON, 'Every Monday'),
(PER_TUE, 'Every Tuesday'),
(PER_WED, 'Every Wednesday'),
(PER_THU, 'Every Thursday'),
(PER_FRI, 'Every Friday'),
(PER_SAT, 'Every Saturday'),
)
PERIODICITIES_BY_WEEKDAY = {
# Monday is 0 and Sunday is 6
0: {PER_MON_SUN, PER_MON_FRI, PER_MON},
1: {PER_MON_SUN, PER_MON_FRI, PER_TUE},
2: {PER_MON_SUN, PER_MON_FRI, PER_WED},
3: {PER_MON_SUN, PER_MON_FRI, PER_THU},
4: {PER_MON_SUN, PER_MON_FRI, PER_FRI},
5: {PER_MON_SUN, PER_SAT},
6: {PER_MON_SUN, PER_SUN},
}
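    # Lookup sketch: a scheduler is due on a given day when its periodicity
    # appears in the set for that weekday, e.g.:
    #
    #     due = Scheduler.PERIODICITIES_BY_WEEKDAY[date.today().weekday()]
    #     is_due_today = scheduler.periodicity in due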
report = models.CharField(max_length=255)
periodicity = models.CharField(max_length=20, choices=PERIODICITIES)
params = models.BinaryField(verbose_name='Parameters', null=True, blank=True)
notify_emails = models.CharField('E-mail\'s to notify after process (separated by ",")',
max_length=1000, null=True, blank=True)
insert_date = models.DateTimeField('Creation Date', auto_now_add=True)
update_date = models.DateTimeField('Update Date', auto_now=True)
created_by = models.ForeignKey(
settings.AUTH_USER_MODEL, on_delete=models.CASCADE, null=True)
def __str__(self):
"""Return string representation of object."""
if not self.report:
return 'Scheduler object'
report_class = my_import(self.report)
return '{}{}'.format(
report_class.name,
' #{}'.format(self.id) if self.id else '')
def set_params(self, params):
"""Set params to be used when report is processed.
:param dict params: Dictionary with params to be used to process report.
"""
self.params = base64.b64encode(pickle.dumps(params))
def get_params(self):
"""Return params to be used to process report.
:return: Report params
"""
if self.params:
return pickle.loads(base64.b64decode(self.params))
return None
def process(self, reference_date=None):
"""Process scheduler creating and returing a report.
After processing, this method tries to notify e-mails filled in notify_emails field.
:returns: Report result
:rtype: Report
"""
report = Report(report=self.report,
# Avoid other routines to get this report to process
status=Report.STATUS_PROCESSING,
created_by=self.created_by)
report.set_params(self.get_processed_params(reference_date))
report.save()
report.process()
report.save()
self._notify(report)
return report
def get_processed_params(self, reference_date=None):
"""Return params to be used to process report.
:param date reference_date: Date to use as reference
:returns: Dict with params
:rtype: dict
"""
reference_date = reference_date or date.today()
report_class = my_import(self.report)
params = self.get_params()
if not params:
return None
form = report_class.get_form()
if not form:
return params
for name, field in form.base_fields.items():
if name in params and isinstance(field, forms.fields.DateField):
params[name] = str_to_date(params[name], reference_date)
return params
# TODO: Move this to a signal handler ('scheduler processed' or something like that).
def _notify(self, report):
text_template = get_template('onmydesk/scheduler-notify.txt')
html_template = get_template('onmydesk/scheduler-notify.html')
destinations = self.notify_emails.split(',') if self.notify_emails else []
if not destinations:
return
context = dict(
scheduler=self,
report=report,
)
text_content = text_template.render(Context(context))
html_content = html_template.render(Context(context))
send_mail(
app_settings.ONMYDESK_SCHEDULER_NOTIFY_SUBJECT.format(
report_name=str(report)),
text_content,
app_settings.ONMYDESK_NOTIFY_FROM,
            destinations,
html_message=html_content)
|
|
__author__ = "George Chantzialexiou"
__copyright__ = "Copyright 2013-2014, http://radical.rutgers.edu"
__license__ = "MIT"
import sys
import os
import radical.pilot as rp
import time
import copy
import numpy as np
SHARED_INPUT_FILE = 'dataset.in'
MY_STAGING_AREA = 'staging:///'
""" DESCRIPTION: k-means
    For every chunk of the dataset a mapper task A_n is started.
"""
# READ: The RADICAL-Pilot documentation:
# http://radicalpilot.readthedocs.org/en/latest
#
# Try running this example with RADICAL_PILOT_VERBOSE=debug set if
# you want to see what happens behind the scenes!
#------------------------------------------------------------------------------
#
def pilot_state_cb (pilot, state) :
""" this callback is invoked on all pilot state changes """
print "[Callback]: ComputePilot '%s' state: %s." % (pilot.uid, state)
if state == rp.FAILED :
sys.exit (1)
#------------------------------------------------------------------------------
#
def unit_state_cb (unit, state) :
""" this callback is invoked on all unit state changes """
print "[Callback]: ComputeUnit '%s' state: %s." % (unit.uid, state)
# ------------------------------------------------------------------------------
#
def get_distance(dataPoint, centroid):
# Calculate Euclidean distance.
return np.sqrt(sum((dataPoint - centroid) ** 2))
# ------------------------------------------------------------------------------
#
if __name__ == "__main__":
args = sys.argv[1:]
if len(args) < 1:
print "Usage: Give the number of the divisions you want to create Try:"
print "python k-means k"
sys.exit(-1)
k = int(sys.argv[1]) # number of the divisions - clusters
DIMENSIONS = 3
# Check if the dataset exists and count the total number of lines of the dataset
try:
data = open(SHARED_INPUT_FILE,'r')
except IOError:
print "Missing data-set. file! Check the name of the dataset"
sys.exit(-1)
total_file_lines = sum(1 for _ in data)
#-----------------------------------------------------------------------
    # Take the first k elements of the dataset as the initial centroids
    # (one coordinate per line; not actually random)
data.seek(0,0) # move fd to the beginning of the file
centroid = list()
for i in range(0,DIMENSIONS*k):
centroid.append(data.readline())
data.close()
centroid = map(float,centroid)
print centroid
#--------------------------------------------------------------------------
## Put the centroids into a file to share
centroid_to_string = ','.join(map(str,centroid))
centroid_file = open('centroids.data', 'w')
centroid_file.write(centroid_to_string)
centroid_file.close()
#-------------------------------------------------------------------------
# Initialization of variables
CUs = 2 # NOTE: Define how many CUs you are willing to use
convergence = False # We have no convergence yet
m = 0 # number of iterations
maxIt = 10 # the maximum number of iteration
chunk_size = total_file_lines/DIMENSIONS
chunk_size = chunk_size/CUs
    chunk_size *= DIMENSIONS # number of dataset lines each unit will process
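    # Worked example (hypothetical numbers): with a 3000-line file,
    # DIMENSIONS = 3 and CUs = 2, the dataset holds 1000 points; each unit
    # processes 500 points, i.e. chunk_size = 1500 lines.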
#------------------------
try:
start_time = time.time()
#DBURL = "mongodb://localhost:27017"
session = rp.Session()
# ----- CHANGE THIS -- CHANGE THIS -- CHANGE THIS -- CHANGE THIS ------
#
# Change the user name below if you are using a remote resource
# and your username on that resource is different from the username
# on your local machine.
#
#c = rp.Context('ssh')
#c.user_id = "username"
#c.user_pass = "passcode"
#session.add_context(c)
# Add a Pilot Manager. Pilot managers manage one or more ComputePilots.
print "Initializing Pilot Manager ..."
pmgr = rp.PilotManager(session=session)
# Register our callback with the PilotManager. This callback will get
# called every time any of the pilots managed by the PilotManager
# change their state.
pmgr.register_callback(pilot_state_cb)
# ----- CHANGE THIS -- CHANGE THIS -- CHANGE THIS -- CHANGE THIS ------
#
# If you want to run this example on your local machine, you don't have
# to change anything here.
#
# Change the resource below if you want to run on a remote resource.
# You also might have to set the 'project' to your allocation ID if
# your remote resource does compute time accounting.
#
# A list of preconfigured resources can be found at:
# http://radicalpilot.readthedocs.org/en/latest/machconf.html#preconfigured-resources
#
# define the resources you need
pdesc = rp.ComputePilotDescription()
pdesc.resource = "local.localhost" # NOTE: This is a "label", not a hostname
pdesc.runtime = 10 # minutes
pdesc.cores = CUs # define cores
pdesc.cleanup = False
# submit the pilot.
print "Submitting Compute Pilot to Pilot Manager ..."
pilot = pmgr.submit_pilots(pdesc)
#-----------------------------------------------------------------------
# Define the url of the local file in the local directory
shared_input_file_url = 'file://%s/%s' % (os.getcwd(), SHARED_INPUT_FILE)
staged_file = "%s%s" % (MY_STAGING_AREA, SHARED_INPUT_FILE)
# Configure the staging directive for to insert the shared file into
# the pilot staging directory.
sd_pilot = {'source': shared_input_file_url,
'target': staged_file,
'action': rp.TRANSFER
}
# Synchronously stage the data to the pilot
pilot.stage_in(sd_pilot)
# Configure the staging directive for shared input file.
sd_shared = {'source': staged_file,
'target': SHARED_INPUT_FILE,
'action': rp.LINK
}
# Combine the ComputePilot, the ComputeUnits and a scheduler via
# a UnitManager object.
print "Initializing Unit Manager ..."
umgr = rp.UnitManager(session, rp.SCHED_DIRECT_SUBMISSION)
# Register our callback with the UnitManager. This callback will get
# called every time any of the units managed by the UnitManager
# change their state.
umgr.register_callback(unit_state_cb)
# Add the created ComputePilot to the UnitManager.
print "Registering Compute Pilot with Unit Manager ..."
umgr.add_pilots(pilot)
#-------------------------------------------------------------------------
## CUS & map - reduce
        while m < maxIt and not convergence:
## MAPPER PHASE
mylist = []
for i in range(1,CUs+1):
cudesc = rp.ComputeUnitDescription()
cudesc.executable = "python"
cudesc.arguments = ['mapper.py', i, k, chunk_size, CUs, DIMENSIONS]
cudesc.input_staging = ['mapper.py', sd_shared, 'centroids.data']
cudesc.output_staging = ["combiner_file_%d.data" % i]
mylist.append(cudesc)
print 'Submitting the CU to the Unit Manager...'
mylist_units = umgr.submit_units(mylist)
# wait for all units to finish
umgr.wait_units()
print "All Compute Units completed successfully!"
#-------------------------------------------------------------------------------
# Aggregate all partial sums of each Cluster to define the new centroids
afile = []
total_sums = [] # total partial sums per cluster
            total_nums = []  # total number of samples per cluster
new_centroids = list()
# initiate values
for i in range(0,k):
total_nums.append(0)
total_sums.append(0)
new_centroids.append(0)
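            # Each combiner file is expected to contain tab-separated lines
            # of the form "<cluster>\t<partial_sum_csv>\t<count>", e.g.
            # "0\t1.5,2.0,3.5\t7" (format inferred from the parsing below).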
for i in range(0,CUs):
afile.append(open("combiner_file_%d.data" % (i+1), "rb"))
for line in afile[i]:
line = line.strip() #remove newline character
                    cluster,p_sum,num = line.split('\t',2) # split line into cluster no., partial sum and sample count
cluster = int(cluster)
p_sum = p_sum.split(',')
p_sum = map(float,p_sum)
p_sum = np.asfarray(p_sum)
total_sums[cluster] += p_sum
total_nums[cluster] += int(num)
afile[i].close()
# new values
convergence = True
m+=1
for i in range(0,k):
if total_nums[i]!=0:
new_centroids[i] = total_sums[i] / total_nums[i]
centroid = np.asfarray(centroid)
centroid = np.reshape(centroid,(-1,DIMENSIONS))
            arr = np.zeros(DIMENSIONS) # origin, used as a fixed reference point
# check convergence and update centroids
for i in range(0,k):
if total_nums[i]!=0 and abs(get_distance(centroid[i],arr) - get_distance(new_centroids[i],arr))>=0.1*get_distance(centroid[i],arr):
convergence = False
if total_nums[i]!=0:
centroid[i] = new_centroids[i]
centroid = np.reshape(centroid,k*DIMENSIONS).tolist()
# Put the centroids into a file to share
centroid_to_string = ','.join(map(str,centroid))
centroid_file = open('centroids.data', 'w')
centroid_file.write(centroid_to_string)
centroid_file.close()
#--------------------------------------------------------------------------------
#--------------------END OF K-MEANS ALGORITHM --------------------------#
# K - MEANS ended successfully - print total times and centroids
print 'K-means algorithm ended successfully after %d iterations' % m
total_time = time.time() - start_time # total execution time
print 'The total execution time is: %f seconds' % total_time
total_time /= 60
print 'Which is: %f minutes' % total_time
print 'Centroids:'
centroid = np.asfarray(centroid)
centroid= np.reshape(centroid,(-1,DIMENSIONS))
print centroid
#cleanup intermediate files
for i in range(1,CUs+1):
string = 'combiner_file_%d.data' % i
os.remove(string)
except Exception as e:
# Something unexpected happened in the pilot code above
print "caught Exception: %s" % e
raise
except (KeyboardInterrupt, SystemExit) as e:
        # the callback called sys.exit(), and we catch the corresponding
        # KeyboardInterrupt exception here for shutdown. We also catch
        # SystemExit (which gets raised if the main thread exits for some
        # other reason).
print "need to exit now: %s" % e
finally:
# always clean up the session, no matter if we caught an exception or
# not.
print "Session closed, exiting now ..."
session.close(cleanup=True, terminate=True)
|
|
import logging
import re
import warnings
from urllib import urlencode
from django.template import Context
from django.template.loader import get_template
from monocle.cache import cache
from monocle.resources import Resource
from monocle.settings import settings
from monocle.tasks import request_external_oembed
from monocle.util import synced
logger = logging.getLogger(__name__)
class InvalidProvider(Exception):
"""
    General purpose exception used to indicate a provider is misconfigured
"""
pass
class Provider(object):
"""
A Provider is essentially an OEmbed endpoint that, well, provides
rich content to a consumer. Providers are aware of URL patterns of
rich content they provide and what URL endpoint should be contacted
to retrieve a resource for OEmbedding.
Monocle providers should be configured to expose an attribute representing
the type of content they provide. Valid options are:
* rich
* link
* video
* photo
Monocle providers are also optionally exposed from a django url (configured
in :mod:`monocle.urls`). Exposing monocle providers allows external sources
    to consume rich content from django.
"""
api_endpoint = None
url_schemes = None
resource_type = None
is_active = True # Enable this provider to serve content
expose = False # Expose this provider externally
_internal = False
def get_resource(self, url, **kwargs):
"""
Obtain an OEmbed resource JSON
:param string url: Requested rich content URL
:param kwargs: Optional arguments along with this request.
Currently only ``maxwidth`` and ``maxheight`` supported.
:returns: :class:`monocle.resources.Resource`
.. note::
Currently only JSON-compatible requests are honored. If a request is
made for an XML resource, it will still return a JSON resource
"""
params = kwargs
params['url'] = url
# Only support JSON format
params['format'] = 'json'
request_url = self.get_request_url(**params)
logger.info('Obtaining OEmbed resource at %s' % request_url)
cached, primed = cache.get_or_prime(request_url, primer=Resource(params['url']))
if primed or cached.is_stale:
# Prevent many tasks being issued
if cached.is_stale:
cache.set(request_url, cached.refresh())
request_external_oembed.apply_async((request_url,))
logger.info('Scheduled external request for OEmbed resource %s' % url)
return cached
def get_request_url(self, **params):
"""
Constructs a request URL to the provider API endpoint with kwargs
        as URL parameters. Removes maxwidth and maxheight if they are
        zero-like (i.e. 0, '0' or None)
:returns: Escaped endpoint url
"""
zeros = (0, '0', None)
# Remove maxwidth/maxheight of "0"
for param in ('maxwidth', 'maxheight'):
if param in params and params[param] in zeros:
del params[param]
return '%s?%s' % (self.api_endpoint, urlencode(params))
@classmethod
def schemes_to_regex_str(cls, schemes):
"""
        Replace wildcards with non-greedy dot-star patterns and escape literal dots.
Since many providers may honor several URL patterns for content they
serve, this will essentially create a single regular expression that
can test all of them simultaneously.
        For example::
            >>> cls.schemes_to_regex_str(['http://foo.com/a/*', 'http://foo.com/b/*'])
            '(http://foo\\.com/a/.*?|http://foo\\.com/b/.*?)'
:param list schemes: URL pattern strings
:returns: A single regex string
"""
regex = '(%s)' % '|'.join(map(str, schemes))
regex = regex.replace('.', '\\.').replace('*', '.*?')
return regex
def match(self, url):
"""
Tests a url against the url schemes of a provider instance
:param string url: URL to test
:returns: Bool False if provider has no schemes, None if no match found,
a python re match if found
"""
if self.url_schemes:
return re.match(self.schemes_to_regex_str(self.url_schemes), url, re.I)
else:
logger.warning('No URL schemes defined for provider %s' % self.__class__.__name__)
return False
def nearest_allowed_size(self, width, height, maxwidth=None, maxheight=None):
"""
Obtain a 'nearest size' that is just below a specific maximum. In other words,
this is min(current_size, max_size). This will scan either the provider's
        ``DIMENSIONS`` attribute (a list of int two-tuples representing valid sizes) or
``RESOURCE_DEFAULT_DIMENSIONS`` configured in :mod:`monocle.settings`.
:param integer width: Current width integer
:param integer height: Current height integer
:param integer maxwidth: Maximum width integer to constrain within
:param integer maxheight: Maximum height integer to constrain within
:returns: Size as integer two-tuple (width, height). This is either
largest allowable size or the default maximum if no allowable size
found
"""
logger.debug('Resizing (%s, %s) to nearest allowed size' % (width, height))
maxdim = (width, height)
if maxwidth and width > maxwidth:
logger.debug('Width exceeds maxwidth %s' % maxwidth)
maxdim = (maxwidth, maxdim[1])
if maxheight and height > maxheight:
logger.debug('Height exceeds maxheight %s' % maxheight)
maxdim = (maxdim[0], maxheight)
dims = getattr(self, 'DIMENSIONS', settings.RESOURCE_DEFAULT_DIMENSIONS)
smaller = lambda x, y: x[0] <= y[0] and x[1] <= y[1]
valid_sizes = [d for d in dims if smaller(d, maxdim)]
if valid_sizes:
valid_sizes.sort(reverse=True)
logger.debug('Nearest allowed size for %s: %s' % (maxdim, valid_sizes))
return valid_sizes[0]
else:
logger.debug('No appropriate size found. Returning default %s' % (maxdim,))
return maxdim
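    # For example (hypothetical sizes): with DIMENSIONS = [(400, 300),
    # (800, 600)], nearest_allowed_size(1024, 768, maxwidth=800) clamps the
    # maximum to (800, 768) and returns the largest allowed size, (800, 600).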
class InternalProvider(Provider):
"""
An internal provider is meant as a means to abstract locally provided
    rich content resources. In essence, this means that these types of providers
    require no network operations. Instead, implementers should provide a
    mechanism, :func:`get_object`, to retrieve an object-specific instance.
Properly implemented providers should follow a basic contract
* Implement :func:`get_object` as a means to convert a URL to an instance
* Define attribute ``DIMENSIONS`` of integer two-tuples
* Define attribute ``html_template`` that is a str template path used to render
object specific HTML for embedding
* Define attribute ``url_schemes`` as a list of asterisk wildcard URL patterns
* Define attribute ``resource_type`` that is a valid OEmbed type
* Define attributes ``DEFAULT_WIDTH`` and ``DEFAULT_HEIGHT`` as fallback
dimensions in case oembed consumers do not specify maximum dimensions
    Providers should also define properties or methods that correspond
to names of OEmbed resource attributes. These are listed in :mod:`monocle.settings`
under ``RESOURCE_REQUIRED_ATTRS`` and ``RESOURCE_OPTIONAL_ATTRS``.
    Implementations are not automatically registered; they must be added manually via
    :class:`ProviderRegistry`::
from monocle.providers import registry
registry.register(MyInternalProvider)
"""
# A list of tuples of valid size dimensions: (width, height)
DIMENSIONS = []
html_template = None
expose = settings.EXPOSE_LOCAL_PROVIDERS
api_endpoint = 'http://localhost/'
_internal = True
# Internal providers are specific instances
_params = {}
@classmethod
def get_object(cls, url):
"""
A mechanism to convert a URL for some rich content and return a specific
instance of InternalProvider. This should be implemented by subclasses
of :class:`InternalProvider` otherwise NotImplementedError will be raised.
Implementers should ensure this method returns a provider instance or None
if no suitable provider can be found for the given URL.
:param string url: URL to convert to provider instance
:returns: A provider instance or None
"""
raise NotImplementedError
@classmethod
def match(cls, url):
if cls.url_schemes:
return re.match(cls.schemes_to_regex_str(cls.url_schemes), url, re.I)
else:
logger.warning('No URL schemes defined for provider %s' % cls.__name__)
return False
def render_html(self, data):
"""
Helper to directly render data to the html_template of this provider.
:param dict data: Data that is passed directly to the template as a context
:returns: Rendered template
"""
if not self.html_template:
return ''
template = get_template(self.html_template)
return template.render(Context(data))
@property
def maxwidth(self):
return self._params.get('maxwidth', None) or getattr(self, 'DEFAULT_WIDTH', None)
@property
def maxheight(self):
return self._params.get('maxheight', None) or getattr(self, 'DEFAULT_HEIGHT', None)
@property
def width(self):
# TODO: Is this the right way to handle this? Expensive?
return self.nearest_allowed_size(self.maxwidth, self.maxheight)[0]
@property
def height(self):
# TODO: Is this the right way to handle this? Expensive?
return self.nearest_allowed_size(self.maxwidth, self.maxheight)[1]
def _data_attribute(self, name, required=False):
"""
        Gets an attribute as it should be defined by an implementer.
        Raises NotImplementedError if the attribute is indicated as required
        but not implemented
Example::
            class VideoProvider(InternalProvider):
resource_type = 'video'
html_template = 'path/to/embed/template.html'
                def html(self):
                    return self.render_html({'width': self.width, 'height': self.height})
@property
def width(self):
return 100
@property
def height(self):
return 100
"""
attr = getattr(self, name, None)
if callable(attr):
attr = attr()
if attr is None and required:
raise NotImplementedError
return attr
def _check_dimension(self, width, height, maxwidth=None, maxheight=None, message=None):
"""
Raises a warning with optional message if width and height exceeds the maximum
allowable size as defined by this provider
"""
new_width, new_height = self.nearest_allowed_size(width, height,
maxwidth=maxwidth,
maxheight=maxheight)
if new_width < width or new_height < height:
warnings.warn(message or 'Resource size exceeds allowable dimensions')
def _build_resource(self, **kwargs):
"""
Constructs a valid JSON resource response complying to OEmbed spec based
on the attributes exposed by the provider
"""
# These are always required
url = kwargs.get('url')
data = {
'type': self.resource_type,
'version': '1.0'
}
# Apply required attributes by resource type
for attr in settings.RESOURCE_REQUIRED_ATTRS.get(self.resource_type, []):
data[attr] = self._data_attribute(attr, required=True)
# Optional attributes
for attr in settings.RESOURCE_OPTIONAL_ATTRS:
data[attr] = self._data_attribute(attr)
# Raise a warning if width/height exceed maximum requested and scale
# TODO: I'm still not convinced this is the right way to handle this
if settings.RESOURCE_CHECK_INTERNAL_SIZE:
if 'width' in data and 'height' in data:
self._check_dimension(data['width'], data['height'],
maxwidth=kwargs.get('maxwidth'),
maxheight=kwargs.get('maxheight'))
if 'thumbnail_width' in data and 'thumbnail_height' in data:
self._check_dimension(data['thumbnail_width'], data['thumbnail_height'],
maxwidth=kwargs.get('maxwidth'),
maxheight=kwargs.get('maxheight'),
message='Thumbnail size exceeds allowable dimensions')
return Resource(url, data)
def get_resource(self, url, **kwargs):
self._params = kwargs
self._params['url'] = url
# Only support JSON format
self._params['format'] = 'json'
if settings.CACHE_INTERNAL_PROVIDERS:
cache_key = self.get_request_url(**self._params)
logger.debug('Checking InternalProvider cache for key %s' % cache_key)
cached, primed = cache.get_or_prime(cache_key, primer=Resource(url))
if primed or cached.is_stale:
logger.debug('Rebuilding new or stale internal provider resource at %s' % url)
# This is just a safeguard in case the rebuild takes a little time
if cached.is_stale:
cache.set(cache_key, cached.refresh())
cached = self._build_resource(**self._params)
cache.set(cache_key, cached)
return cached
# No caching, build directly
return self._build_resource(**self._params)
class ProviderRegistry(object):
"""
An in-memory storage mechanism for all provider implementations.
Currently, external providers are pre-populated as instances of
:class:`ThirdPartyProvider`. Implementations of :class:`InternalProvider`
need to be manually added to registry::
from monocle.providers import registry
registry.register(MyProvider)
"""
# Separate internal and external providers. Prefer internal first
_providers = {'internal': [], 'external': []}
def __contains__(self, provider):
"""
Checks if a provider instance or class is in the registry
:param provider: A :class:`Provider` instance or subclass
:returns: True if in registry, False otherwise
"""
return provider in self._providers[self._provider_type(provider)]
def ensure_populated(self):
"""
Ensures the external provider cache is pre-populated with all external
provider instances. This will run only if the internal cache of external
providers is empty
"""
# BOO circular import prevention
from monocle.models import ThirdPartyProvider
# Models have post_save/delete signals. We only need to ensure once
if self._providers['external']:
return
# Populate with things we know about: models - ONLY IF THE DB IS SYNCED
if synced(ThirdPartyProvider):
self._providers['external'] = list(ThirdPartyProvider.objects.all())
def _provider_type(self, provider):
"""
Resolves the provider type as internal or external. This is done
by checking the ``_internal`` attribute of the given parameter
:param provider: A :class:`Provider` instance or subclass
:returns: str 'internal' if an internal provider, str 'external' otherwise
"""
return 'internal' if provider._internal else 'external'
def clear(self):
"""
Clears the internal provider registry
"""
self._providers = {'internal': [], 'external': []}
def update(self, provider):
"""
Updates an entry in the registry with one provided. If the provider
does not exist in the registry, it is silently added.
:param provider: A :class:`Provider` instance or subclass
"""
type = self._provider_type(provider)
try:
idx = self._providers[type].index(provider)
except ValueError:
# Provider not in the registry
self._providers[type].append(provider)
logger.debug('Adding provider %s to %s registry' % (provider, type))
else:
self._providers[type][idx] = provider
            logger.debug('Updating provider %s in %s registry' % (provider, type))
def unregister(self, provider):
"""
Removes a provider from the registry.
:param provider: A :class:`Provider` instance or subclass
"""
type = self._provider_type(provider)
        logger.debug('Removing provider %s from %s registry' % (provider, type))
try:
self._providers[type].remove(provider)
except ValueError:
# Provider not in the list
pass
def match(self, url):
"""
Locates the first provider that matches the URL. This
prefers matching internal providers over external providers
:param string url: URL to match a provider against
:returns: A provider instance or None if no match is found
"""
logger.debug('Locating provider match for %s' % url)
return self.match_type(url, 'internal') or self.match_type(url, 'external')
def match_type(self, url, type):
"""
Searches the internal provider registry for a matching
provider for the url based on type
:param string url: URL to match a provider against
:param string type: The type of provider to check (either 'internal' or 'external')
:returns: A provider instance or None if no match is found
"""
matched = None
for provider in self._providers[type]:
if provider.match(url):
matched = provider
break
# If the match is internal, obtain specific instance
if matched and hasattr(matched, 'get_object'):
try:
matched = matched.get_object(url)
except Exception:
logger.exception('InternalProvider %s get_object failed' % matched)
matched = None
if getattr(matched, 'is_active', True):
return matched
else:
return None
def register(self, provider):
"""
Adds an internal provider class to the registry.
:param provider: A subclass of :class:`InternalProvider`
:raises: :class:`InvalidProvider` if the supplied param is not a valid subclass
"""
        self.ensure_populated()
if not isinstance(provider, Provider):
try:
if not issubclass(provider, InternalProvider):
raise InvalidProvider('Object %s is not a valid Provider type' % provider)
except TypeError:
raise InvalidProvider('Object %s is not a valid Provider type' % provider)
type = self._provider_type(provider)
self._providers[type].append(provider)
logger.debug('Adding provider %s to %s registry' % (provider, type))
registry = ProviderRegistry()
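# A minimal sketch of an InternalProvider implementation (the name, URL
# pattern, template path and sizes below are hypothetical, for illustration
# only):
class ExampleVideoProvider(InternalProvider):
    """Illustrative only: serves a fixed set of video sizes"""
    resource_type = 'video'
    url_schemes = ['http://example.com/videos/*']
    html_template = 'example/embed.html'  # hypothetical template path
    DIMENSIONS = [(400, 300), (800, 600)]
    DEFAULT_WIDTH = 800
    DEFAULT_HEIGHT = 600
    @classmethod
    def get_object(cls, url):
        # A real implementation would resolve the URL to a specific object;
        # this sketch simply returns a bare provider instance
        return cls()
    def html(self):
        return self.render_html({'width': self.width, 'height': self.height})
# Registration stays manual, e.g.: registry.register(ExampleVideoProvider)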
|
|
##
# @filename : epd7in5.py
# @brief       :   Driver for the dual-color e-paper display
# @author : Yehui from Waveshare
#
# Copyright (C) Waveshare July 10 2017
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import epdif
import RPi.GPIO as GPIO
# Display resolution
EPD_WIDTH = 640
EPD_HEIGHT = 384
# EPD7IN5 commands
PANEL_SETTING = 0x00
POWER_SETTING = 0x01
POWER_OFF = 0x02
POWER_OFF_SEQUENCE_SETTING = 0x03
POWER_ON = 0x04
POWER_ON_MEASURE = 0x05
BOOSTER_SOFT_START = 0x06
DEEP_SLEEP = 0x07
DATA_START_TRANSMISSION_1 = 0x10
DATA_STOP = 0x11
DISPLAY_REFRESH = 0x12
IMAGE_PROCESS = 0x13
LUT_FOR_VCOM = 0x20
LUT_BLUE = 0x21
LUT_WHITE = 0x22
LUT_GRAY_1 = 0x23
LUT_GRAY_2 = 0x24
LUT_RED_0 = 0x25
LUT_RED_1 = 0x26
LUT_RED_2 = 0x27
LUT_RED_3 = 0x28
LUT_XON = 0x29
PLL_CONTROL = 0x30
TEMPERATURE_SENSOR_COMMAND = 0x40
TEMPERATURE_CALIBRATION = 0x41
TEMPERATURE_SENSOR_WRITE = 0x42
TEMPERATURE_SENSOR_READ = 0x43
VCOM_AND_DATA_INTERVAL_SETTING = 0x50
LOW_POWER_DETECTION = 0x51
TCON_SETTING = 0x60
TCON_RESOLUTION = 0x61
SPI_FLASH_CONTROL = 0x65
REVISION = 0x70
GET_STATUS = 0x71
AUTO_MEASUREMENT_VCOM = 0x80
READ_VCOM_VALUE = 0x81
VCM_DC_SETTING = 0x82
class EPD:
def __init__(self):
self.reset_pin = epdif.RST_PIN
self.dc_pin = epdif.DC_PIN
self.busy_pin = epdif.BUSY_PIN
self.width = EPD_WIDTH
self.height = EPD_HEIGHT
def digital_write(self, pin, value):
epdif.epd_digital_write(pin, value)
def digital_read(self, pin):
return epdif.epd_digital_read(pin)
def delay_ms(self, delaytime):
epdif.epd_delay_ms(delaytime)
def send_command(self, command):
self.digital_write(self.dc_pin, GPIO.LOW)
        # spi_transfer expects a list rather than an int,
        # so wrap the command byte: [command]
epdif.spi_transfer([command])
def send_data(self, data):
self.digital_write(self.dc_pin, GPIO.HIGH)
        # spi_transfer expects a list rather than an int,
        # so wrap the data byte: [data]
epdif.spi_transfer([data])
def init(self):
if (epdif.epd_init() != 0):
return -1
self.reset()
self.send_command(POWER_SETTING)
self.send_data(0x37)
self.send_data(0x00)
self.send_command(PANEL_SETTING)
self.send_data(0xCF)
self.send_data(0x08)
self.send_command(BOOSTER_SOFT_START)
self.send_data(0xc7)
self.send_data(0xcc)
self.send_data(0x28)
self.send_command(POWER_ON)
self.wait_until_idle()
self.send_command(PLL_CONTROL)
self.send_data(0x3c)
self.send_command(TEMPERATURE_CALIBRATION)
self.send_data(0x00)
self.send_command(VCOM_AND_DATA_INTERVAL_SETTING)
self.send_data(0x77)
self.send_command(TCON_SETTING)
self.send_data(0x22)
self.send_command(TCON_RESOLUTION)
self.send_data(0x02) #source 640
self.send_data(0x80)
self.send_data(0x01) #gate 384
self.send_data(0x80)
self.send_command(VCM_DC_SETTING)
self.send_data(0x1E) #decide by LUT file
self.send_command(0xe5) #FLASH MODE
self.send_data(0x03)
def wait_until_idle(self):
while(self.digital_read(self.busy_pin) == 0): # 0: busy, 1: idle
self.delay_ms(100)
def reset(self):
self.digital_write(self.reset_pin, GPIO.LOW) # module reset
self.delay_ms(200)
self.digital_write(self.reset_pin, GPIO.HIGH)
self.delay_ms(200)
def display_image(self, image, black, red):
assert (image.size == (self.width, self.height))
pixels = image.load()
self.send_command(DATA_START_TRANSMISSION_1)
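        # Two pixels are packed into each byte: the high nibble encodes the
        # first pixel and the low nibble the second (0x0 = black, 0x4 = red,
        # 0x3 = white, matching the masks below)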
for y in range(0, self.height):
for x in range(0, self.width, 2):
data = 0
if pixels[x, y] == black:
data |= 0x00
elif pixels[x, y] == red:
data |= 0x40
else:
data |= 0x30
if pixels[x + 1, y] == black:
data |= 0x00
elif pixels[x + 1, y] == red:
data |= 0x04
else:
data |= 0x03
self.send_data(data)
self.send_command(DISPLAY_REFRESH)
self.delay_ms(100)
self.wait_until_idle()
def display_qimage(self, image, black, red):
assert(image.size().width() == self.width)
assert(image.size().height() == self.height)
self.send_command(DATA_START_TRANSMISSION_1)
for y in range(0, self.height):
for x in range(0, self.width, 2):
data = 0
pixel = image.pixel(x, y)
if pixel == black:
data |= 0x00
elif pixel == red:
data |= 0x40
else:
data |= 0x30
pixel = image.pixel(x + 1, y)
if pixel == black:
data |= 0x00
elif pixel == red:
data |= 0x04
else:
data |= 0x03
self.send_data(data)
self.send_command(DISPLAY_REFRESH)
self.delay_ms(100)
self.wait_until_idle()
def sleep(self):
self.send_command(POWER_OFF)
self.wait_until_idle()
self.send_command(DEEP_SLEEP)
self.send_data(0xa5)
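# A minimal usage sketch (assumes a 640x384 PIL image; the file name and
# pixel values below are hypothetical):
#   from PIL import Image
#   epd = EPD()
#   epd.init()
#   image = Image.open('demo.png').convert('RGB')
#   epd.display_image(image, (0, 0, 0), (255, 0, 0))
#   epd.sleep()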
### END OF FILE ###
|
|
#
# File : mkdir.py
# This file is part of RT-Thread RTOS
# COPYRIGHT (C) 2006 - 2018, RT-Thread Development Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Change Logs:
# Date Author Notes
# 2017-10-04 Bernard The first version
import os
import shutil
from shutil import ignore_patterns
def do_copy_file(src, dst):
# check source file
if not os.path.exists(src):
return
path = os.path.dirname(dst)
    # mkdir if the path does not exist
if not os.path.exists(path):
os.makedirs(path)
shutil.copy2(src, dst)
def do_copy_folder(src_dir, dst_dir, ignore=None):
# check source directory
if not os.path.exists(src_dir):
return
try:
if os.path.exists(dst_dir):
shutil.rmtree(dst_dir)
    except Exception:
        print('Failed to delete folder: %s.' % dst_dir)
return
shutil.copytree(src_dir, dst_dir, ignore = ignore)
source_ext = ['c', 'h', 's', 'S', 'cpp', 'xpm']
source_list = []
def walk_children(child):
global source_list
global source_ext
# print child
full_path = child.rfile().abspath
file_type = full_path.rsplit('.',1)[1]
#print file_type
if file_type in source_ext:
if full_path not in source_list:
source_list.append(full_path)
children = child.all_children()
if children != []:
for item in children:
walk_children(item)
def walk_kconfig(RTT_ROOT, source_list):
for parent, dirnames, filenames in os.walk(RTT_ROOT):
if 'bsp' in parent:
continue
if '.git' in parent:
continue
if 'tools' in parent:
continue
if 'Kconfig' in filenames:
pathfile = os.path.join(parent, 'Kconfig')
source_list.append(pathfile)
if 'KConfig' in filenames:
pathfile = os.path.join(parent, 'KConfig')
source_list.append(pathfile)
def bsp_copy_files(bsp_root, dist_dir):
# copy BSP files
do_copy_folder(os.path.join(bsp_root), dist_dir,
ignore_patterns('build', 'dist', '*.pyc', '*.old', '*.map', 'rtthread.bin', '.sconsign.dblite', '*.elf', '*.axf', 'cconfig.h'))
def bsp_update_sconstruct(dist_dir):
with open(os.path.join(dist_dir, 'SConstruct'), 'r') as f:
data = f.readlines()
with open(os.path.join(dist_dir, 'SConstruct'), 'w') as f:
for line in data:
if line.find('RTT_ROOT') != -1:
if line.find('sys.path') != -1:
f.write('# set RTT_ROOT\n')
f.write('if not os.getenv("RTT_ROOT"): \n RTT_ROOT="rt-thread"\n\n')
f.write(line)
def bsp_update_kconfig(dist_dir):
# change RTT_ROOT in Kconfig
if not os.path.isfile(os.path.join(dist_dir, 'Kconfig')):
return
with open(os.path.join(dist_dir, 'Kconfig'), 'r') as f:
data = f.readlines()
with open(os.path.join(dist_dir, 'Kconfig'), 'w') as f:
found = 0
for line in data:
if line.find('RTT_ROOT') != -1:
found = 1
if line.find('default') != -1 and found:
position = line.find('default')
line = line[0:position] + 'default "rt-thread"\n'
found = 0
f.write(line)
def bsp_update_kconfig_library(dist_dir):
# change RTT_ROOT in Kconfig
if not os.path.isfile(os.path.join(dist_dir, 'Kconfig')):
return
with open(os.path.join(dist_dir, 'Kconfig'), 'r') as f:
data = f.readlines()
with open(os.path.join(dist_dir, 'Kconfig'), 'w') as f:
found = 0
for line in data:
if line.find('RTT_ROOT') != -1:
found = 1
if line.find('../libraries') != -1 and found:
position = line.find('../libraries')
line = line[0:position] + 'libraries/Kconfig"\n'
found = 0
f.write(line)
# change board/kconfig path
if not os.path.isfile(os.path.join(dist_dir, 'board/Kconfig')):
return
with open(os.path.join(dist_dir, 'board/Kconfig'), 'r') as f:
data = f.readlines()
with open(os.path.join(dist_dir, 'board/Kconfig'), 'w') as f:
for line in data:
if line.find('../libraries/HAL_Drivers/Kconfig') != -1:
position = line.find('../libraries/HAL_Drivers/Kconfig')
line = line[0:position] + 'libraries/HAL_Drivers/Kconfig"\n'
f.write(line)
def bs_update_ide_project(bsp_root, rtt_root, rttide = None):
import subprocess
# default update the projects which have template file
    if rttide is None:
tgt_dict = {'mdk4':('keil', 'armcc'),
'mdk5':('keil', 'armcc'),
'iar':('iar', 'iar'),
'vs':('msvc', 'cl'),
'vs2012':('msvc', 'cl'),
'cdk':('gcc', 'gcc')}
else:
item = 'eclipse --project-name=' + rttide['project_name']
tgt_dict = {item:('gcc', 'gcc')}
scons_env = os.environ.copy()
scons_env['RTT_ROOT'] = rtt_root
for item in tgt_dict:
child = subprocess.Popen('scons --target=' + item, cwd=bsp_root, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
stdout, stderr = child.communicate()
if child.returncode == 0:
print('update %s project' % item)
def zip_dist(dist_dir, dist_name):
import zipfile
    zip_filename = dist_dir
zip = zipfile.ZipFile(zip_filename + '.zip', 'w')
pre_len = len(os.path.dirname(dist_dir))
for parent, dirnames, filenames in os.walk(dist_dir):
for filename in filenames:
pathfile = os.path.join(parent, filename)
arcname = pathfile[pre_len:].strip(os.path.sep)
zip.write(pathfile, arcname)
zip.close()
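# For example (hypothetical paths): with dist_dir = '/bsp/dist/demo',
# pre_len = len('/bsp/dist'), so '/bsp/dist/demo/src/main.c' is stored in
# the archive as 'demo/src/main.c'.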
def MkDist_Strip(program, BSP_ROOT, RTT_ROOT, Env):
global source_list
print('make distribution and strip useless files....')
dist_name = os.path.basename(BSP_ROOT)
dist_dir = os.path.join(BSP_ROOT, 'dist-strip', dist_name)
target_path = os.path.join(dist_dir, 'rt-thread')
print('=> %s' % os.path.basename(BSP_ROOT))
bsp_copy_files(BSP_ROOT, dist_dir)
    # copy stm32 bsp library files
if os.path.basename(os.path.dirname(BSP_ROOT)) == 'stm32':
print("=> copy stm32 bsp library")
library_path = os.path.join(os.path.dirname(BSP_ROOT), 'libraries')
library_dir = os.path.join(dist_dir, 'libraries')
bsp_copy_files(os.path.join(library_path, 'HAL_Drivers'), os.path.join(library_dir, 'HAL_Drivers'))
bsp_copy_files(os.path.join(library_path, Env['bsp_lib_type']), os.path.join(library_dir, Env['bsp_lib_type']))
shutil.copyfile(os.path.join(library_path, 'Kconfig'), os.path.join(library_dir, 'Kconfig'))
# do bsp special dist handle
if 'dist_handle' in Env:
print("=> start dist handle")
dist_handle = Env['dist_handle']
dist_handle(BSP_ROOT, dist_dir)
# get all source files from program
for item in program:
walk_children(item)
source_list.sort()
# copy the source files without libcpu and components/libc in RT-Thread
target_list = []
libcpu_dir = os.path.join(RTT_ROOT, 'libcpu').lower()
libc_dir = os.path.join(RTT_ROOT, 'components', 'libc', 'compilers').lower()
sal_dir = os.path.join(RTT_ROOT, 'components', 'net', 'sal_socket').lower()
sources_include_sal = False
for src in source_list:
if src.lower().startswith(BSP_ROOT.lower()):
continue
# skip libc and libcpu dir
if src.lower().startswith(libcpu_dir):
continue
if src.lower().startswith(libc_dir):
continue
if src.lower().startswith(sal_dir):
sources_include_sal = True
continue
if src.lower().startswith(RTT_ROOT.lower()):
target_list.append(src)
source_list = target_list
# get source directory
src_dir = []
for src in source_list:
src = src.replace(RTT_ROOT, '')
if src[0] == os.sep or src[0] == '/':
src = src[1:]
path = os.path.dirname(src)
sub_path = path.split(os.sep)
full_path = RTT_ROOT
for item in sub_path:
full_path = os.path.join(full_path, item)
if full_path not in src_dir:
src_dir.append(full_path)
# add all of SConscript files
for item in src_dir:
source_list.append(os.path.join(item, 'SConscript'))
# add all of Kconfig files
walk_kconfig(RTT_ROOT, source_list)
# copy all files to target directory
source_list.sort()
for src in source_list:
dst = src.replace(RTT_ROOT, '')
if dst[0] == os.sep or dst[0] == '/':
dst = dst[1:]
print('=> %s' % dst)
dst = os.path.join(target_path, dst)
do_copy_file(src, dst)
# copy tools directory
print('=> tools')
do_copy_folder(os.path.join(RTT_ROOT, 'tools'), os.path.join(target_path, 'tools'), ignore_patterns('*.pyc'))
do_copy_file(os.path.join(RTT_ROOT, 'Kconfig'), os.path.join(target_path, 'Kconfig'))
do_copy_file(os.path.join(RTT_ROOT, 'AUTHORS'), os.path.join(target_path, 'AUTHORS'))
do_copy_file(os.path.join(RTT_ROOT, 'COPYING'), os.path.join(target_path, 'COPYING'))
do_copy_file(os.path.join(RTT_ROOT, 'README.md'), os.path.join(target_path, 'README.md'))
do_copy_file(os.path.join(RTT_ROOT, 'README_zh.md'), os.path.join(target_path, 'README_zh.md'))
print('=> %s' % os.path.join('components', 'libc', 'compilers'))
do_copy_folder(os.path.join(RTT_ROOT, 'components', 'libc', 'compilers'), os.path.join(target_path, 'components', 'libc', 'compilers'))
if sources_include_sal:
print('=> %s' % os.path.join('components', 'net', 'sal_socket'))
do_copy_folder(os.path.join(RTT_ROOT, 'components', 'net', 'sal_socket'), os.path.join(target_path, 'components', 'net', 'sal_socket'))
# copy all libcpu/ARCH directory
import rtconfig
print('=> %s' % (os.path.join('libcpu', rtconfig.ARCH, rtconfig.CPU)))
do_copy_folder(os.path.join(RTT_ROOT, 'libcpu', rtconfig.ARCH, rtconfig.CPU), os.path.join(target_path, 'libcpu', rtconfig.ARCH, rtconfig.CPU))
if os.path.exists(os.path.join(RTT_ROOT, 'libcpu', rtconfig.ARCH, 'common')):
print('=> %s' % (os.path.join('libcpu', rtconfig.ARCH, 'common')))
do_copy_folder(os.path.join(RTT_ROOT, 'libcpu', rtconfig.ARCH, 'common'), os.path.join(target_path, 'libcpu', rtconfig.ARCH, 'common'))
do_copy_file(os.path.join(RTT_ROOT, 'libcpu', 'Kconfig'), os.path.join(target_path, 'libcpu', 'Kconfig'))
do_copy_file(os.path.join(RTT_ROOT, 'libcpu', 'SConscript'), os.path.join(target_path, 'libcpu', 'SConscript'))
# change RTT_ROOT in SConstruct
bsp_update_sconstruct(dist_dir)
# change RTT_ROOT in Kconfig
bsp_update_kconfig(dist_dir)
bsp_update_kconfig_library(dist_dir)
# update all project files
bs_update_ide_project(dist_dir, target_path)
# make zip package
zip_dist(dist_dir, dist_name)
print('done!')
def MkDist(program, BSP_ROOT, RTT_ROOT, Env, rttide = None):
print('make distribution....')
dist_name = os.path.basename(BSP_ROOT)
    if rttide is None:
dist_dir = os.path.join(BSP_ROOT, 'dist', dist_name)
else:
dist_dir = rttide['project_path']
target_path = os.path.join(dist_dir, 'rt-thread')
# copy BSP files
print('=> %s' % os.path.basename(BSP_ROOT))
bsp_copy_files(BSP_ROOT, dist_dir)
# do bsp special dist handle
if 'dist_handle' in Env:
print("=> start dist handle")
dist_handle = Env['dist_handle']
dist_handle(BSP_ROOT, dist_dir)
# copy tools directory
print('=> components')
do_copy_folder(os.path.join(RTT_ROOT, 'components'), os.path.join(target_path, 'components'))
# skip documentation directory
# skip examples
# copy include directory
print('=> include')
do_copy_folder(os.path.join(RTT_ROOT, 'include'), os.path.join(target_path, 'include'))
# copy all libcpu/ARCH directory
print('=> libcpu')
import rtconfig
do_copy_folder(os.path.join(RTT_ROOT, 'libcpu', rtconfig.ARCH), os.path.join(target_path, 'libcpu', rtconfig.ARCH))
do_copy_file(os.path.join(RTT_ROOT, 'libcpu', 'Kconfig'), os.path.join(target_path, 'libcpu', 'Kconfig'))
do_copy_file(os.path.join(RTT_ROOT, 'libcpu', 'SConscript'), os.path.join(target_path, 'libcpu', 'SConscript'))
# copy src directory
print('=> src')
do_copy_folder(os.path.join(RTT_ROOT, 'src'), os.path.join(target_path, 'src'))
# copy tools directory
print('=> tools')
do_copy_folder(os.path.join(RTT_ROOT, 'tools'), os.path.join(target_path, 'tools'), ignore_patterns('*.pyc'))
do_copy_file(os.path.join(RTT_ROOT, 'Kconfig'), os.path.join(target_path, 'Kconfig'))
do_copy_file(os.path.join(RTT_ROOT, 'AUTHORS'), os.path.join(target_path, 'AUTHORS'))
do_copy_file(os.path.join(RTT_ROOT, 'COPYING'), os.path.join(target_path, 'COPYING'))
do_copy_file(os.path.join(RTT_ROOT, 'README.md'), os.path.join(target_path, 'README.md'))
do_copy_file(os.path.join(RTT_ROOT, 'README_zh.md'), os.path.join(target_path, 'README_zh.md'))
# change RTT_ROOT in SConstruct
bsp_update_sconstruct(dist_dir)
# change RTT_ROOT in Kconfig
bsp_update_kconfig(dist_dir)
bsp_update_kconfig_library(dist_dir)
# update all project files
    if rttide is None:
bs_update_ide_project(dist_dir, target_path)
else:
bs_update_ide_project(dist_dir, target_path, rttide)
# make zip package
    if rttide is None:
zip_dist(dist_dir, dist_name)
print('done!')
|
|
import pytest
import logging
import os
import subprocess
from distutils.version import LooseVersion
from cassandra import ConsistencyLevel
from ccmlib.common import is_win
from ccmlib.node import handle_external_tool_process, ToolError
import ccmlib.repository
from dtest import Tester, create_ks, create_cf
from tools.assertions import assert_length_equal
from tools.data import insert_c1c2
from tools.jmxutils import (JolokiaAgent, make_mbean,
remove_perf_disable_shared_mem)
since = pytest.mark.since
logger = logging.getLogger(__name__)
@since("2.2", max_version="4")
class TestDeprecatedRepairAPI(Tester):
"""
@jira_ticket CASSANDRA-9570
Test if deprecated repair JMX API runs with expected parameters
"""
def test_force_repair_async_1(self):
"""
test forceRepairAsync(String keyspace, boolean isSequential,
Collection<String> dataCenters,
Collection<String> hosts,
boolean primaryRange, boolean fullRepair, String... columnFamilies)
"""
opt = self._deprecated_repair_jmx("forceRepairAsync(java.lang.String,boolean,java.util.Collection,java.util.Collection,boolean,boolean,[Ljava.lang.String;)",
['ks', True, [], [], False, False, ["cf"]])
assert opt["parallelism"], "parallel" if is_win() else "sequential" == opt
assert opt["primary_range"], "false" == opt
assert opt["incremental"], "true" == opt
assert opt["job_threads"], "1" == opt
assert opt["data_centers"], "[]" == opt
assert opt["hosts"], "[]" == opt
assert opt["column_families"], "[cf]" == opt
def test_force_repair_async_2(self):
"""
test forceRepairAsync(String keyspace, int parallelismDegree,
Collection<String> dataCenters,
Collection<String> hosts,
boolean primaryRange, boolean fullRepair, String... columnFamilies)
"""
opt = self._deprecated_repair_jmx("forceRepairAsync(java.lang.String,int,java.util.Collection,java.util.Collection,boolean,boolean,[Ljava.lang.String;)",
['ks', 1, [], [], True, True, []])
assert opt["parallelism"], "parallel" == opt
assert opt["primary_range"], "true" == opt
assert opt["incremental"], "false" == opt
assert opt["job_threads"], "1" == opt
assert opt["data_centers"], "[]" == opt
assert opt["hosts"], "[]" == opt
assert opt["column_families"], "[]" == opt
def test_force_repair_async_3(self):
"""
test forceRepairAsync(String keyspace, boolean isSequential,
boolean isLocal, boolean primaryRange,
boolean fullRepair, String... columnFamilies)
"""
opt = self._deprecated_repair_jmx("forceRepairAsync(java.lang.String,boolean,boolean,boolean,boolean,[Ljava.lang.String;)",
['ks', False, False, False, False, ["cf"]])
assert opt["parallelism"], "parallel" == opt
assert opt["primary_range"], "false" == opt
assert opt["incremental"], "true" == opt
assert opt["job_threads"], "1" == opt
assert opt["data_centers"], "[]" == opt
assert opt["hosts"], "[]" == opt
assert opt["column_families"], "[cf]" == opt
def test_force_repair_range_async_1(self):
"""
test forceRepairRangeAsync(String beginToken, String endToken,
String keyspaceName, boolean isSequential,
Collection<String> dataCenters,
Collection<String> hosts, boolean fullRepair,
String... columnFamilies)
"""
opt = self._deprecated_repair_jmx("forceRepairRangeAsync(java.lang.String,java.lang.String,java.lang.String,boolean,java.util.Collection,java.util.Collection,boolean,[Ljava.lang.String;)",
["0", "1000", "ks", True, ["dc1"], [], False, ["cf"]])
assert opt["parallelism"], "parallel" if is_win() else "sequential" == opt
assert opt["primary_range"], "false" == opt
assert opt["incremental"], "true" == opt
assert opt["job_threads"], "1" == opt
assert opt["data_centers"], "[dc1]" == opt
assert opt["hosts"], "[]" == opt
assert opt["ranges"], "1" == opt
assert opt["column_families"], "[cf]" == opt
def test_force_repair_range_async_2(self):
"""
test forceRepairRangeAsync(String beginToken, String endToken,
String keyspaceName, int parallelismDegree,
Collection<String> dataCenters,
Collection<String> hosts,
boolean fullRepair, String... columnFamilies)
"""
opt = self._deprecated_repair_jmx("forceRepairRangeAsync(java.lang.String,java.lang.String,java.lang.String,int,java.util.Collection,java.util.Collection,boolean,[Ljava.lang.String;)",
["0", "1000", "ks", 2, [], [], True, ["cf"]])
assert opt["parallelism"], "parallel" if is_win() else "dc_parallel" == opt
assert opt["primary_range"], "false" == opt
assert opt["incremental"], "false" == opt
assert opt["job_threads"], "1" == opt
assert opt["data_centers"], "[]" == opt
assert opt["hosts"], "[]" == opt
assert opt["ranges"], "1" == opt
assert opt["column_families"], "[cf]" == opt
def test_force_repair_range_async_3(self):
"""
test forceRepairRangeAsync(String beginToken, String endToken,
String keyspaceName, boolean isSequential,
boolean isLocal, boolean fullRepair,
String... columnFamilies)
"""
opt = self._deprecated_repair_jmx("forceRepairRangeAsync(java.lang.String,java.lang.String,java.lang.String,boolean,boolean,boolean,[Ljava.lang.String;)",
["0", "1000", "ks", True, True, True, ["cf"]])
assert opt["parallelism"], "parallel" if is_win() else "sequential" == opt
assert opt["primary_range"], "false" == opt
assert opt["incremental"], "false" == opt
assert opt["job_threads"], "1" == opt
assert opt["data_centers"], "[dc1]" == opt
assert opt["hosts"], "[]" == opt
assert opt["ranges"], "1" == opt
assert opt["column_families"], "[cf]" == opt
def _deprecated_repair_jmx(self, method, arguments):
"""
* Launch a two node, two DC cluster
* Create a keyspace and table
* Insert some data
* Call the deprecated repair JMX API based on the arguments passed into this method
* Check the node log to see if the correct repair was performed based on the jmx args
"""
cluster = self.cluster
logger.debug("Starting cluster..")
cluster.populate([1, 1])
node1, node2 = cluster.nodelist()
remove_perf_disable_shared_mem(node1)
cluster.start()
supports_pull_repair = cluster.version() >= LooseVersion('3.10')
session = self.patient_cql_connection(node1)
create_ks(session, 'ks', 2)
create_cf(session, 'cf', read_repair=0.0, columns={'c1': 'text', 'c2': 'text'})
insert_c1c2(session, n=1000, consistency=ConsistencyLevel.ALL)
# Run repair
mbean = make_mbean('db', 'StorageService')
with JolokiaAgent(node1) as jmx:
# assert repair runs and returns valid cmd number
assert jmx.execute_method(mbean, method, arguments) == 1
# wait for log to start
node1.watch_log_for("Starting repair command")
# get repair parameters from the log
line = node1.grep_log((r"Starting repair command #1" + (r" \([^\)]+\)" if cluster.version() >= LooseVersion("3.10") else "") +
r", repairing keyspace ks with repair options \(parallelism: (?P<parallelism>\w+), primary range: (?P<pr>\w+), "
r"incremental: (?P<incremental>\w+), job threads: (?P<jobs>\d+), ColumnFamilies: (?P<cfs>.+), dataCenters: (?P<dc>.+), "
r"hosts: (?P<hosts>.+), # of ranges: (?P<ranges>\d+)(, pull repair: (?P<pullrepair>true|false))?\)"))
assert_length_equal(line, 1)
line, m = line[0]
if supports_pull_repair:
assert m.group("pullrepair"), "false" == "Pull repair cannot be enabled through the deprecated API so the pull repair option should always be false."
return {"parallelism": m.group("parallelism"),
"primary_range": m.group("pr"),
"incremental": m.group("incremental"),
"job_threads": m.group("jobs"),
"column_families": m.group("cfs"),
"data_centers": m.group("dc"),
"hosts": m.group("hosts"),
"ranges": m.group("ranges")}
@since("3.0.16", max_version="4")
class TestDeprecatedRepairNotifications(Tester):
"""
* @jira_ticket CASSANDRA-13121
* Test if legacy JMX detects failures in repair jobs launched with the deprecated API.
* Affects cassandra-3.x clusters when users run JMX from cassandra-2.1 and older to submit repair jobs.
"""
def get_legacy_environment(self, legacy_version, node_env=None):
"""
* Set up an environment to run nodetool from cassandra-2.1.
"""
env = {}
if (node_env is not None):
env = node_env
legacy_dirpath = ccmlib.repository.directory_name(legacy_version)
env["CASSANDRA_HOME"] = legacy_dirpath
binpaths = [legacy_dirpath,
os.path.join(legacy_dirpath, "build", "classes", "main"),
os.path.join(legacy_dirpath, "build", "classes", "thrift")]
env["cassandra_bin"] = ":".join(binpaths)
env["CASSANDRA_CONF"] = os.path.join(legacy_dirpath, "conf")
classpaths = [env["CASSANDRA_CONF"], env["cassandra_bin"]]
for jar in os.listdir(os.path.join(legacy_dirpath, "lib")):
if (jar.endswith(".jar")):
classpaths.append(os.path.join(legacy_dirpath, "lib", jar))
env['CLASSPATH'] = ":".join(classpaths)
return env
def test_deprecated_repair_error_notification(self):
"""
* Check whether a legacy JMX nodetool understands the
* notification for a failed repair job.
"""
# This test intentionally provokes an error in a repair job
self.fixture_dtest_setup.ignore_log_patterns = [r'Repair failed', r'The current host must be part of the repair']
# start a 2-node cluster
logger.debug("Starting cluster...")
cluster = self.cluster
cluster.populate(2)
node1, node2 = cluster.nodelist()
cluster.start(wait_for_binary_proto=True, wait_other_notice=True)
# write some data that could be repaired
logger.debug("Stressing node1...")
node1.stress(stress_options=['write', 'n=5000', 'no-warmup', 'cl=ONE', '-schema', 'replication(factor=2)', '-rate', 'threads=5'])
# set up a legacy repository
logger.debug("Setting up legacy repository...")
legacy_version = 'github:apache/cassandra-2.1'
ccmlib.repository.setup(legacy_version)
# Run repair with legacy nodetool.
# The options specified will cause an error, and legacy nodetool should error out.
logger.debug("Running repair on node1 using legacy nodetool (using options that will cause failure with error)")
legacy_dirpath = ccmlib.repository.directory_name(legacy_version)
legacy_nodetool_path = os.path.join(legacy_dirpath, "bin", "nodetool")
repair_env = self.get_legacy_environment(legacy_version, node_env=node1.get_env())
repair_args = [legacy_nodetool_path, "-h", "localhost", "-p", str(node1.jmx_port), "repair", "-hosts", "127.0.0.2"]
p = subprocess.Popen(repair_args, env=repair_env, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
nodetool_stderr = None
nodetool_returncode = None
try:
_, nodetool_stderr, _ = handle_external_tool_process(p, repair_args)
except ToolError as tool_error:
nodetool_stderr = tool_error.stderr
# Check for repair failed message in node1 log
repair_failed_logs = node1.grep_log(r"ERROR \[(Repair-Task|Thread)-\d+\] \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3} RepairRunnable.java:\d+ - Repair failed")
assert len(repair_failed_logs) > 0, "Node logs don't have an error message for the failed repair"
# Check for error and stacktrace in nodetool output
assert nodetool_stderr.find("error") > -1, "Legacy nodetool didn't print an error message for the failed repair"
assert nodetool_stderr.find("-- StackTrace --") > -1, "Legacy nodetool didn't print a stack trace for the failed repair"
|
|
#Reeve D'Cunha AS91367 version 0.6
import tkinter as tk
import string, random
#from PIL import ImageTk, Image
class MainApp:
def __init__(self, master):
self.defaultbg = root.cget('bg')
self.master = master #Creates Frames
photo = tk.PhotoImage(file="icon.gif")
self.masterImage = tk.Label(master,image=photo,anchor=tk.NW)
self.masterImage.grid(row=0,column=0,sticky=tk.N+tk.W,padx=(40,0),pady=(10,0))
self.masterImage.photo = photo
self.master.grid_columnconfigure(0, weight=1)
self.master.grid_rowconfigure((0,1), weight=1)
self.topFrame = tk.Frame(self.master)
self.mainFrame = tk.Frame(self.master)
self.bottomFrame = tk.Frame(self.master)
self.topFrame.grid(row=0,column=0,pady=(0,10),sticky=tk.N)
self.mainFrame.grid(row=1,column=0,sticky=tk.N+tk.S+tk.E+tk.W)
self.mainFrame.grid_columnconfigure(0, weight=1)
self.bottomFrame.grid(row=2,column=0,sticky=tk.E+tk.W,pady=(10,0))
self.bottomFrame.grid_columnconfigure(0, weight=1) #Makes bottomFrame fill all the horizontal space
self.bottomFrame.grid_columnconfigure(1, weight=1) #Makes bottomFrame fill all the horizontal space
self.screenHeader = tk.Label(self.topFrame,font="-weight bold -size 13 -underline 1")
self.backButton = tk.Button(self.bottomFrame,font="-size 11 -weight bold",text="Back",width=10,height=2)
self.forwardButton = tk.Button(self.bottomFrame,font="-size 11 -weight bold",text="Next",width=10,height=2)
self.madeby= tk.Label(self.bottomFrame,text="Made by: Reeve D'Cunha",font="-size 9 -weight bold").grid(row=0,column=1,sticky=tk.W)
self.screenHeader.grid(row=0,column=0,pady=10)
self.backButton.grid(row=0,column=0,sticky=tk.W)
self.forwardButton.grid(row=0,column=2,sticky=tk.E)
self.reset(False) #Initialises arrays
self.choose_movie() #Brings first Frame into view
    def reset(self,jump): #Resets all arrays
self.movieDict = {"James Bourne":("10:30AM 3D","12:00PM","1:30PM 3D"),"Ghostbusters":("11:00AM","12:30PM 3D","2:00PM"),"Finding Dory":("10:45AM","12:15PM 3D","4:30PM 3D")}
self.chosenSession = {0:0,1:0} #Stores session(Movie, Time)
self.ticketsDict = {0:0,1:0,2:0,3:0} #Stores number of tickets (child,student,adult,senior)
self.bookedSeats = [] #Stores seats((seat name, btnIdent))
self.spinboxDict = {}
self.seatsDict = {} #Stores {btnIdent:(seatBtn,seat name)}
self.sbVars = {}
self.randomBooked = [] #Stores random "booked" seats (btnIdent)
        self.runRandom_seats = True #flag to generate random "booked" seats
        if self.runRandom_seats:
for i in range(0,random.randint(1,13)):
randomNumber = random.randint(0,49)
if randomNumber not in self.randomBooked:
self.randomBooked.append(randomNumber)
self.runRandom_seats = False
        if jump:
self.switch_frames(self.fsFrame,self.choose_movie)
def switch_frames(self,frameToRemove,functionToRun): #Function called by Back and Forward buttons
frameToRemove.grid_remove()
functionToRun()
""" PAGES """
def choose_movie(self):
self.screenHeader.configure(text="Choose a Movie and select Tickets")
self.cmFrame= tk.Frame(self.mainFrame)
self.cmFrame.grid(row=0,column=0)
self.movieFrame = tk.Frame(self.cmFrame)
self.ticketFrame = tk.Frame(self.cmFrame)
self.movieFrame.grid(row=1,column=0,padx=(0,70))
self.ticketFrame.grid(row=1,column=1,padx=(70,0))
self.movieListbox = tk.Listbox(self.movieFrame,bg="white",font="-size 11",width=15,height=6,exportselection=False)
self.timeListbox = tk.Listbox(self.movieFrame,bg="white",font="-size 11",width=15,height=6,exportselection=False)
self.surchargeWarning = tk.Label(self.movieFrame,fg="purple",font="-size 10 -weight bold",text="NOTE: Sessions marked as 3D have a \n$1 surcharge per ticket").grid(row=0,column=0,columnspan=2,sticky=tk.W+tk.E,pady=(0,10))
self.childLabel = tk.Label(self.ticketFrame,font="-size 11",text="Child Tickets ($12.50): ").grid(row=1,column=0,sticky=tk.E)
self.studentLabel = tk.Label(self.ticketFrame,font="-size 11",text="Student Tickets ($16.00): ").grid(row=2,column=0,sticky=tk.E)
self.AdultLabel = tk.Label(self.ticketFrame,font="-size 11",text="Adult Tickets ($18.00): ").grid(row=3,column=0,sticky=tk.E)
self.SeniorLabel = tk.Label(self.ticketFrame,font="-size 11",text="Senior Tickets ($12.50): ").grid(row=4,column=0,sticky=tk.E)
self.availableLabel= tk.Label(self.ticketFrame,font="-size 11 -weight bold",text="Seats Available: "+str(50-len(self.randomBooked))).grid(row=0,column=0,columnspan=2,pady=(0,10))
self.ticketError=tk.Label(self.ticketFrame, text = " \n",fg="white",font="-size 11 -weight bold")
self.ticketError.grid(row=5,column=0,columnspan=2,sticky=tk.W+tk.E,pady=(10,0))
for sbIdent in range (0,4):
self.var = tk.StringVar()
self.sbVars[sbIdent]=self.var
self.sbVars[sbIdent].set(self.ticketsDict[sbIdent])
self.spinBox = tk.Spinbox(self.ticketFrame,from_=0,to=50,width=5,font="-size 11",state="readonly",textvariable=self.var,readonlybackground="white")
self.spinBox.grid(row=sbIdent+1,column=1,pady=2.5,padx=(20,0))
self.spinboxDict[sbIdent]=self.spinBox
self.spinBox.bind('<Leave>',self.add_tickets)
self.movieListbox.grid(row=1,column=0,sticky=tk.E)
self.timeListbox.grid(row=1,column=1,sticky=tk.W)
for movie in self.movieDict:
self.movieListbox.insert(tk.END,movie)
self.movieListbox.select_set(self.chosenSession[0]) #Sets default movie
self.timeUpdate(switchMovie=False) #Initialises times
self.movieListbox.bind('<<ListboxSelect>>', lambda x: self.timeUpdate(switchMovie=True)) #Listens for change in movie then runs timeUpdate
self.timeListbox.bind('<<ListboxSelect>>',self.updateSession)
self.backButton.configure(state=tk.DISABLED)
self.forwardButton.configure(state=tk.NORMAL,text="Next",command=lambda: self.switch_frames(self.cmFrame,self.choose_seats))
def choose_seats(self):
self.screenHeader.configure(text="Choose Your Seats")
self.csFrame = tk.Frame(self.mainFrame)
self.csFrame.grid(row=0,column=0)
btnIdent = 0 #Identification for dynamically created buttons
for seatColumn in range(0,10):
seatLetter = string.ascii_uppercase[seatColumn]
for seatRow in range(1,6):
seatName = str(seatRow)+seatLetter
self.seatBtn = tk.Button(self.csFrame, text="Seat "+seatName,width=7,height=2,command= lambda seat=btnIdent: self.seat_click(seat))
self.seatBtn.grid(row=seatRow+1,column=seatColumn)
self.seatsDict[btnIdent]=(self.seatBtn,seatName)
btnIdent += 1
self.screenLabel=tk.Label(self.csFrame, text="FRONT OF CINEMA",bg="#800080",fg="white").grid(row=0,column=2,columnspan=6,sticky=tk.W+tk.E,pady=(0,10))
self.errorLabel=tk.Label(self.csFrame, text = " ",font="-size 11 -weight bold",fg="white")
self.errorLabel.grid(row=7,column=2,columnspan=6,sticky=tk.W+tk.E,pady=10)
        self.bookedLabel = tk.Label(self.csFrame,font="-size 10",text="Number of Tickets booked: "+str(sum(self.ticketsDict.values())))
        self.bookedLabel.grid(row=8,column=1,columnspan=4,sticky=tk.W+tk.E)
        self.chosenLabel = tk.Label(self.csFrame,font="-size 10",text="Seats Left to Choose: "+str(sum(self.ticketsDict.values())))
        self.chosenLabel.grid(row=8,column=5,columnspan=4,sticky=tk.W+tk.E)
self.toggle_seats()
self.backButton.configure(state=tk.NORMAL,command=lambda: self.switch_frames(self.csFrame,self.choose_movie))
self.forwardButton.configure(state=tk.NORMAL if sum(self.ticketsDict.values())-len(self.bookedSeats) == 0 and len(self.bookedSeats) > 0 else tk.DISABLED,text="Next",command=lambda: self.switch_frames(self.csFrame,self.final_summary))
def final_summary(self):
self.screenHeader.configure(text="Summary")
self.fsFrame= tk.Frame(self.mainFrame)
self.fsFrame.grid(row=0,column=0)
self.seatsChosen = tk.Text(self.fsFrame,width=30,height=4,bg="white", state = tk.NORMAL)
self.seatsChosen.grid(row=2,column=0)
self.seatsChosen.insert(1.0,",".join(sorted([i[0] for i in self.bookedSeats],reverse=True)))
self.seatsChosen.configure(state=tk.DISABLED)
self.sessionOutput = tk.Label(self.fsFrame,text="You are watching "+self.movieListbox.get(self.movieListbox.curselection())+" at "+self.timeListbox.get(self.timeListbox.curselection()).replace("3D","")).grid(row=0,column=0,sticky=tk.N)
self.is3D = tk.Label(self.fsFrame, text = "3D Session: Yes" if '3D' in self.timeListbox.get(self.timeListbox.curselection()) else "3D Session: No").grid(row=1,column=0)
self.resetButton = tk.Button(self.fsFrame,text="reset",command=lambda :self.reset(True)).grid(row=5,column=0)
self.backButton.configure(state=tk.NORMAL,command=lambda: self.switch_frames(self.fsFrame,self.choose_seats))
#self.forwardButton.configure(state=tk.DISABLED,text="END")
self.forwardButton.configure(text="QUIT",command=quit)
""" FUNCTIONS FOR PAGES"""
def seat_click(self,btnIdent): #Run when seats are selected
if self.seatsDict[btnIdent][1] not in [x[0] for x in self.bookedSeats]:
if len(self.bookedSeats) < sum(self.ticketsDict.values()):
self.bookedSeats.append((self.seatsDict[btnIdent][1],btnIdent))
else:
self.errorLabel.configure(text="ERROR: TOO MANY SEATS SELECTED!", bg="red")
else:
self.bookedSeats.remove((self.seatsDict[btnIdent][1],btnIdent))
self.errorLabel.configure(text=" ",bg=self.defaultbg)
self.toggle_seats()
def toggle_seats(self):
for btnIdent in self.seatsDict:
if btnIdent in [x[1] for x in self.bookedSeats]:
self.seatsDict[btnIdent][0].configure(bg="green")
elif btnIdent in self.randomBooked:
self.seatsDict[btnIdent][0].configure(bg="red",state=tk.DISABLED,disabledforeground="black",text="BOOKED")
else:
self.seatsDict[btnIdent][0].configure(bg=self.defaultbg)
self.chosenLabel.configure(text="Seats Left to Choose: "+str(sum(self.ticketsDict.values())-len(self.bookedSeats)))
if sum(self.ticketsDict.values())-len(self.bookedSeats) == 0 and len(self.bookedSeats) > 0:
self.forwardButton.configure(state=tk.NORMAL)
else:
self.forwardButton.configure(state=tk.DISABLED)
def timeUpdate(self,*args,switchMovie):
movie = self.movieListbox.get(self.movieListbox.curselection()[0])
self.timeListbox.delete(0,tk.END) #Clears and fills with movie's time
for time in range(0,len(self.movieDict[movie])):
self.timeListbox.insert(tk.END,self.movieDict[movie][time])
        if switchMovie:
            self.timeListbox.select_set(0)  # new movie: default to the first session
        else:
            self.timeListbox.select_set(self.chosenSession[1])
self.updateSession()
    def updateSession(self,*args):
        if self.movieListbox.curselection():
            self.chosenSession[0] = self.movieListbox.curselection()[0]
        if self.timeListbox.curselection():
            self.chosenSession[1] = self.timeListbox.curselection()[0]
def add_tickets(self,*args):
        for ident in range(0, 4):
            self.ticketsDict[ident] = int(self.spinboxDict[ident].get())
if sum(self.ticketsDict.values()) > 50-len(self.randomBooked):
self.ticketError.configure(text="ERROR: TOO MANY TICKETS!\n TICKETS REQUESTED: "+str(sum(self.ticketsDict.values())), bg="red")
self.forwardButton.configure(state=tk.DISABLED)
else:
self.ticketError.configure(text=" \n",bg=self.defaultbg)
self.forwardButton.configure(state=tk.NORMAL)
self.bookedSeats = []
if __name__ == '__main__':
root = tk.Tk()
app = MainApp(root)
root.minsize(width=850,height=420)
root.mainloop()
"""PAGE TEMPLATE
def choose_movie(self):
self.screenHeader.configure(text="Choose a Movie and select Tickets")
self.cmFrame= tk.Frame(self.mainFrame)
self.cmFrame.grid(row=0,column=0)
self.backButton.configure(state=tk.DISABLED)
self.forwardButton.configure(state=tk.NORMAL,command=lambda: self.switch_frames(self.cmFrame,self.choose_seats))
"""
|
|
from django import forms
from django.utils.safestring import mark_safe
from django.core.cache import cache
from django.core.exceptions import ValidationError
from django.core.validators import RegexValidator
from django.db.models import Max, Min
from winthrop.books.models import Book
## Disabled input logic borrowed from PPA (ppa.archive.forms)
class SelectDisabledMixin(object):
'''
    Mixin for :class:`django.forms.RadioSelect` or :class:`django.forms.CheckboxSelectMultiple`
classes to set an option as disabled. To disable, the widget's choice
label option should be passed in as a dictionary with `disabled` set
to True::
{'label': 'option', 'disabled': True}.
'''
# Using a solution at https://djangosnippets.org/snippets/2453/
def create_option(self, name, value, label, selected, index, subindex=None,
attrs=None):
disabled = None
if isinstance(label, dict):
label, disabled = label['label'], label['disabled']
option_dict = super().create_option(
name, value, label, selected, index,
subindex=subindex, attrs=attrs
)
if disabled:
option_dict['attrs'].update({'disabled': 'disabled'})
return option_dict
class RadioSelectWithDisabled(SelectDisabledMixin, forms.RadioSelect):
'''
Subclass of :class:`django.forms.RadioSelect` with option to mark
a choice as disabled.
'''
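# A minimal usage sketch (hypothetical form): a dict label marks one choice
# as disabled while plain string labels stay selectable.
#
#   class ExampleForm(forms.Form):
#       sort = forms.ChoiceField(
#           widget=RadioSelectWithDisabled,
#           choices=[
#               ('relevance', {'label': 'Relevance', 'disabled': True}),
#               ('author_asc', 'Author A-Z'),
#           ])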
class FacetChoiceField(forms.MultipleChoiceField):
'''Add CheckboxSelectMultiple field with facets taken from solr query'''
# Borrowed from https://github.com/Princeton-CDH/derrida-django/blob/develop/derrida/books/forms.py
# customize multiple choice field for use with facets.
# no other adaptations needed
    # - turn off choice validation (shouldn't fail if facets don't get loaded)
# - default to not required
# - use checkbox select multiple as default widget
widget = forms.CheckboxSelectMultiple
def __init__(self, *args, **kwargs):
if 'required' not in kwargs:
kwargs['required'] = False
super().__init__(*args, **kwargs)
def valid_value(self, value):
return True
# RangeWidget and RangeField also borrowed from Derrida/PPA codebase
class RangeWidget(forms.MultiWidget):
'''date range widget, for two numeric inputs'''
#: separator string when splitting out values in decompress
sep = '-'
#: template to use to render range multiwidget
# (based on multiwidget, but adds "to" between dates)
template_name = 'books/widgets/rangewidget.html'
def __init__(self, *args, **kwargs):
widgets = [
forms.NumberInput(),
forms.NumberInput()
]
super().__init__(widgets, *args, **kwargs)
def decompress(self, value):
if value:
return [int(val) for val in value.split(self.sep)]
return [None, None]
class RangeField(forms.MultiValueField):
widget = RangeWidget
def __init__(self, *args, **kwargs):
fields = (
forms.IntegerField(
error_messages={'invalid': 'Enter a number'},
validators=[
RegexValidator(r'^[0-9]*$', 'Enter a valid number.'),
],
required=False
),
forms.IntegerField(
error_messages={'invalid': 'Enter a number'},
validators=[
RegexValidator(r'^[0-9]*$', 'Enter a valid number.'),
],
required=False
),
)
kwargs['fields'] = fields
super().__init__(require_all_fields=False, *args, **kwargs)
def compress(self, data_list):
# if both values are set and the first is greater than the second,
# raise a validation error
if all(data_list) and len(data_list) == 2 and data_list[0] > data_list[1]:
raise ValidationError('Invalid range (%s - %s)' % (data_list[0], data_list[1]))
return self.widget.sep.join(['%d' % val if val else '' for val in data_list])
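    # e.g. (a sketch): compress([1550, 1650]) -> '1550-1650', and
    # compress([None, 1650]) -> '-1650'; decompress reverses the join above.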
# pubdate min/max methods and range logic borrowed from PPA/Derrida codebase
class SearchForm(forms.Form):
'''Search form for searching across :class:`~winthrop.books.models.Books`.'''
SORT_CHOICES = [
('author_asc', 'Author A-Z'),
('author_desc', 'Author Z-A'),
('pub_year_asc', 'Year Oldest-Newest'),
('pub_year_desc', 'Year Newest-Oldest'),
('relevance', 'Relevance'),
]
defaults = {
'sort': 'author_asc',
}
query = forms.CharField(label='Keyword or Phrase', required=False)
sort = forms.ChoiceField(widget=RadioSelectWithDisabled, choices=SORT_CHOICES,
required=False)
# Solr facet choice fields
author = FacetChoiceField()
editor = FacetChoiceField()
# translator = FacetChoiceField() NOTE temp disabled as not lv1 feature
language = FacetChoiceField()
subject = FacetChoiceField()
annotator = FacetChoiceField()
# range choice fields
pub_year = RangeField(
label='Publication Year',
required=False,
widget=RangeWidget(attrs={
'size': 4,
'_inline': True
})
)
# map solr facet field to corresponding form input
solr_facet_fields = {
'author_exact': 'author',
'editor_exact': 'editor',
'translator_exact': 'translator',
'language_exact': 'language',
'subject_exact': 'subject',
'annotator_exact': 'annotator'
}
# TODO: Should this be a dict? Right now it doesn't need a different name
# to map to the fields used in the form.
range_facets = ['pub_year']
def __init__(self, data=None, *args, **kwargs):
'''
Set choices dynamically based on form kwargs and presence of keywords.
'''
super().__init__(data=data, *args, **kwargs)
pubdate_range = self.pub_date_minmax()
# because pubdate is a multifield/multiwidget, access the widgets
# under the multiwidgets
pubdate_widgets = self.fields['pub_year'].widget.widgets
for idx, val in enumerate(pubdate_range):
# don't set None as placeholder (only possible if db is empty)
if val:
# set placeholder and max/min values
pubdate_widgets[idx].attrs.update({'placeholder': val,
'min': pubdate_range[0], 'max': pubdate_range[1]})
# relevance is disabled unless we have a keyword query present
if not data or not data.get('query', None):
self.fields['sort'].widget.choices[-1] = \
('relevance', {'label': 'Relevance', 'disabled': True})
def set_choices_from_facets(self, facets):
# configure field choices based on facets returned from Solr
# (adapted from derrida codebase)
for facet, counts in facets.items():
# use field from facet fields map or else field name as is
formfield = self.solr_facet_fields.get(facet, facet)
if formfield in self.fields:
self.fields[formfield].choices = [
(val, mark_safe('%s <span>%d</span>' % (val, count)))
for val, count in counts.items()]
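    # Sketch with a hypothetical facet payload from Solr:
    #   form.set_choices_from_facets({'author_exact': {'Winthrop, John': 12}})
    # maps 'author_exact' to the 'author' field and sets its choices to
    #   [('Winthrop, John', mark_safe('Winthrop, John <span>12</span>'))]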
# map form sort options to solr sort field
solr_sort_fields = {
'relevance': 'score desc',
'pub_year_asc': 'pub_year asc',
'pub_year_desc': 'pub_year desc',
'author_asc': 'author_sort asc',
'author_desc': 'author_sort desc',
}
PUBDATE_CACHE_KEY = 'book_pubdate_maxmin'
def pub_date_minmax(self):
'''Get minimum and maximum values for
:class:`~winthrop.books.models.Book` publication dates
in the database. Used to set placeholder values for the form
input and to generate the Solr facet range query.
Value is cached to avoid repeatedly calculating it.
:returns: tuple of min, max
'''
maxmin = cache.get(self.PUBDATE_CACHE_KEY)
if not maxmin:
maxmin = Book.objects \
.aggregate(Max('pub_year'), Min('pub_year'))
            # cache as returned from django; looks like this:
            # {'pub_year__max': 1922, 'pub_year__min': 1559}
# don't cache if values are None
# should only happen if no data is in the db
if all(maxmin.values()):
cache.set(self.PUBDATE_CACHE_KEY, maxmin)
# return just the min and max values
return maxmin['pub_year__min'], maxmin['pub_year__max']
def get_solr_sort_field(self, sort):
'''
        Look up the solr sort field corresponding to the requested sort option.
:return: solr sort field
'''
# return solr field for requested sort option
return self.solr_sort_fields[sort]
|
|
import unittest
from prototype import prototype, PrototypeException, PrototypeSwitcher
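# These tests exercise JavaScript-style prototype inheritance: prototype(base)
# yields an object whose attribute misses fall back to base dynamically, while
# writes stay on the derived object and never propagate up the chain.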
class BaseClass(object):
def __init__(self, base_list):
print "in base ctr"
self.base_list = base_list
self.base_test = True
self.base_list.append('init')
def __getattr__(self, name):
print "in base getattr", name
if name in ('new',) or name.startswith('__'):
#this magic is to make BaseClass not trigger
# 'new' or '__call__' checks
print "caight new and call"
raise AttributeError(name)
self.base_list.append(name)
return name
class SimpleBase(object):
pass
class TestInstanceProperties(unittest.TestCase):
def setUp(self):
self.base = BaseClass([])
self.simple_base = SimpleBase()
def test_pass(self):
pass
def test_instance(self):
base = self.base
base.base_test = 0
extend = prototype(base)
extend.test = 1
assert extend.base_test == base.base_test == 0, "testing fallback"
assert extend.test == 1, "testing basic setting"
assert extend.test2 == 'test2', "testing fallback"
assert base.test == 'test', "ensuring property didn't propagate up"
assert base.base_list == ['init', 'test2', 'test']
def test_instance_is_dynamic(self):
base = self.base
base.test = 0
extend = prototype(base)
assert extend.test == base.test == 0, "testing basic fallback"
base.test = 1
assert extend.test == base.test == 1, "testing dynamic fallback"
assert base.base_list == ['init']
def test_instance_not_defined(self):
base = self.simple_base
base.test = 5
extend = prototype(base)
extend.extend_test = 42
assert base.test == extend.test == 5, "simple fallback"
try:
extend.foo
assert False, "extend.foo should raise an exception"
except AttributeError:
pass
assert extend.extend_test == 42, "basic setting"
try:
base.extend_test
assert False, "base.extend_test shouldn't be set (propagation)"
except AttributeError:
pass
def test_functions_work(self):
import new
base = self.base
def id(self, x):
return x
base.id = new.instancemethod(id, base)
base.id(5)
extend = prototype(base)
extend.incr = lambda x: x + 1 #try adding a classmethod
def incr2(self, x):
return x + 2
extend.incr2 = new.instancemethod(incr2, extend) #and a bound method
assert base.id(5) == extend.id(5) == 5, "fallback instance methods work"
assert extend.incr(5) == 6, "bound class methods work"
assert extend.incr2(5) == 7, "bound instance methods work"
def test_existing_new_works(self):
base = self.base
base.new = 5
base.bar = 6
extend = prototype(base)
try:
extend.bar
assert False, "you can't use magic new removal if base has a new"
except PrototypeException:
pass
assert isinstance(extend, PrototypeSwitcher), "prototype switching"
extend2 = extend.new
assert not isinstance(extend2, PrototypeSwitcher), "switch worked"
        assert extend2.bar == base.bar == 6, "switched instance prototypes correctly"
extend3 = extend.new
assert id(extend2) == id(extend3), "news are singletons"
def test_multi_level(self):
base = self.simple_base
base.a = 1
extend = prototype(base).new
extend.b = 2
further = prototype(extend).new
further.c = 3
sibling = prototype(extend).new
sibling.d = 4
baby = prototype(sibling).new
baby.e = 5
assert base.a == extend.a == further.a == sibling.a == baby.a == 1
assert extend.b == further.b == sibling.b == baby.b == 2
assert further.c == 3
        try:
            sibling.c
            assert False, "sibling.c should not be inherited across branches"
        except AttributeError:
            pass
        try:
            baby.c
            assert False, "baby.c should not be inherited across branches"
        except AttributeError:
            pass
assert sibling.d == baby.d == 4
assert baby.e == 5
class TestClassProperties(unittest.TestCase):
def setUp(self):
self.base = BaseClass([])
@prototype(self.base)
class ExtendBase(object):
extend_marker = 5
def __init__(self):
self.member_extend = 6
@classmethod
def add5(cls, x):
return x + 5
def add3(self, x):
return x + 3
self.extend = ExtendBase
def test_class_extension(self):
extend = self.extend()
base = self.base
assert extend.test == base.test == 'test', "base getattr works"
base.foo = 1
assert extend.foo == base.foo == 1, "base setting works"
extend.bar = 2
assert extend.bar == 2, "extend setting works"
assert base.bar == 'bar', "propagation didn't happen"
assert extend.extend_marker == 5, "class variables work"
assert base.extend_marker == 'extend_marker', "propagation didn't happen"
assert extend.member_extend == 6, "member variables work"
assert extend.add3(5) == 8, "methods work"
assert extend.add5(5) == 10, "classmethods work"
assert self.extend.add5(6) == 11, "classmethods work on the class"
print base.base_list
assert base.base_list == extend.base_list == ['init', 'test', 'test', 'bar', 'extend_marker'], "correct calls were made to base.getattr"
def test_class_extension_with_call_set(self):
class Callable(object):
def __call__(self, x):
return x + 5
base = Callable()
assert base(5) == 10, "call correctly written"
try:
@prototype(base)
class ShouldFail(object):
pass
assert False, "Required extend when call overwritten"
except PrototypeException:
pass
@prototype(base, extend=True)
class ShouldPass(object):
pass
extend = ShouldPass()
print extend
print getattr(extend, '__call__')
print getattr(extend, '__call__'), "should have __call__"
assert hasattr(extend, '__call__'), "should have __call__"
assert callable(extend), "should be callable"
        assert getattr(extend, '__call__', False), "__call__ propagates correctly"
print base(7)
print extend(8)
assert extend(6) == base(6) == 11, "call works correctly on classical prototypes"
def test_docs_work(self):
class Docs(object):
"""testing docs"""
pass
base = Docs()
assert base.__doc__ == "testing docs", "docs stuck"
@prototype(base)
class OK(object):
pass
extend = OK()
print extend.__doc__
        assert extend.__doc__ is None, "__doc__ does not propagate through the prototype"
from test_helper import FullyPython
class TestCrazyPython(unittest.TestCase):
def setUp(self):
self.base = FullyPython()
def test_mock(self):
assert self.base
sub = prototype(self.base)
return #TODO: make this work
print len(sub)
assert len(sub) == len(self.base)
if __name__ == '__main__':
unittest.main()
|
|
# -*- coding: utf-8 -*-
"""UI for baking crowd simulations to individual RSProxies and creating
necessary nodes to be able to render them
"""
import hou
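# Typical use (a sketch; the module path is hypothetical): select the crowd
# SOP to bake in the network editor, then run:
#
#   from crowd_bake_ui import create_bake_setup, create_render_setup
#   create_bake_setup()    # builds the per-agent RSProxy export branch
#   create_render_setup()  # builds the instancefile/material render branch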
def create_bake_setup():
"""creates the necessary nodes to bake the crowd to RSProxies"""
nodes = hou.selectedNodes()
if not nodes:
raise RuntimeError("Select a node please")
node_to_bake = nodes[0]
# get the parent
parent_node = node_to_bake.parent()
nodes_created = []
# *************************************
#
# The Bake Branch
#
# *************************************
# create a for each loop
block_begin_node = parent_node.createNode("block_begin")
# set it to "Fetch Piece or Point"
block_begin_node.parm("method").set(1)
block_begin_node.setInput(0, node_to_bake)
nodes_created.append(block_begin_node)
# create attr wrangle
attr_wrangle_node = parent_node.createNode("attribwrangle")
attr_wrangle_node.parm("snippet").set("@P = {0, 0, 0};")
attr_wrangle_node.setInput(0, block_begin_node)
nodes_created.append(attr_wrangle_node)
# Redshift Proxy Output
rs_proxy_output = parent_node.createNode("Redshift_Proxy_Output")
rs_proxy_output.parm("RS_archive_file").set(
'$HIP/Outputs/rs/$F/Crowd_`detail(-1, "iteration", 0)`.$F.rs'
)
rs_proxy_output.parm("RS_archive_removeAtt").set(False)
rs_proxy_output.parm("RS_archive_skipFiles").set(True)
rs_proxy_output.parm("RS_renderCamera").set("proxy_export")
rs_proxy_output.parm("RS_multihreadLoader").set(False)
rs_proxy_output.parm("RS_nonBlockingRendering").set(False)
rs_proxy_output.parm("RS_addDefaultLight").set(False)
rs_proxy_output.parm("RS_arbitraryUVMapNames").set(True)
rs_proxy_output.setInput(0, attr_wrangle_node)
nodes_created.append(rs_proxy_output)
# Python node
python_node = parent_node.createNode("python")
python_node.parm("python").set(
"""# run only in batch mode
if hou.applicationName() == 'hbatch':
rs_node = hou.node("../%s")
rs_node.parm("execute").pressButton()"""
% rs_proxy_output.name()
)
python_node.setInput(0, attr_wrangle_node)
nodes_created.append(python_node)
# block end node
block_end_node = parent_node.createNode("block_end")
block_end_node.parm("itermethod").set(1) # By Pieces or Point
block_end_node.parm("method").set(1) # Merge Each Iteration
block_end_node.parm("class").set(0) # Primitives
block_end_node.parm("useattrib").set(0) # Use Piece Attrib
block_end_node.parm("blockpath").set("../%s" % block_begin_node.name())
block_end_node.parm("templatepath").set("../%s" % block_begin_node.name())
block_end_node.parm("stopcondition").set(0)
block_end_node.setInput(0, python_node)
nodes_created.append(block_end_node)
# The second block begin node
metadata_node = parent_node.createNode("block_begin")
metadata_node.parm("method").set(2) # Fetch Metadata
metadata_node.parm("blockpath").set("../%s" % block_end_node.name())
# also set the block path of the block_begin_node
block_begin_node.parm("blockpath").set("../%s" % block_end_node.name())
nodes_created.append(metadata_node)
from anima.dcc.houdini import auxiliary
    # Create spare input 0 for rs proxy output node
auxiliary.create_spare_input(rs_proxy_output, "../%s" % metadata_node.name())
# delete node
delete_node = parent_node.createNode("delete")
delete_node.parm("group").set("*")
delete_node.parm("entity").set(1)
delete_node.setInput(0, block_end_node)
nodes_created.append(delete_node)
# attribute delete
attr_delete_node = parent_node.createNode("attribdelete")
attr_delete_node.parm("ptdel").set("*")
attr_delete_node.parm("vtxdel").set("*")
attr_delete_node.parm("primdel").set("*")
attr_delete_node.parm("dtldel").set("*")
attr_delete_node.setInput(0, delete_node)
nodes_created.append(attr_delete_node)
# file cache node
file_cache_node = parent_node.createNode("filecache")
file_cache_node.parm("file").set("$TEMP/null.bgeo")
file_cache_node.setInput(0, attr_delete_node)
nodes_created.append(file_cache_node)
# align the nodes
network_editor = auxiliary.get_network_pane()
import nodegraphalign # this is a houdini module
nodegraphalign.alignConnected(network_editor, block_begin_node, None, "down")
# select newly created nodes
nodes_created[0].setSelected(True, True)
for node in nodes_created:
node.setSelected(True, False)
def do_bake():
"""bakes the crowd to RSProxies"""
pass
def create_render_setup():
"""creates the render setup"""
nodes = hou.selectedNodes()
if not nodes:
raise RuntimeError("Select a node please")
node_to_bake = nodes[0]
# get the parent
parent_node = node_to_bake.parent()
nodes_created = []
# *************************************
#
# The Render branch
#
# *************************************
# add node
add_node = parent_node.createNode("add")
add_node.parm("keep").set(1)
add_node.setInput(0, node_to_bake)
nodes_created.append(add_node)
# the second attr delete node
attr_delete_node = parent_node.createNode("attribdelete")
attr_delete_node.parm("ptdel").set("*")
attr_delete_node.parm("vtxdel").set("*")
attr_delete_node.parm("primdel").set("*")
attr_delete_node.parm("dtldel").set("*")
attr_delete_node.setInput(0, add_node)
nodes_created.append(attr_delete_node)
# attr wrangle - load proxies
attr_wrangle_node = parent_node.createNode("attribwrangle")
attr_wrangle_node.parm("snippet").set(
"""s@instancefile = concat(
"$HIP/Outputs/rs/", itoa(@Frame), "/Crowd_", itoa(@ptnum), ".", itoa(@Frame), ".rs"
);"""
)
attr_wrangle_node.setInput(0, attr_delete_node)
nodes_created.append(attr_wrangle_node)
# attr wrangle - set materials
import random
attr_wrangle_node2 = parent_node.createNode("attribwrangle")
attr_wrangle_node2.parm("snippet").set(
"""int material_ids[] = {1,2,3,4,5,6,7,8,9,10};
int material_id = material_ids[sample_discrete(len(material_ids), rand(@ptnum + %0.3f))];
s@shop_materialpath = concat("/mat/Material", itoa(material_id));
"""
% random.random()
)
attr_wrangle_node2.setInput(0, attr_wrangle_node)
nodes_created.append(attr_wrangle_node2)
# attr_wrangle_node3.setDisplayFlag(True)
# attr_wrangle_node3.setRenderFlag(True)
# align the nodes
from anima.dcc.houdini import auxiliary
network_editor = auxiliary.get_network_pane()
import nodegraphalign
nodegraphalign.alignConnected(network_editor, add_node, None, "down")
# select newly created nodes
nodes_created[0].setSelected(True, True)
for node in nodes_created:
node.setSelected(True, False)
|
|
# -*- Mode: Python; tab-width: 4 -*-
# Copyright (c) 2005-2010 Slide, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the author nor the names of other
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Ultra mini lightweight framework
"""
import os, re, types, urllib, cgi, cStringIO, sys, tempfile, Cookie
import exceptions
try:
import amf
except:
amf = None
try:
import amfast
except:
amfast = None
class LocationDeprecationWarning(DeprecationWarning):
pass
class xml(object):
"""An object which contains only xml bytes.
"""
def __init__(self, string):
self.string = string
####################
####################
## Argument Converters
####################
####################
def arguments(*args):
"""A Decorator which you wrap around a function or a method to indicate
which magical objects you want passed to your functions which are
embedded in a Resource's template.
The magical values you can pass are:
LOCATION
REQUEST
SITE
RESOURCE
METHOD
URI
For example, to pass a function the SiteMap instance, do this:
@arguments(SITE)
def foo(site):
print "The site!"
The values which you pass a string argument to get variables out of the
request environment (which also take a default) are:
QUERY
QUERIES
ARG
ARGS
COOKIE
HEADER
For example, to pass a query parameter to a function, with a default
if the parameter is not present, do this:
@arguments(QUERY('foo', 'default'))
def foo(foo):
print foo
All the magic objects which take string values have a __getattr__
    implementation for syntactic convenience when you do not need to
pass a default value (None will be used as the default value):
@arguments(QUERY.foo)
def foo(foo):
print foo
"""
def decorate(func):
func.arguments = args
return func
return decorate
class LOCATION(object):
"""The Location object representing the URL currently being rendered.
"""
class REQUEST(object):
"""The Request object representing the current web request.
"""
class SITE(object):
"""The SiteMap object in which the resource which is currently being
rendered resides.
"""
class RESOURCE(object):
"""The currently-being-rendered resource.
"""
class METHOD(object):
"""The method of the current request.
"""
class URI(object):
"""The URI of the current request.
"""
class Thing(object):
"""A lazy thing. Lazy things have a name and a default
value, and are looked up lazily from the request environment.
"""
def __init__(self, name, default=None):
self.name = name
self.default = default
def __getattr__(self, name):
return type(self)(name)
def __call__(self, name, default=None):
return type(self)(name, default)
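# Sketch: QUERY.foo is shorthand for QUERY('foo') (default None), while
# QUERY('foo', 'bar') carries an explicit default; both are resolved against
# the live request by the argument converters below.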
class QUERY(Thing):
"""A query argument (url parameter)
"""
class QUERIES(Thing):
"""Produce a generator of all query arguments with the given name
"""
class ARG(Thing):
"""A form post variable.
"""
class ARGS(Thing):
"""All form post variables with the given name.
"""
class COOKIE(Thing):
"""A cookie with the given name.
"""
class HEADER(Thing):
"""A header with the given name.
"""
def getQueries(request, arg):
queries = tuple(request.get_query(arg.name))
if not queries:
return arg.default
return queries
def getQuery(request, arg):
queries = getQueries(request, arg)
if queries == arg.default:
return arg.default
return queries[0]
def getArgs(request, arg):
val = request.get_arg_list(arg.name)
if not val:
return arg.default
return val
def getCookie(request, arg):
"""get a cookie woot.
"""
if not hasattr(request, '_simple_cookie'):
cookie = request.get_header('Cookie')
if not cookie:
return arg.default
c = Cookie.SimpleCookie()
c.load(cookie)
request._simple_cookie = c
cook_value = request._simple_cookie.get(arg.name)
if cook_value and cook_value.value:
return cook_value.value
return arg.default
def getLocation(request, arg):
    import warnings
    warnings.warn(
        "The location is deprecated. Use the request instead.",
        LocationDeprecationWarning, 2)
    return request
## Argument converters are passed to the "arguments" decorator.
## Immediately before the decorated function is called, they are invoked.
## They are passed information about the current request
## and they should return the lazy value they represent.
argumentConverters = {
LOCATION: getLocation,
REQUEST: lambda request, arg: request,
SITE: lambda request, arg: request.site,
RESOURCE: lambda request, arg: request.resource,
METHOD: lambda request, arg: request.method(),
URI: lambda request, arg: request.uri(),
QUERY: getQuery,
QUERIES: getQueries,
ARG: lambda request, arg: request.get_arg(arg.name, arg.default),
ARGS: getArgs,
COOKIE: getCookie,
HEADER: lambda request, arg: request.get_header(arg.name, arg.default),
}
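# Sketch of a converter firing: given
#   @arguments(QUERY('page', '1'))
#   def render(page): ...
# convertFunction resolves argumentConverters[type(arg)] -> getQuery, then
# calls getQuery(request, arg) to produce the value passed to render().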
## These things are all prototype objects; they have __getattr__
## and __call__ factories which are used for generating clones
## of them. The lines below make the names that people use
## into instances instead of classes.
LOCATION = LOCATION()
REQUEST = REQUEST()
SITE = SITE()
RESOURCE = RESOURCE()
METHOD = METHOD()
URI = URI()
QUERY = QUERY('')
QUERIES = QUERIES('')
ARG = ARG('')
ARGS = ARGS('')
COOKIE = COOKIE('')
HEADER = HEADER('')
####################
####################
## End Argument Converters
####################
####################
####################
####################
## Rendering Converters
####################
####################
## Rendering converters
## When objects are placed into a DOM which is rendered to be
## sent to the client as HTML, they are run through a series
## of rendering converters until nothing but a single "xml"
## instance is left.
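## A sketch of the chain (assuming request.convert is bound as in
## handle_request): convert(request, [1, 'a<b']) applies convertSequence,
## which turns 1 into xml('1') via convertNumber and escapes 'a<b' via the
## str converter, finally yielding the string '1a&lt;b'.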
def convertFunction(request, function):
args = []
for arg in getattr(function, 'arguments', ()):
converter = argumentConverters.get(type(arg))
if converter is None:
args.append(arg)
else:
args.append(converter(request, arg))
return function(*args)
convertSequence = lambda request, sequence: xml(
''.join([request.convert(request, x) for x in sequence]))
convertNumber = lambda request, number: xml(str(number))
class show(object):
"""A marker which will lazily look up a show_* method
from the currently-being-rendered resource when encountered
in a template.
"""
def __init__(self, name, args=()):
self.name = name
self.args = args
def __getattr__(self, name):
return type(self)(name)
def __call__(self, *args):
return type(self)(self.name, self.args + args)
def __iter__(self):
return iter(())
def convertShow(request, theDirective):
@arguments(RESOURCE, *theDirective.args)
def convert(resource, *args):
return getattr(resource, "show_%s" % (theDirective.name, ))(*args)
return convert
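# Sketch: placing show.title in a template renders by calling the current
# resource's show_title() at render time; show.title(QUERY.q) would pass the
# converted query argument into show_title().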
class stan(object):
def __init__(
self, tag, attributes, children, pattern=None, show=None, clone=False):
(self.tag, self.attributes, self.children, self.pattern, self.show,
self.clone) = (
tag, attributes, children, pattern, show, clone)
def __getitem__(self, args):
"""Add child nodes to this tag.
"""
if not isinstance(args, (tuple, list)):
args = (args, )
if not isinstance(args, list):
args = list(args)
if self.clone:
return type(self)(
self.tag, self.attributes.copy(), [
x for x in self.children] + args,
self.pattern, self.show, False)
self.children.extend(args)
return self
def __call__(self, **kw):
"""Set attributes of this tag. There are two special names
which are reserved:
pattern
Make it possible to find this node later using the
findPattern function
show
When this node is rendered, instead render the
value passed as the "show" value.
"""
if kw.has_key('pattern'):
pattern = kw.pop('pattern')
else:
pattern = self.pattern
if kw.has_key('show'):
show = kw.pop('show')
else:
show = self.show
if self.clone:
newattrs = self.attributes.copy()
newattrs.update(kw)
return type(self)(
self.tag, newattrs, self.children[:], pattern, show, False)
self.attributes.update(kw)
self.pattern = pattern
self.show = show
return self
def cloneNode(self):
return type(self)(
self.tag, self.attributes.copy(), self.children[:], None,
self.show, False)
def tagFactory(tagName):
return stan(tagName, {}, [], None, None, True)
def findPattern(someStan, targetPattern):
"""Find a node marked with the given pattern, "targetPattern",
in a DOM object, "someStan"
"""
pat = getattr(someStan, 'pattern', None)
if pat == targetPattern:
return someStan.cloneNode()
for child in getattr(someStan, 'children', []):
result = findPattern(child, targetPattern)
if result is not None:
return result.cloneNode()
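# Sketch: given page = div[ span(pattern='row')['x'] ], findPattern(page, 'row')
# returns a clone of the span node that is safe to fill in and reuse.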
## TODO: Inline elements shouldn't have any whitespace after them or before
## the closing tag.
def convertStan(request, theStan):
## XXX this probably isn't necessary
request.tag = theStan
if theStan.show is not None:
return theStan.show
if theStan.pattern is not None:
return xml('')
attrs = ''
if theStan.attributes:
for key, value in theStan.attributes.items():
            attrs += ' %s="%s"' % (
                key, request.convert(request, value).replace('"', '&quot;'))
#"
depth = getattr(request, 'depth', 0)
indent = ' ' * depth
request.depth = depth + 1
if theStan.tag in inline_elements:
template = """<%(tag)s%(attrs)s>%(children)s</%(tag)s>"""
else:
template = """
%(indent)s<%(tag)s%(attrs)s>
%(indent)s %(children)s
%(indent)s</%(tag)s>
"""
result = template % dict(
indent=indent, tag=theStan.tag, attrs=attrs,
children=request.convert(request, theStan.children).strip())
request.depth -= 1
return xml(result)
inline_elements = [
'a', 'abbr', 'acronym', 'b', 'basefont', 'bdo', 'big', 'br', 'cite',
'code', 'dfn', 'em', 'font', 'i', 'img', 'input', 'kbd', 'label',
'q', 's', 'samp', 'select', 'small', 'span', 'strike', 'strong',
'sub', 'sup', 'textarea', 'tt', 'u', 'var']
inline_elements = dict((x, True) for x in inline_elements)
tags_to_create = [
'a','abbr','acronym','address','applet','area','b','base','basefont','bdo',
'big','blockquote','body','br','button','caption','center','cite','code',
'col','colgroup','dd','dfn','div','dl','dt','em','fieldset','font','form',
'frame','frameset','h1','h2','h3','h4','h5','h6','head','hr','html','i',
'iframe','img','input','ins','isindex','kbd','label','legend','li','link',
'menu','meta','noframes','noscript','ol','optgroup','option','p','param',
'pre','q','s','samp','script','select','small','span','strike','strong',
'style','sub','sup','table','tbody','td','textarea','tfoot','th','thead',
'title','tr','tt','u','ul','var'
]
class tags(object):
"""A namespace for tags, so one can say "from ultramini import tags"
and have access to all tags as tags.html, tags.foo, etc
"""
pass
for tag in tags_to_create:
T = tagFactory(tag)
globals()[tag] = T
setattr(tags, tag, T)
del tag
del T
del tags_to_create
converters = {
unicode: lambda request, uni: uni.encode('utf8'),
list: convertSequence,
tuple: convertSequence,
types.GeneratorType: convertSequence,
types.FunctionType: convertFunction,
types.MethodType: convertFunction,
types.UnboundMethodType: convertFunction,
int: convertNumber,
float: convertNumber,
long: convertNumber,
show: convertShow,
stan: convertStan,
    str: lambda request, string: xml(
        string.replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;"))
}
converters.update(argumentConverters)
show = show('')
def registerConverter(convertType, converter):
"""Call this to register a converter for one of your custom types.
"""
converters[convertType] = converter
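# e.g. (a sketch; Money is a hypothetical application type):
#   registerConverter(Money, lambda request, m: xml('$%.2f' % m.amount))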
def convert(request, stuff):
"""Recursively apply stuff converters until we get an xml instance.
"""
while not isinstance(stuff, xml):
convert = converters.get(type(stuff))
if convert is None:
raise RuntimeError, "Converter for type %r (%r) not found." % (
type(stuff), stuff)
stuff = convert(request, stuff)
return stuff.string
####################
####################
## End Rendering Converters
####################
####################
####################
####################
## Incoming Type Converters
####################
####################
## When arguments come in (as GET url parameters or POST arguments)
## they can be converted from strings to Python types using incoming
## type converters. To use them, wrap a get_* or post_* method
## using the annotate decorator. When a get_ or post_ method
## is called (by having a corresponding ?get= parameter or action
## argument) the arguments named in the decorator will be converted
## before they are passed to the method.
def annotate(*args, **kwargs):
'''annotate
A decorator which annotates a handler function (GET and/or POST
method) with pre and/or post request filter functions as well as
mapping functions for each argument the action method will receive.
First argument: return converter(s)
The return value converter, if present, can be either a single
callable or a list of callables. The return value of the annotated
function is passed as the only argument to the callable and the
callable returns a string which is sent as the body of the HTTP
response. When the return converter is a list of callables the call
argument and return value of each are chained together. Only the
last callable needs to return a string for the body of the HTTP
response. If the first argument is not present then the return
value of the annotated function must be a string which will be the
body of the HTTP response.
Second argument: input converter(s)
The second argument, if present, is the input converter, and can
be either a single callable or a list of callables. Each is called
with three arguments; 1) the handler instance to which the annotated
method belongs, 2) the request object, 3) the request field storage,
in the case of a POST, or the request query parameters, in the case
of a GET, represented as a dictionary. The return value can either be
None, or a new dictionary representation of the third argument which
will be chained into the next input converter callable and/or used
for the annotated mapping functions described in the next section.
Keyword arguments: action mapping functions
Each keyword argument is either a callable or one of the well
known constants below. In both cases each keyword represents the
method parameter of the same name.
In the case that the keyword argument is set to one of the well
known constants the object/data represented by the constants, as
described below, will be passed into the method as the parameter
of the same name as the keyword argument.
In the case that the keyword argument is a callable AND there is
a query parameter, in the case of a GET, or a field argument, in
the case of a POST, of the same name, then the value of the request
parameter/argument is passed into the callable. The result of the
callable is then passed into the method as the parameter
of the same name as the keyword argument.
Keyword argument constants:
(used as annotation types for retrieving specific information about
the request)
REQUEST - request (corohttpd.HttpRequest)
COOKIE - cookies (Cookie.SimpleCookie) request.get_cookie()
HEADER - HTTP header (mimetools.Message) request.get_headers()
METHOD - HTTP method (str) request.method()
URI - HTTP uri (str) request.uri()
QUERIES - query parameters (list) request.get_query_pairs()
ARGS - arguments (cgi.FieldStorage) request.get_field_storage()
Examples:
To expose a method to be called by placing ?action=foo
in the query string with a single parameter bar of type int:
(e.g. /handler?action=foo&bar=2)
@method()
@annotate(bar = int)
def action_foo(self, bar):
print bar
To express lists of things:
@method()
@annotate(bar=[int])
def action_foo(self, bar):
for someInt in bar:
print someInt
Include converters; return converter to ensure a string response, a
input converter to decode the crypto on bar:
def fini(value):
return str(value)
def init(rec, req, param):
if param.has_key('bar'):
param['bar'] = decode(param['bar'])
return param
@annotate(fini, init, bar = int)
def get_default(self, bar = 0):
print bar
return 2 * bar
Include request object to set response header:
@annotate(fini, init, req = REQUEST, bar = int)
def get_default(self, req, bar = 0):
print bar
req.set_header('X-Bar', bar)
return 2 * bar
'''
TODO = '''
Dicts of things should be expressed by presenting
arguments with a common prefix separated by a dot. For
example:
?get=foo&someDict.bar=1&someDict.baz=foo
Would call:
@annotate(someDict={bar: int, baz: str})
def get_foo(self, someDict):
print someDict['bar']
print someDict['baz']
This is still TODO. (Perhaps colon instead of dot?)
'''
returnConverter = args and args[0]
inputConverter = None
if len(args) == 2:
inputConverter = args[1]
def decorate(method):
method.returnConverter = returnConverter
method.inputConverter = inputConverter
method.types = kwargs
return method
return decorate
def convertList(request, theList):
request.target = request.target[0]
if not isinstance(theList, list):
theList = [theList]
return [convertTypes(request, item) for item in theList]
def convertTuple(request, theTuple):
stuff = []
for i, subtype in enumerate(request.target):
request.target = subtype
stuff.append(convertTypes(request, theTuple[i]))
return tuple(stuff)
def convertDict(request, nothing):
theDict = {}
for field in request.get_field_storage().list:
if field.name.startswith(request.target_name + '.'):
theDict[field.name.split('.', 1)[-1]] = field.value
target = request.target
result = {}
for (key, value) in target.items():
request.target = value
result[key] = convertTypes(request, theDict[key])
return result
typeConverters = converters.copy()
typeConverters.update({
int: lambda target, value: int(value),
float: lambda target, value: float(value),
list: convertList,
tuple: convertTuple,
dict: convertDict,
str: lambda target, value: value,
unicode: lambda target, value: value.decode('utf8'),
types.FunctionType: lambda request, value: request.target(request, value),
object: lambda target, value: value,
bool: lambda target, value: bool(value),
type(REQUEST): lambda request, value: request,
type(SITE): lambda request, value: request.site,
type(RESOURCE): lambda request, value: request.resource,
type(METHOD): lambda request, value: request.method(),
type(URI): lambda request, value: request.uri(),
type(QUERIES): lambda request, value: request.get_query_pairs(),
type(ARGS): lambda request, value: request.get_field_storage(),
type(COOKIE): lambda request, value: request.get_cookie(),
type(HEADER): lambda request, value: request.get_headers(),
})
def registerTypeConverter(convertType, converter):
typeConverters[convertType] = converter
def convertTypes(request, value):
target = request.target
converter = typeConverters.get(type(target))
if converter is None:
converter = typeConverters.get(target)
if converter is None:
raise ValueError, "No converter for type %s (%s)" % (target, value)
return converter(request, value)
def storage_to_dict(fs):
rc = {}
for k, v in dict(fs).iteritems():
if isinstance(v, list):
rc[k] = [isinstance(e, cgi.MiniFieldStorage) and e.value or e for e in v]
elif isinstance(v, basestring):
rc[k] = v
elif not hasattr(v, 'value'):
rc[k] = v
else:
rc[k] = v.value
return rc
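# e.g. (a sketch): a POST of a=1&a=2&b=x yields {'a': ['1', '2'], 'b': 'x'}.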
def applyWithTypeConversion(callable_method, request, **kwargs):
'''
Currently useful kwargs:
`resource` => instance of the resource executing
'''
types = getattr(callable_method, 'types', {})
returnConverter = getattr(callable_method, 'returnConverter', None)
inputConverter = getattr(callable_method, 'inputConverter', None)
fs = request.get_field_storage()
parameters = storage_to_dict(fs)
if inputConverter:
if not isinstance(inputConverter, (list, tuple)):
inputConverter = (inputConverter, )
for conv in inputConverter:
rc = conv(kwargs.get('resource'), request, parameters)
parameters = rc or parameters
try:
converted = {}
for (key, value) in types.items():
request.target_name = key
request.target = value
if parameters.has_key(key):
val = parameters[key]
else:
val = getattr(value, 'default', None)
try:
val = convertTypes(request, val)
except (TypeError, ValueError):
val = None
if val is not None:
converted[key] = val
result = callable_method(**converted)
except TypeError:
raise
if result is None:
result = ''
if not returnConverter:
return False, result
if not isinstance(returnConverter, (list, tuple)):
returnConverter = (returnConverter, )
for conv in returnConverter:
result = conv(result)
return True, result
##AMF Converter Support
class AMFDecodeException(exceptions.Exception):
pass
class AMFFieldStorage(cgi.FieldStorage):
def __init__(self, *args, **kwargs):
self.classmapper = kwargs.pop('class_mapper', None)
cgi.FieldStorage.__init__(self, *args, **kwargs)
def read_single(self):
qs = self.fp.read(self.length)
if qs.strip() == '':
raise AMFDecodeException('empty AMF data on decode')
ct = amfast.context.DecoderContext(qs, amf3=True,
class_def_mapper=self.classmapper)
data = amfast.decoder.decode(ct)
ct = None
self.list = [cgi.MiniFieldStorage(k,v) for k,v in data.amf_payload.iteritems()]
self.skip_lines()
####################
####################
## End Incoming Converters
####################
####################
####################
####################
## URL Traversal
####################
####################
## SiteMap has a root Resource and traverses the URL using the Resource
## interface.
class SiteMap(object):
"""An object which is capable of matching Resource objects to URLs and
delegating rendering responsibility to the matched object.
This class implements the corohttpd handler interface: match
and handle_request. It adapts this interface to something more similar
to the Nevow URL traversal interface: findChild and handle (like
locateChild and renderHTTP in Nevow).
"""
def __init__(self, root, **parameters):
self.root = root
root.child_ = root
self.__dict__.update(parameters)
def __del__(self):
self.root._clear_child_()
def match(self, request):
request.site = self
remaining = tuple(request._path.split('/')[1:])
if request._path.find('://') < 0:
# Provided our path doesn't contain another URL, trim redundant slashes
parts = request._path.split('/')[1:]
remaining = tuple((s for i, s in enumerate(parts) if s or i == (len(parts) - 1)))
child = self.root
handled = ()
while child is not None and remaining:
child, handled, remaining = child.findChild(request, remaining)
if child is None:
return False
#
# Allow resources to delegate to someone else,
# even if they have been selected as the target of rendering
#
newChild = child.willHandle(request)
if newChild is None:
return False
if newChild is not child:
child = newChild
request.segments = handled
request.resource = child
return True
def handle_request(self, request):
request.convert = convert
resource = request.resource
request.set_name(resource)
resource.handle(request)
request.resource = None
#
# Resource specific decorator(s).
#
def method_post(*args, **kwargs):
def decorate(m):
m.ultramini_method = ['post']
m.ultramini_inheritable = kwargs.get('inherit')
return m
if args:
return decorate(args[0])
return decorate
def method_get(*args, **kwargs):
def decorate(m):
m.ultramini_method = ['get']
m.ultramini_inheritable = kwargs.get('inherit')
return m
if args:
return decorate(args[0])
return decorate
def method(*args, **kwargs):
def decorate(m):
m.ultramini_method = ['get', 'post']
m.ultramini_inheritable = kwargs.get('inherit')
return m
if args:
return decorate(args[0])
return decorate
class Resource(object):
contentType = 'text/html'
template = "Resources must provide a template attribute."
_amf_class_mapper = None
global_public_methods = {}
def __init__(self, *args, **kwargs):
self.__init_global__()
self._public_methods = self.global_public_methods[hash(self.__class__)]
return super(Resource, self).__init__(*args, **kwargs)
def __init_global__(self):
if self.global_public_methods.has_key(hash(self.__class__)):
return None
data = {}
for name in dir(self):
element = getattr(self, name)
for label in getattr(element, 'ultramini_method', []):
if self.__class__.__dict__.get(name):
data.setdefault(label, []).extend([name])
elif getattr(element, 'ultramini_inheritable', False):
data.setdefault(label, []).extend([name])
self.global_public_methods[hash(self.__class__)] = data
def __call__(self, request, name):
return self
_child_re = re.compile(r'^child_')
def _clear_child_(self):
"""_clear_child_ deletes this object's 'child_' attribute,
then loops through all this object's attributes, and
recursively calls the _clear_child method of any child_*
attributes
Arguments:
self: mandatory python self arg
"""
try:
del self.child_
except AttributeError, e:
pass
for attr in filter(self._child_re.match, dir(self)):
try:
getattr(self, attr)._clear_child_()
except AttributeError:
pass
def willHandle(self, request):
"""This Resource is about to handle a request.
If it wants to delegate to another Resource, it can return it here.
"""
return self
def findChild(self, request, segments):
"""External URL segment traversal API. This method MUST always
return a tuple of:
(child, handled, remaining)
child may be None to indicate a 404. Handled should be a tuple
of URL segments which were handled by this call to findChild;
remaining should be a tuple of URL segments which were not
handled by this call to findChild.
        findChild can be overridden to implement fancy URL traversal
mechanisms such as handling multiple segments in one call,
doing an internal server-side redirect to another resource and
passing it segments to handle, or delegating this segment
to another resource entirely. However, for most common use
cases, you will not override findChild.
Any methods or attributes named child_* will be mapped to
URL segments. For example, if an instance of Root is set
as the root object, the urls "/foo" and "/bar" will be valid:
class Root(Resource):
child_foo = Resource()
def child_bar(self, request, bar):
return Resource()
Finally, if a childFactory method is defined it will be called
with a single URL segment:
class Root(Resource):
def childFactory(self, request, childName):
## For URL "/foo" childName will be "foo"
return Resource()
"""
current, remaining = segments[0], segments[1:]
childFactory = getattr(
self,
'child_%s' % (current, ),
self.childFactory)
return childFactory(request, current), (current, ), remaining
def childFactory(self, request, childName):
"""Override this to produce instances of Resource to represent the next
url segment below self. The next segment is provided in childName.
"""
return None
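    # Sketch: serving /users/<name> by minting a resource per segment:
    #   class Users(Resource):
    #       def childFactory(self, request, childName):
    #           return UserResource(childName)   # hypothetical Resource subclass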
def handle(self, request):
request.set_header('Content-Type', self.contentType)
handler = getattr(
self, "handle_%s" % (request.method(), ), self.handle_default)
handler(request)
def handle_default(self, request):
request.push(
"<html><body>No handler for method %r present.</body></html>" %
(request.method(), ))
def handle_get(self, request):
get_method = self.getAction('get', request)
if get_method is None:
request.write(self.template)
else:
converted, result = applyWithTypeConversion(get_method, request,
resource=self)
request.write(result)
################
## AMF Specific
################
@classmethod
def mapped_classes(cls):
return [
amf.AMFRequest,
]
def amf_class_mapper(self):
'''
Return an amfast ClassDefMapper object to properly deserialize objects
in this request.
Note: this function may be called multiple times
'''
if not self._amf_class_mapper:
self._amf_class_mapper = amfast.class_def.ClassDefMapper()
# Standard required mappings
amf.registerClassToMapper(amf.AMFRequest, self._amf_class_mapper)
amf.registerClassToMapper(amf.AMFResponse, self._amf_class_mapper)
amf.registerClassToMapper(amf.GenericAMFException, self._amf_class_mapper)
amf.registerClassToMapper(amf.AMFError, self._amf_class_mapper)
# Map classes for subclasses
for cls in self.mapped_classes():
amf.registerClassToMapper(cls, self._amf_class_mapper)
return self._amf_class_mapper
################
## END AMF Specific
################
def getFieldStorage(self, data, headers, environ):
'''getFieldStorage
Result is an instance of cgi.FieldStorage or subclass. This is
provided as a hook for subclasses to override if they want to
parse the incoming POST data differently than a basic FieldStorage.
For example, imgsrv/server overrides this to trick FieldStorage
into saving binary parts directly into the image file directory,
to avoid having to copy the file from a temporary file into the
final location.
data - readable file-stream
        headers - request header dictionary
environ - request environment dictionary
'''
if headers.getheader('Content-Type') == 'application/x-amf' \
and amf and amfast:
return AMFFieldStorage(data, headers, environ=environ,
class_mapper=self.amf_class_mapper())
return cgi.FieldStorage(
data,
headers,
environ = environ,
keep_blank_values = 1)
def getAction(self, key, request):
fs = request.get_field_storage()
actions = []
methods = getattr(self, '_public_methods', {})
for name in methods.get(key, []):
parts = name.split('_')
data = '_'.join(parts[1:])
for value in fs.getlist(parts[0]):
if value != data:
continue
method = getattr(self, name, None)
if method is None:
continue
actions.append(method)
if not actions:
return getattr(
self,
'%s_%s' % (key, fs.getfirst(key, 'default')),
None)
method = actions.pop()
if actions:
raise RuntimeError('multiple methods requested', method, actions)
return method
def handle_post(self, request):
fs = request.get_field_storage()
post_method = self.getAction('post', request)
if post_method is None:
result = self.post(request, fs)
converted = False
else:
converted, result = applyWithTypeConversion(post_method, request,
resource=self)
if not result:
request.write(self.template)
return None
if converted:
request.write(result)
return None
request.set_header('Location', request.convert(request, result))
request.response(303)
request.write('')
def post(self, request, form):
"""post
Override this to handle a form post.
Return a URL to be redirected to.
request: An HttpRequest instance representing the place the form
was posted to.
form: A cgi.FieldStorage instance representing the posted form.
"""
return request.uri()
class MovedPermanently(Resource):
def __init__(self, location):
self.location = location
@annotate(req=REQUEST)
def get_default(self, req):
req.set_header(
'Location',
self.location)
req.response(301) # Moved Permanently
return self.location
class MovedTemporarily(Resource):
def __init__(self, location):
self.location = location
@annotate(req=REQUEST)
def get_default(self, req):
req.set_header(
'Location',
self.location)
req.response(302) # Moved Temporarily
return self.location
class SeeOther(Resource):
def __init__(self, location):
self.location = location
@annotate(req=REQUEST)
def get_default(self, req):
req.set_header(
'Location',
self.location)
req.response(303) # See Other
return self.location
class DebugLoggingResource(Resource):
def enable_debug_logging(self, req, **kwargs):
if 'loglevel' in kwargs:
req.log_level(kwargs['loglevel'])
if kwargs.get('httpread', False):
req.connection().set_debug_read()
req.connection().add_debug_read_data(req.requestline)
req.connection().add_debug_read_data('')
for header in req.get_headers().items():
req.connection().add_debug_read_data('%s: %s' % header)
return None
def findChild(self, req, segments):
debug = {}
for value in req.get_query('ultradebug'):
try:
val = value.split('_')
key = '_'.join(val[:-1])
val = val[-1]
key, val = key and (key, val) or (val, True)
try:
debug[key] = int(val)
except (TypeError, ValueError):
debug[key] = val
except Exception:
print 'Cannot parse debug identifier <%s>' % value
self.enable_debug_logging(req, **debug)
return super(DebugLoggingResource, self).findChild(req, segments)
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class LocalNetworkGatewaysOperations(object):
"""LocalNetworkGatewaysOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2015_06_15.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _create_or_update_initial(
self,
resource_group_name, # type: str
local_network_gateway_name, # type: str
parameters, # type: "_models.LocalNetworkGateway"
**kwargs # type: Any
):
# type: (...) -> "_models.LocalNetworkGateway"
cls = kwargs.pop('cls', None) # type: ClsType["_models.LocalNetworkGateway"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2015-06-15"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json, text/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'LocalNetworkGateway')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('LocalNetworkGateway', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('LocalNetworkGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
local_network_gateway_name, # type: str
parameters, # type: "_models.LocalNetworkGateway"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.LocalNetworkGateway"]
"""Creates or updates a local network gateway in the specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param local_network_gateway_name: The name of the local network gateway.
:type local_network_gateway_name: str
:param parameters: Parameters supplied to the create or update local network gateway operation.
:type parameters: ~azure.mgmt.network.v2015_06_15.models.LocalNetworkGateway
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either LocalNetworkGateway or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2015_06_15.models.LocalNetworkGateway]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.LocalNetworkGateway"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
local_network_gateway_name=local_network_gateway_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('LocalNetworkGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'} # type: ignore
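# A minimal usage sketch (client construction and the gateway model are
# assumed, not shown here): begin_create_or_update returns an LROPoller
# whose .result() blocks until the long-running ARM operation completes.
#
#   poller = network_client.local_network_gateways.begin_create_or_update(
#       resource_group_name='example-rg',
#       local_network_gateway_name='example-gw',
#       parameters=gateway_params)
#   gateway = poller.result()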
def get(
self,
resource_group_name, # type: str
local_network_gateway_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.LocalNetworkGateway"
"""Gets the specified local network gateway in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param local_network_gateway_name: The name of the local network gateway.
:type local_network_gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: LocalNetworkGateway, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2015_06_15.models.LocalNetworkGateway
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.LocalNetworkGateway"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2015-06-15"
accept = "application/json, text/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('LocalNetworkGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'} # type: ignore
def _delete_initial(
self,
resource_group_name, # type: str
local_network_gateway_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2015-06-15"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
local_network_gateway_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified local network gateway.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param local_network_gateway_name: The name of the local network gateway.
:type local_network_gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
local_network_gateway_name=local_network_gateway_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'} # type: ignore
def list(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.LocalNetworkGatewayListResult"]
"""Gets all the local network gateways in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either LocalNetworkGatewayListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2015_06_15.models.LocalNetworkGatewayListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.LocalNetworkGatewayListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2015-06-15"
accept = "application/json, text/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('LocalNetworkGatewayListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways'} # type: ignore
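# A minimal usage sketch (client construction assumed): list() returns an
# ItemPaged iterator that fetches pages lazily, so the gateways in a
# resource group can be consumed with a plain for-loop.
#
#   for gw in network_client.local_network_gateways.list('example-rg'):
#       print(gw.name)  # .name assumed from the ARM resource model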
|
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras callbacks in multi-worker training with TF2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
from absl.testing import parameterized
from tensorflow.python.distribute import collective_all_reduce_strategy as collective_strategy
from tensorflow.python.distribute import combinations as ds_combinations
from tensorflow.python.distribute import multi_process_runner
from tensorflow.python.distribute import multi_worker_test_base as test_base
from tensorflow.python.framework import test_combinations as combinations
from tensorflow.python.keras import callbacks
from tensorflow.python.keras.distribute import distributed_file_utils
from tensorflow.python.keras.distribute import multi_worker_testing_utils
from tensorflow.python.lib.io import file_io
from tensorflow.python.platform import test
def checkpoint_exists(filepath):
"""Returns whether the checkpoint `filepath` refers to exists."""
if filepath.endswith('.h5'):
return file_io.file_exists_v2(filepath)
tf_saved_model_exists = file_io.file_exists_v2(filepath)
tf_weights_only_checkpoint_exists = file_io.file_exists_v2(
filepath + '.index')
return tf_saved_model_exists or tf_weights_only_checkpoint_exists
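# Illustrative paths (hypothetical): checkpoint_exists('/tmp/ckpt.h5') only
# checks for the HDF5 file itself, while checkpoint_exists('/tmp/ckpt')
# matches either a TF checkpoint/SavedModel at that path or a weights-only
# checkpoint via its '/tmp/ckpt.index' sidecar file.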
def _model_setup(test_obj, file_format):
"""Set up a MNIST Keras model for testing purposes.
This function builds a MNIST Keras model and returns relevant information
for testing.
Args:
test_obj: The `TestCase` testing object.
file_format: File format for checkpoints. 'tf' or 'h5'.
Returns:
A tuple of (model, saving_filepath, train_ds, steps) where train_ds is
the training dataset.
"""
batch_size = 64
steps = 2
with collective_strategy.CollectiveAllReduceStrategy().scope():
# TODO(b/142509827): In rare cases this errors out at C++ level with the
# "Connect failed" error message.
train_ds, _ = multi_worker_testing_utils.mnist_synthetic_dataset(
batch_size, steps)
model = multi_worker_testing_utils.get_mnist_model((28, 28, 1))
# Pass saving_filepath from the parent thread to ensure every worker has the
# same filepath to save.
saving_filepath = os.path.join(test_obj.get_temp_dir(),
'checkpoint.' + file_format)
return model, saving_filepath, train_ds, steps
def _get_task_config():
return json.loads(os.environ['TF_CONFIG'])['task']
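# TF_CONFIG holds the standard multi-worker cluster spec; illustratively:
#   {"cluster": {"worker": ["localhost:12345", "localhost:23456"]},
#    "task": {"type": "worker", "index": 0}}
# so _get_task_config() returns e.g. {"type": "worker", "index": 0}.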
class KerasCallbackMultiProcessTest(parameterized.TestCase, test.TestCase):
@ds_combinations.generate(
combinations.combine(
mode=['eager'],
file_format=['h5', 'tf'],
save_weights_only=[True, False]))
def test_model_checkpoint_saves_on_chief_but_not_otherwise(
self, file_format, mode, save_weights_only):
def proc_model_checkpoint_saves_on_chief_but_not_otherwise(
test_obj, file_format):
model, saving_filepath, train_ds, steps = _model_setup(
test_obj, file_format)
num_epoch = 2
extension = os.path.splitext(saving_filepath)[1]
# Incorporate the worker's task type and index into saving_filepath to
# ensure every worker has a unique path. Note that in the normal use case
# saving_filepath will be the same for all workers, but we use different
# ones here just to test that the chief saves the checkpoint while
# non-chief workers don't.
task_config = _get_task_config()
saving_filepath = os.path.join(
test_obj.get_temp_dir(), 'checkpoint_%s_%d%s' %
(task_config['type'], task_config['index'], extension))
# The saving_filepath shouldn't exist at the beginning (as it's unique).
test_obj.assertFalse(checkpoint_exists(saving_filepath))
model.fit(
x=train_ds,
epochs=num_epoch,
steps_per_epoch=steps,
validation_data=train_ds,
validation_steps=steps,
callbacks=[
callbacks.ModelCheckpoint(
filepath=saving_filepath, save_weights_only=save_weights_only)
])
# If it's chief, the model should be saved; if not, the model shouldn't.
test_obj.assertEqual(
checkpoint_exists(saving_filepath), test_base.is_chief())
# If it's chief, the model should be saved (`write_filepath` should
# simply return `saving_filepath`); if not, i.e. for non-chief workers,
# the temporary path generated by `write_filepath` should no longer
# contain the checkpoint that has been deleted.
test_obj.assertEqual(
checkpoint_exists(
distributed_file_utils.write_filepath(
saving_filepath, model._distribution_strategy)),
test_base.is_chief())
multi_process_runner.run(
proc_model_checkpoint_saves_on_chief_but_not_otherwise,
cluster_spec=test_base.create_cluster_spec(num_workers=2),
args=(self, file_format))
@ds_combinations.generate(combinations.combine(mode=['eager']))
def test_model_checkpoint_works_with_same_file_path(self, mode):
def proc_model_checkpoint_works_with_same_file_path(
test_obj, saving_filepath):
model, _, train_ds, steps = _model_setup(test_obj, file_format='')
num_epoch = 2
# The saving_filepath shouldn't exist at the beginning (as it's unique).
test_obj.assertFalse(file_io.file_exists_v2(saving_filepath))
model.fit(
x=train_ds,
epochs=num_epoch,
steps_per_epoch=steps,
callbacks=[callbacks.ModelCheckpoint(filepath=saving_filepath)])
test_obj.assertTrue(file_io.file_exists_v2(saving_filepath))
saving_filepath = os.path.join(self.get_temp_dir(), 'checkpoint')
multi_process_runner.run(
proc_model_checkpoint_works_with_same_file_path,
cluster_spec=test_base.create_cluster_spec(num_workers=2),
args=(self, saving_filepath))
@ds_combinations.generate(combinations.combine(mode=['eager']))
def test_backupandrestore_checkpoint_works_with_interruption(self, mode):
class InterruptingCallback(callbacks.Callback):
def on_epoch_begin(self, epoch, logs=None):
if epoch == 2:
raise RuntimeError('Interrupting!')
class AssertCallback(callbacks.Callback):
def on_epoch_begin(self, epoch, logs=None):
# the interruption happened on epoch 2 as specified in
# InterruptingCallback, so the initial epoch after restart will begin
# at 2.
assert epoch > 1
def proc_model_checkpoint_works_with_same_file_path(test_obj,
saving_filepath):
model, _, train_ds, steps = _model_setup(test_obj, file_format='')
num_epoch = 4
# The saving_filepath shouldn't exist at the beginning (as it's unique).
test_obj.assertFalse(file_io.file_exists_v2(saving_filepath))
bar_dir = os.path.join(os.path.dirname(saving_filepath), 'backup')
try:
model.fit(
x=train_ds,
epochs=num_epoch,
steps_per_epoch=steps,
callbacks=[
callbacks.ModelCheckpoint(filepath=saving_filepath),
callbacks.BackupAndRestore(backup_dir=bar_dir),
InterruptingCallback()
])
except RuntimeError as e:
if 'Interrupting!' not in str(e):
raise
multi_process_runner.get_barrier().wait()
backup_filepath = os.path.join(bar_dir, 'chief', 'checkpoint')
test_obj.assertTrue(file_io.file_exists_v2(backup_filepath))
test_obj.assertTrue(file_io.file_exists_v2(saving_filepath))
model.fit(
x=train_ds,
epochs=num_epoch,
steps_per_epoch=steps,
callbacks=[
callbacks.ModelCheckpoint(filepath=saving_filepath),
callbacks.BackupAndRestore(backup_dir=bar_dir),
AssertCallback()
])
multi_process_runner.get_barrier().wait()
test_obj.assertFalse(file_io.file_exists_v2(backup_filepath))
test_obj.assertTrue(file_io.file_exists_v2(saving_filepath))
saving_filepath = os.path.join(self.get_temp_dir(), 'checkpoint')
multi_process_runner.run(
proc_model_checkpoint_works_with_same_file_path,
cluster_spec=test_base.create_cluster_spec(num_workers=2),
args=(self, saving_filepath))
@ds_combinations.generate(combinations.combine(mode=['eager']))
def test_tensorboard_saves_on_chief_but_not_otherwise(self, mode):
def proc_tensorboard_saves_on_chief_but_not_otherwise(test_obj):
model, _, train_ds, steps = _model_setup(test_obj, file_format='')
num_epoch = 2
# Incorporate the worker's task type and index into saving_filepath to
# ensure every worker has a unique path. Note that in the normal use case
# saving_filepath will be the same for all workers, but we use different
# ones here just to test that the chief saves summaries while non-chief
# workers don't.
task_config = _get_task_config()
saving_filepath = os.path.join(
test_obj.get_temp_dir(),
'logfile_%s_%d' % (task_config['type'], task_config['index']))
# The saving_filepath shouldn't exist at the beginning (as it's unique).
test_obj.assertFalse(file_io.file_exists_v2(saving_filepath))
model.fit(
x=train_ds,
epochs=num_epoch,
steps_per_epoch=steps,
callbacks=[callbacks.TensorBoard(log_dir=saving_filepath)])
# If it's chief, the summaries should be saved in the filepath; if not,
# the directory should be empty (although created). Using
# `file_io.list_directory()` since the directory may be created at this
# point.
test_obj.assertEqual(
bool(file_io.list_directory_v2(saving_filepath)),
test_base.is_chief())
multi_process_runner.run(
proc_tensorboard_saves_on_chief_but_not_otherwise,
cluster_spec=test_base.create_cluster_spec(num_workers=2),
args=(self,))
@ds_combinations.generate(combinations.combine(mode=['eager']))
def test_tensorboard_can_still_save_to_temp_even_if_it_exists(self, mode):
def proc_tensorboard_can_still_save_to_temp_even_if_it_exists(test_obj):
model, _, train_ds, steps = _model_setup(test_obj, file_format='')
num_epoch = 2
saving_filepath = os.path.join(
test_obj.get_temp_dir(), 'logfile_%s' % (_get_task_config()['type']))
saving_filepath_for_temp = os.path.join(saving_filepath, 'workertemp_1')
os.mkdir(saving_filepath)
os.mkdir(saving_filepath_for_temp)
# Verifies that even if `saving_filepath_for_temp` exists, tensorboard
# can still save to temporary directory.
test_obj.assertTrue(file_io.file_exists_v2(saving_filepath_for_temp))
model.fit(
x=train_ds,
epochs=num_epoch,
steps_per_epoch=steps,
callbacks=[callbacks.TensorBoard(log_dir=saving_filepath)])
multi_process_runner.run(
proc_tensorboard_can_still_save_to_temp_even_if_it_exists,
cluster_spec=test_base.create_cluster_spec(num_workers=2),
args=(self,))
@ds_combinations.generate(combinations.combine(mode=['eager']))
def test_tensorboard_works_with_same_file_path(self, mode):
def proc_tensorboard_works_with_same_file_path(test_obj, saving_filepath):
model, _, train_ds, steps = _model_setup(test_obj, file_format='')
num_epoch = 2
# The saving_filepath shouldn't exist at the beginning (as it's unique).
test_obj.assertFalse(file_io.file_exists_v2(saving_filepath))
multi_process_runner.get_barrier().wait()
model.fit(
x=train_ds,
epochs=num_epoch,
steps_per_epoch=steps,
callbacks=[callbacks.TensorBoard(log_dir=saving_filepath)])
multi_process_runner.get_barrier().wait()
test_obj.assertTrue(file_io.list_directory_v2(saving_filepath))
saving_filepath = os.path.join(self.get_temp_dir(), 'logfile')
multi_process_runner.run(
proc_tensorboard_works_with_same_file_path,
cluster_spec=test_base.create_cluster_spec(num_workers=2),
args=(self, saving_filepath))
@ds_combinations.generate(combinations.combine(mode=['eager']))
def test_early_stopping(self, mode):
def proc_early_stopping(test_obj):
class EpochCounterCallback(callbacks.Callback):
def on_epoch_begin(self, epoch, logs):
self.last_epoch = epoch
model, _, train_ds, steps = _model_setup(test_obj, file_format='')
epoch_counter_cbk = EpochCounterCallback()
cbks = [
callbacks.EarlyStopping(
monitor='loss', min_delta=0.05, patience=1, verbose=1),
epoch_counter_cbk
]
# Empirically, `model.fit()` is expected to terminate around the 22nd
# epoch. Asserting that it stops before the 50th epoch avoids flakiness
# and keeps the test predictable.
model.fit(x=train_ds, epochs=100, steps_per_epoch=steps, callbacks=cbks)
test_obj.assertLess(epoch_counter_cbk.last_epoch, 50)
multi_process_runner.run(
proc_early_stopping,
cluster_spec=test_base.create_cluster_spec(num_workers=2),
args=(self,))
if __name__ == '__main__':
multi_process_runner.test_main()
|
|
# Copyright 2015, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# (c) 2015, Kevin Carter <kevin.carter@rackspace.com>
import hashlib
import logging
import os
import re
from ansible import errors
from jinja2.runtime import Undefined
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
"""Filter usage:
Simple filters that may be useful from within the stack
"""
def _deprecated(new_var, old_var=None, old_var_name=None,
new_var_name=None, removed_in=None, fatal=False):
"""Provide a deprecation warning on deprecated variables.
This filter will return the old_var value if defined along with a
deprecation warning that will inform the user that the old variable
should no longer be used.
In order to use this filter the old and new variable names must be provided
to the filter as strings, which are used to render the warning message. The
removed_in option is used to give a date or release name where the old
option will be removed. Optionally, if fatal is set to True, the filter
will raise an exception if the old variable is used.
USAGE: {{ new_var | deprecated(old_var,
"old_var_name",
"new_var_name",
"removed_in",
false) }}
:param new_var: ``object``
:param old_var: ``object``
:param old_var_name: ``str``
:param new_var_name: ``str``
:param removed_in: ``str``
:param fatal: ``bool``
"""
_usage = (
'USAGE: '
'{{ new_var | deprecated(old_var=old_var, old_var_name="old_var_name",'
' new_var_name="new_var_name", removed_in="removed_in",'
' fatal=false) }}'
)
if not old_var_name:
raise errors.AnsibleUndefinedVariable(
'To use this filter you must provide the "old_var_name" option'
' with the string name of the old variable that will be'
' replaced. ' + _usage
)
if not new_var_name:
raise errors.AnsibleUndefinedVariable(
'To use this filter you must provide the "new_var_name" option'
' with the string name of the new variable that will replace the'
' deprecated one. ' + _usage
)
if not removed_in:
raise errors.AnsibleUndefinedVariable(
'To use this filter you must provide the "removed_in" option with'
' the string name of the release where the old_var will be'
' removed. ' + _usage
)
# If old_var is undefined or has a None value return the new_var value
if isinstance(old_var, Undefined) or not old_var:
return new_var
name = 'Ansible-Warning| '
log = logging.getLogger(name)
for handler in log.handlers:
if name == handler.name:
break
else:
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.DEBUG)
stream_handler.name = name
stream_format = logging.Formatter(
'%(asctime)s - %(name)s%(levelname)s => %(message)s'
)
stream_handler.setFormatter(stream_format)
log.setLevel(logging.DEBUG)
log.addHandler(stream_handler)
message = (
'Deprecated Option provided: Deprecated variable: "%(old)s", Removal'
' timeframe: "%(removed_in)s", Future usage: "%(new)s"'
% {'old': old_var_name, 'new': new_var_name, 'removed_in': removed_in}
)
if str(fatal).lower() in ['yes', 'true']:
message = 'Fatally %s' % message
log.fatal(message)
raise RuntimeError(message)
else:
log.warning(message)
return old_var
def _pip_requirement_split(requirement):
version_descriptors = "(>=|<=|>|<|==|~=|!=)"
requirement = requirement.split(';')
requirement_info = re.split(r'%s\s*' % version_descriptors, requirement[0])
name = requirement_info[0]
marker = None
if len(requirement) > 1:
marker = requirement[1]
versions = None
if len(requirement_info) > 1:
versions = requirement_info[1]
return name, versions, marker
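# Worked example (illustrative requirement string):
#   _pip_requirement_split("lxml>=3.4.1;python_version<'3'")
#   -> ('lxml', '>=', "python_version<'3'")
# Because version_descriptors is a capturing group, the middle element is
# the first operator token rather than the version itself; the callers
# below only test it for truthiness ("has a version spec").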
def _lower_set_lists(list_one, list_two):
_list_one = set([i.lower() for i in list_one])
_list_two = set([i.lower() for i in list_two])
return _list_one, _list_two
def bit_length_power_of_2(value):
"""Return the smallest power of 2 greater than a numeric value.
:param value: Number to find the smallest power of 2
:type value: ``int``
:returns: ``int``
"""
return 2**(int(value)-1).bit_length()
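# For example: bit_length_power_of_2(1000) == 1024, since
# (999).bit_length() == 10 and 2 ** 10 == 1024; an exact power of 2 is
# returned unchanged, e.g. bit_length_power_of_2(8) == 8.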
def get_netloc(url):
"""Return the netloc from a URL.
If the input value is not a valid URL the method will raise an Ansible
filter exception.
:param url: the URL to parse
:type url: ``str``
:returns: ``str``
"""
try:
netloc = urlparse(url).netloc
except Exception as exp:
raise errors.AnsibleFilterError(
'Failed to return the netloc of: "%s"' % str(exp)
)
else:
return netloc
def get_netloc_no_port(url):
"""Return the netloc without a port from a URL.
If the input value is not a valid URL the method will raise an Ansible
filter exception.
:param url: the URL to parse
:type url: ``str``
:returns: ``str``
"""
return get_netloc(url=url).split(':')[0]
def get_netorigin(url):
"""Return the netloc from a URL.
If the input value is not a value URL the method will raise an Ansible
filter exception.
:param url: the URL to parse
:type url: ``str``
:returns: ``str``
"""
try:
parsed_url = urlparse(url)
netloc = parsed_url.netloc
scheme = parsed_url.scheme
except Exception as exp:
raise errors.AnsibleFilterError(
'Failed to return the netorigin of: "%s"' % str(exp)
)
else:
return '%s://%s' % (scheme, netloc)
def string_2_int(string):
"""Return the an integer from a string.
The string is hashed, converted to a base36 int, and the modulo of 10240
is returned.
:param string: string to retrieve an int from
:type string: ``str``
:returns: ``int``
"""
# Try to encode utf-8 else pass
try:
string = string.encode('utf-8')
except AttributeError:
pass
hashed_name = hashlib.sha256(string).hexdigest()
return int(hashed_name, 36) % 10240
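# For example (illustrative input): string_2_int('swift') hashes the UTF-8
# bytes with SHA-256, reads the hex digest as a base-36 integer (hex digits
# are a subset of base-36), and maps it into the stable range [0, 10240).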
def pip_requirement_names(requirements):
"""Return a ``str`` of requirement name and list of versions.
:param requirement: Name of a requirement that may have versions within
it. This will use the constant,
VERSION_DESCRIPTORS.
:type requirement: ``str``
:return: ``str``
"""
named_requirements = list()
for requirement in requirements:
name = _pip_requirement_split(requirement)[0]
if name and not name.startswith('#'):
named_requirements.append(name.lower())
return sorted(set(named_requirements))
def pip_constraint_update(list_one, list_two):
_list_one, _list_two = _lower_set_lists(list_one, list_two)
_list_one, _list_two = list(_list_one), list(_list_two)
for item2 in _list_two:
item2_name, item2_versions, _ = _pip_requirement_split(item2)
if item2_versions:
for item1 in _list_one:
if item2_name == _pip_requirement_split(item1)[0]:
item1_index = _list_one.index(item1)
_list_one[item1_index] = item2
break
else:
_list_one.append(item2)
return sorted(_list_one)
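# Worked example (illustrative constraint lists):
#   pip_constraint_update(['lxml>=3.0', 'six'], ['lxml>=3.4.1'])
#   -> ['lxml>=3.4.1', 'six']
# Entries from list_two that carry a version spec replace the same-named
# entry in list_one, or are appended when no match exists; version-less
# entries in list_two are ignored.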
def splitlines(string_with_lines):
"""Return a ``list`` from a string with lines."""
return string_with_lines.splitlines()
def filtered_list(list_one, list_two):
_list_one, _list_two = _lower_set_lists(list_one, list_two)
return list(_list_one-_list_two)
def git_link_parse(repo):
"""Return a dict containing the parts of a git repository.
:param repo: git repo string to parse.
:type repo: ``str``
:returns: ``dict``
"""
if 'git+' in repo:
_git_url = repo.split('git+', 1)[-1]
else:
_git_url = repo
if '@' in _git_url:
url, branch = _git_url.split('@', 1)
else:
url = _git_url
branch = 'master'
name = os.path.basename(url.rstrip('/'))
_branch = branch.split('#')
branch = _branch[0]
plugin_path = None
# Determine if the package is a plugin type
if len(_branch) > 1 and 'subdirectory=' in _branch[-1]:
plugin_path = _branch[-1].split('subdirectory=')[-1].split('&')[0]
return {
'name': name.split('.git')[0].lower(),
'version': branch,
'plugin_path': plugin_path,
'url': url,
'original': repo
}
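# Worked example (illustrative repo string):
#   git_link_parse('git+https://example.com/foo.git@stable/train#subdirectory=bar')
#   -> {'name': 'foo', 'version': 'stable/train', 'plugin_path': 'bar',
#       'url': 'https://example.com/foo.git',
#       'original': 'git+https://example.com/foo.git@stable/train#subdirectory=bar'}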
def git_link_parse_name(repo):
"""Return the name of a git repo."""
return git_link_parse(repo)['name']
def get_nested(target_dict, keys):
"""Retrieves values through a nested dictionary.
If any key on the path is missing, return None
This helps solve convoluted guards in roles/plays such as the following:
('openstack_ansible' not in ansible_local or
'swift' not in ansible_local['openstack_ansible'] or
'venv_tag' not in ansible_local['openstack_ansible']['swift'] or
ansible_local['openstack_ansible']['swift']['venv_tag'] == swift_venv_tag)
With this filter, it could be instead written:
ansible_local|get_nested('openstack_ansible.swift.venv_tag') == swift_venv_tag
"""
try:
key, next_keys = keys.split('.', 1)
except ValueError:
return target_dict.get(keys, None)
try:
next_dict = target_dict[key]
except KeyError:
return None
return get_nested(next_dict, next_keys)
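# A minimal sketch (assumed fact data) of the filter in action:
#   facts = {'openstack_ansible': {'swift': {'venv_tag': 'v1'}}}
#   get_nested(facts, 'openstack_ansible.swift.venv_tag')  # -> 'v1'
#   get_nested(facts, 'openstack_ansible.nova.venv_tag')   # -> None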
class FilterModule(object):
"""Ansible jinja2 filters."""
@staticmethod
def filters():
return {
'bit_length_power_of_2': bit_length_power_of_2,
'netloc': get_netloc,
'netloc_no_port': get_netloc_no_port,
'netorigin': get_netorigin,
'string_2_int': string_2_int,
'pip_requirement_names': pip_requirement_names,
'pip_constraint_update': pip_constraint_update,
'splitlines': splitlines,
'filtered_list': filtered_list,
'git_link_parse': git_link_parse,
'git_link_parse_name': git_link_parse_name,
'deprecated': _deprecated,
'get_nested': get_nested
}
|
|
# Copyright (c) 2012 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_serialization import jsonutils
import six
from neutron.common import constants
from neutron.common import exceptions
from neutron.common import utils
from neutron import context as neutron_context
from neutron.db import api as db_api
from neutron.extensions import l3
from neutron.extensions import portbindings
from neutron.i18n import _LE
from neutron import manager
from neutron.plugins.common import constants as plugin_constants
LOG = logging.getLogger(__name__)
class L3RpcCallback(object):
"""L3 agent RPC callback in plugin implementations."""
# 1.0 L3PluginApi BASE_RPC_API_VERSION
# 1.1 Support update_floatingip_statuses
# 1.2 Added methods for DVR support
# 1.3 Added a method that returns the list of activated services
# 1.4 Added L3 HA update_router_state. This method was later removed,
# since it was unused. The RPC version was not changed
# 1.5 Added update_ha_routers_states
# 1.6 Added process_prefix_update to support IPv6 Prefix Delegation
# 1.7 Added method delete_agent_gateway_port for DVR Routers
target = oslo_messaging.Target(version='1.7')
@property
def plugin(self):
if not hasattr(self, '_plugin'):
self._plugin = manager.NeutronManager.get_plugin()
return self._plugin
@property
def l3plugin(self):
if not hasattr(self, '_l3plugin'):
self._l3plugin = manager.NeutronManager.get_service_plugins()[
plugin_constants.L3_ROUTER_NAT]
return self._l3plugin
@db_api.retry_db_errors
def sync_routers(self, context, **kwargs):
"""Sync routers according to filters to a specific agent.
@param context: contains user information
@param kwargs: host, router_ids
@return: a list of routers
with their interfaces and floating_ips
"""
router_ids = kwargs.get('router_ids')
host = kwargs.get('host')
context = neutron_context.get_admin_context()
if not self.l3plugin:
routers = {}
LOG.error(_LE('No plugin for L3 routing registered! Will reply '
'to l3 agent with empty router dictionary.'))
elif utils.is_extension_supported(
self.l3plugin, constants.L3_AGENT_SCHEDULER_EXT_ALIAS):
if cfg.CONF.router_auto_schedule:
self.l3plugin.auto_schedule_routers(context, host, router_ids)
routers = (
self.l3plugin.list_active_sync_routers_on_active_l3_agent(
context, host, router_ids))
else:
routers = self.l3plugin.get_sync_data(context, router_ids)
if utils.is_extension_supported(
self.plugin, constants.PORT_BINDING_EXT_ALIAS):
self._ensure_host_set_on_ports(context, host, routers)
LOG.debug("Routers returned to l3 agent:\n %s",
utils.DelayedStringRenderer(jsonutils.dumps,
routers, indent=5))
return routers
def _ensure_host_set_on_ports(self, context, host, routers):
for router in routers:
LOG.debug("Checking router: %(id)s for host: %(host)s",
{'id': router['id'], 'host': host})
if router.get('gw_port') and router.get('distributed'):
# '' is used to effectively clear binding of a gw port if not
# bound (snat is not hosted on any l3 agent)
gw_port_host = router.get('gw_port_host') or ''
self._ensure_host_set_on_port(context,
gw_port_host,
router.get('gw_port'),
router['id'])
for p in router.get(constants.SNAT_ROUTER_INTF_KEY, []):
self._ensure_host_set_on_port(context,
gw_port_host,
p, router['id'])
else:
self._ensure_host_set_on_port(
context, host,
router.get('gw_port'),
router['id'],
ha_router_port=router.get('ha'))
for interface in router.get(constants.INTERFACE_KEY, []):
self._ensure_host_set_on_port(
context,
host,
interface,
router['id'],
ha_router_port=router.get('ha'))
interface = router.get(constants.HA_INTERFACE_KEY)
if interface:
self._ensure_host_set_on_port(context, host, interface,
router['id'])
def _ensure_host_set_on_port(self, context, host, port, router_id=None,
ha_router_port=False):
if (port and host is not None and
(port.get('device_owner') !=
constants.DEVICE_OWNER_DVR_INTERFACE and
port.get(portbindings.HOST_ID) != host or
port.get(portbindings.VIF_TYPE) ==
portbindings.VIF_TYPE_BINDING_FAILED)):
# Ports owned by non-HA routers are bound again if they're
# already bound but the router moved to another host.
if not ha_router_port:
# All ports, including ports created for SNAT'ing for
# DVR are handled here
try:
self.plugin.update_port(
context,
port['id'],
{'port': {portbindings.HOST_ID: host}})
# updating port's host to pass actual info to l3 agent
port[portbindings.HOST_ID] = host
except exceptions.PortNotFound:
LOG.debug("Port %(port)s not found while updating "
"agent binding for router %(router)s.",
{"port": port['id'], "router": router_id})
# Ports owned by HA routers should only be bound once, if
# they are unbound. These ports are moved when an agent reports
# that one of its routers moved to the active state.
else:
if not port.get(portbindings.HOST_ID):
active_host = (
self.l3plugin.get_active_host_for_ha_router(
context, router_id))
if active_host:
host = active_host
# If there is currently no active router instance (For
# example it's a new router), the host that requested
# the routers (Essentially a random host) will do. The
# port binding will be corrected when an active
# instance is elected.
try:
self.plugin.update_port(
context,
port['id'],
{'port': {portbindings.HOST_ID: host}})
except exceptions.PortNotFound:
LOG.debug("Port %(port)s not found while updating "
"agent binding for router %(router)s.",
{"port": port['id'], "router": router_id})
elif (port and
port.get('device_owner') ==
constants.DEVICE_OWNER_DVR_INTERFACE):
# Ports that are DVR interfaces have multiple bindings (based on
# the hosts on which DVR router interfaces are spawned). Such
# bindings are created/updated here by invoking
# update_dvr_port_binding
self.plugin.update_dvr_port_binding(context, port['id'],
{'port':
{portbindings.HOST_ID: host,
'device_id': router_id}
})
def get_external_network_id(self, context, **kwargs):
"""Get one external network id for l3 agent.
l3 agent expects only one external network when it performs
this query.
"""
context = neutron_context.get_admin_context()
net_id = self.plugin.get_external_network_id(context)
LOG.debug("External network ID returned to l3 agent: %s",
net_id)
return net_id
def get_service_plugin_list(self, context, **kwargs):
plugins = manager.NeutronManager.get_service_plugins()
return plugins.keys()
def update_floatingip_statuses(self, context, router_id, fip_statuses):
"""Update operational status for a floating IP."""
with context.session.begin(subtransactions=True):
for (floatingip_id, status) in six.iteritems(fip_statuses):
LOG.debug("New status for floating IP %(floatingip_id)s: "
"%(status)s", {'floatingip_id': floatingip_id,
'status': status})
try:
self.l3plugin.update_floatingip_status(context,
floatingip_id,
status)
except l3.FloatingIPNotFound:
LOG.debug("Floating IP: %s no longer present.",
floatingip_id)
# Find all floating IPs known to have been associated with the given
# router for which an update was not received. Set them DOWN mercilessly.
# This situation might occur for some asynchronous backends if
# notifications were missed
known_router_fips = self.l3plugin.get_floatingips(
context, {'last_known_router_id': [router_id]})
# Consider only floating ips which were disassociated in the API
# FIXME(salv-orlando): Filtering in code should be avoided.
# the plugin should offer a way to specify a null filter
fips_to_disable = (fip['id'] for fip in known_router_fips
if not fip['router_id'])
for fip_id in fips_to_disable:
self.l3plugin.update_floatingip_status(
context, fip_id, constants.FLOATINGIP_STATUS_DOWN)
def get_ports_by_subnet(self, context, **kwargs):
"""DVR: RPC called by dvr-agent to get all ports for subnet."""
subnet_id = kwargs.get('subnet_id')
LOG.debug("DVR: subnet_id: %s", subnet_id)
filters = {'fixed_ips': {'subnet_id': [subnet_id]}}
return self.plugin.get_ports(context, filters=filters)
@db_api.retry_db_errors
def get_agent_gateway_port(self, context, **kwargs):
"""Get Agent Gateway port for FIP.
l3 agent expects an Agent Gateway Port to be returned
for this query.
"""
network_id = kwargs.get('network_id')
host = kwargs.get('host')
admin_ctx = neutron_context.get_admin_context()
agent_port = self.l3plugin.create_fip_agent_gw_port_if_not_exists(
admin_ctx, network_id, host)
self._ensure_host_set_on_port(admin_ctx, host, agent_port)
LOG.debug('Agent Gateway port returned : %(agent_port)s with '
'host %(host)s', {'agent_port': agent_port,
'host': host})
return agent_port
def update_ha_routers_states(self, context, **kwargs):
"""Update states for HA routers.
Get a map of router_id to its HA state on a host and update the DB.
State must be in: ('active', 'standby').
"""
states = kwargs.get('states')
host = kwargs.get('host')
LOG.debug('Updating HA routers states on host %s: %s', host, states)
self.l3plugin.update_routers_states(context, states, host)
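# Illustrative payload (hypothetical router UUIDs): an agent might report
#   states = {'<router-uuid-1>': 'active', '<router-uuid-2>': 'standby'}
# and update_routers_states persists those per-host HA states in the DB.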
def process_prefix_update(self, context, **kwargs):
subnets = kwargs.get('subnets')
updated_subnets = []
for subnet_id, prefix in subnets.items():
updated_subnets.append(self.plugin.update_subnet(
context,
subnet_id,
{'subnet': {'cidr': prefix}}))
return updated_subnets
def delete_agent_gateway_port(self, context, **kwargs):
"""Delete Floatingip agent gateway port."""
network_id = kwargs.get('network_id')
host = kwargs.get('host')
admin_ctx = neutron_context.get_admin_context()
self.l3plugin.delete_floatingip_agent_gateway_port(
admin_ctx, host, network_id)
|
|
__author__ = 'greg'
import re
import os
import zipfile
import math
import csv
import json
import numpy
import tarfile
import rollbar
class CsvOut:
def __init__(self,project):
print type(project)
# assert isinstance(project,aggregation_api.AggregationAPI)
self.project = project
self.project_id = project.project_id
self.instructions = project.instructions
self.workflow_names = project.workflow_names
self.workflows = project.workflows
self.__yield_aggregations__ = project.__yield_aggregations__
self.__count_check__ = project.__count_check__
self.retirement_thresholds = project.retirement_thresholds
self.versions = project.versions
# dictionaries to hold the output files
self.marking_csv_files = {}
self.classification_csv_files = {}
self.rollbar_token = project.rollbar_token
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
# report to rollbar (if configured) whether the csv output completed
# successfully or raised an exception
if self.rollbar_token is not None:
rollbar.init(self.rollbar_token,"production")
if exc_type is None:
rollbar.report_message("csv output worked correctly","info")
else:
rollbar.report_exc_info()
def __csv_classification_output__(self,workflow_id,task_id,subject_id,aggregations):
"""
add a row to the classification csv output file
"""
# first column is the subject id
row = str(subject_id)
# now go through each of the possible responses
for answer_index in self.instructions[workflow_id][task_id]["answers"].keys():
# at some point the integer indices seem to have been converted into strings
# if a value isn't there - use 0
if str(answer_index) in aggregations[0].keys():
row += "," + str(aggregations[0][str(answer_index)])
else:
row += ",0"
# add the number of people who saw this subject
row += "," + str(aggregations[1])
self.classification_csv_files[task_id].write(row+"\n")
def __csv_classification_header_setup__(self,workflow_id,task,output_directory):
"""
create the csv headers for classification tasks
:param workflow_id:
:param task:
:param output_directory:
:return:
"""
fname = self.instructions[workflow_id][task]["instruction"][:50]
# remove any characters which shouldn't be in a file name
fname = re.sub(" ","_",fname)
fname = re.sub("\?","",fname)
fname = re.sub("\*","",fname)
fname += ".csv"
self.classification_csv_files[task] = open(output_directory+fname,"wb")
header = "subject_id"
for answer_index in sorted(self.instructions[workflow_id][task]["answers"].keys()):
answer = self.instructions[workflow_id][task]["answers"][answer_index]
answer = re.sub(",","",answer)
answer = re.sub(" ","_",answer)
header += ",p("+answer+")"
header += ",num_users"
self.classification_csv_files[task].write(header+"\n")
def __csv_file_setup__(self,workflow_id):
"""
open csv files for each output and write headers for each file
"""
# close any previously opened files - needed when we have multiple workflows per project
for f in self.marking_csv_files.values():
assert isinstance(f,file)
f.close()
for f in self.classification_csv_files.values():
assert isinstance(f,file)
f.close()
# and reset
self.marking_csv_files = {}
self.classification_csv_files = {}
# start by creating a directory specific to this project
output_directory = "/tmp/"+str(self.project_id)+"/"
if not os.path.exists(output_directory):
os.makedirs(output_directory)
# now make the directory specific to the workflow
# first - remove any bad characters
workflow_name = self.workflow_names[workflow_id]
workflow_name = re.sub(" ","_",workflow_name)
output_directory += workflow_name +"/"
if not os.path.exists(output_directory):
os.makedirs(output_directory)
# create headers for each csv file
classification_tasks,marking_tasks = self.workflows[workflow_id]
for task in marking_tasks:
self.__csv_marking_header_setup__(workflow_id,task,set(marking_tasks[task]),output_directory)
for task in classification_tasks:
print "creating header for classification task " + str(task)
self.__csv_classification_header_setup__(workflow_id,task,output_directory)
# # use the instruction label to create the csv file name
# # todo - what if the instruction labels are the same?
# fname = self.instructions[workflow_id][task]["instruction"][:50]
#
# # remove any characters which shouldn't be in a file name
# fname = re.sub(" ","_",fname)
# fname = re.sub("\?","",fname)
# fname = re.sub("\*","",fname)
# fname += ".csv"
# self.classification_csv_files[task] = open(output_directory+fname,"wb")
# header = "subject_id"
# for answer_index in sorted(self.instructions[workflow_id][task]["answers"].keys()):
# answer = self.instructions[workflow_id][task]["answers"][answer_index]
# answer = re.sub(",","",answer)
# answer = re.sub(" ","_",answer)
# header += ",p("+answer+")"
# header += ",num_users"
# self.classification_csv_files[task].write(header+"\n")
def __write_out__(self,subject_set = None,compress=True):
"""
create the csv outputs for a given set of workflows
the workflows are specified by self.workflows which is determined when the aggregation engine starts
a zipped file is created at the end
"""
assert (subject_set is None) or isinstance(subject_set,int)
tarball = None
if compress:
tarball = tarfile.open("/tmp/"+str(self.project_id)+"export.tar.gz", "w:gz")
for workflow_id in self.workflows:
print "csv output for workflow - " + str(workflow_id)
self.__csv_file_setup__(workflow_id)
classification_tasks,marking_tasks = self.workflows[workflow_id]
for subject_id,task_id,aggregations in self.__yield_aggregations__(workflow_id,subject_set):
# check to see if the correct number of classifications were received
# todo - this is only a stop gap measure until we figure out why some subjects are being
# todo - retired early. Once that is done, we can remove this
# if self.__count_check__(workflow_id,subject_id) < self.retirement_thresholds[workflow_id]:
# print "skipping"
# continue
# are there markings associated with this task?
if task_id in marking_tasks:
for shape in set(marking_tasks[task_id]):
if shape == "polygon":
self.__polygon_summary_output__(workflow_id,task_id,subject_id,aggregations)
self.__polygon_heatmap_output__(workflow_id,task_id,subject_id,aggregations)
# self.__csv_marking__output__(workflow_id,task_id,subject_id,aggregations,marking_tasks[task_id])
# are there any classifications associated with this task
if task_id in classification_tasks:
self.__csv_classification_output__(workflow_id,task_id,subject_id,aggregations)
for fname,f in self.classification_csv_files.items():
assert isinstance(f,file)
if compress:
print "writing out " + fname
f.close()
with open(f.name, "rb") as readfile:
tarInfo = tarball.gettarinfo(fileobj=readfile)
tarball.addfile(tarInfo, fileobj=readfile)
# f.close()
for f in self.marking_csv_files.values():
assert isinstance(f,file)
if compress:
f.close()
with open(f.name, "rb") as readfile:
tarInfo = tarball.gettarinfo(fileobj=readfile)
tarball.addfile(tarInfo, fileobj=readfile)
# finally zip everything (over all workflows) into one zip file
# self.__csv_to_zip__()
if compress:
tarball.close()
return "/tmp/"+str(self.project_id)+"export.tar.gz"
# def __csv_annotations__(self,workflow_id_filter,subject_set):
# # find the major id of the workflow we are filtering
# version_filter = int(math.floor(float(self.versions[workflow_id_filter])))
#
# if subject_set is None:
# subject_set = self.__load_subjects__(workflow_id_filter)
#
# with open(self.csv_classification_file, 'rb') as csvfile:
# reader = csv.reader(csvfile, delimiter=',', quotechar='|')
#
# for row in reader:
# subject_data = row[8]
# annotations = row[7]
# workflow_id = row[2]
# workflow_version = row[4]
#
# # convert to json form
# subject_data = json.loads(subject_data)
# subject_id = subject_data.keys()[0]
#
# # csv file contains classifications from every workflow - so make sure we find
# # only the one we currently want
# if int(workflow_id) != workflow_id_filter:
# continue
#
# # if these are not one of the subjects we are looking for
# if subject_id not in subject_set:
# continue
#
# # convert to float
# workflow_version = float(workflow_version)
# # if we are not at the correct major version id, skip
# if workflow_version < version_filter:
# continue
def __csv_marking_header_setup__(self,workflow_id,task,tools,output_directory):
"""
tools - says what sorts of different types of shapes/tools we have to do deal with for this task
we can either give the output for each tool in a completely different csv file - more files, might
be slightly overwhelming, but then we could make the column headers more understandable
"""
if "polygon" in tools:
key = task+"polygon_summary"
self.marking_csv_files[key] = open(output_directory+task+"_polygons_summary.csv","wb")
header = "subject_id,num_users,minimum_users_per_cluster,area(noise),tool_certainity"
for tool_id in sorted(self.instructions[workflow_id][task]["tools"].keys()):
tool = self.instructions[workflow_id][task]["tools"][tool_id]["marking tool"]
tool = re.sub(" ","_",tool)
header += ",area("+tool+")"
self.marking_csv_files[key].write(header+"\n")
key = task+"polygon_heatmap"
self.marking_csv_files[key] = open(output_directory+task+"_polygons_heatmap.csv","wb")
header = "subject_id,num_users,pts"
self.marking_csv_files[key].write(header+"\n")
# print workflow_id
# print task
# assert False
# # build up the header row
# header = "subject_id"
# for tool_id in sorted(self.instructions[workflow_id][task]["tools"].keys()):
# tool = self.instructions[workflow_id][task]["tools"][tool_id]["marking tool"]
# header += ","+tool
# header += ",mean probability,median probability,mean tool likelihood,median tool likelihood,number of users"
# self.marking_csv_files[task].write(header+"\n")
def __polygon_heatmap_output__(self,workflow_id,task_id,subject_id,aggregations):
"""
        print out regions according to how many users selected that region - so we can make a heatmap
        of the results
:param workflow_id:
:param task_id:
:param subject_id:
:param aggregations:
:return:
"""
key = task_id+"polygon_heatmap"
for cluster_index,cluster in aggregations["polygon clusters"].items():
# each cluster refers to a specific tool type - so there can actually be multiple blobs
# (or clusters) per cluster
# not actually clusters
if cluster_index in ["param","all_users"]:
continue
if cluster["tool classification"] is not None:
# this result is not relevant to the heatmap
continue
row = str(subject_id) + "," + str(cluster["num users"]) + ",\"" + str(cluster["center"]) + "\""
self.marking_csv_files[key].write(row+"\n")
def __polygon_summary_output__(self,workflow_id,task_id,subject_id,aggregations):
"""
print out a csv summary of the polygon aggregations (so not the individual xy points)
need to know the workflow and task id so we can look up the instructions
that way we can know if there is no output for a given tool - that tool wouldn't appear
at all in the aggregations
"""
        # find out which tools actually correspond to polygons - they could correspond to other tools/shapes
marking_shapes = self.workflows[workflow_id][1][task_id]
polygon_tools = [tool_id for tool_id,shape in enumerate(marking_shapes) if shape == "polygon"]
        area_per_type = {}  # {t: 0 for t in polygon_tools}
        certainty_per_type = {}  # {t: -1 for t in polygon_tools}
row = str(subject_id)
# if noise_area stays 0, that means that there wasn't any noise at all :)
noise_area = 0
num_users = 0
for cluster_index,cluster in aggregations["polygon clusters"].items():
# each cluster refers to a specific tool type - so there can actually be multiple blobs
# (or clusters) per cluster
# not actually clusters
if cluster_index == "all_users":
num_users = len(cluster)
continue
if cluster_index in ["param","all_users"]:
continue
if cluster["tool classification"] is None:
# this result is not relevant to the summary stats
continue
# this value will just get repeatedly read in - which is fine
noise_area = cluster["incorrect area"]
            # certainty = -1 => empty image
if cluster["certainty"] >= 0:
most_likely_type = cluster["tool classification"]
area_per_type[most_likely_type] = cluster["area"]
certainty_per_type[most_likely_type] = cluster["certainty"]
row += ","+str(num_users)
# todo - don't hard code this
row += ",3"
row += "," + str(noise_area)
# calculate the overall (weighted) certainty
area = [area_per_type[t] for t in polygon_tools if t in area_per_type]
certainty = [certainty_per_type[t] for t in polygon_tools if t in certainty_per_type]
assert len(area) == len(certainty)
if area != []:
weighted_overall_certainty = numpy.average(certainty,weights =area)
else:
weighted_overall_certainty = "NA"
row += ","+str(weighted_overall_certainty)
for t in polygon_tools:
if t in area_per_type:
row += ","+str(area_per_type[t])
else:
row += ",0"
key = task_id+"polygon_summary"
self.marking_csv_files[key].write(row+"\n")
def __csv_to_zip__(self):
"""
put the results into a nice csv file
"""
# code taken from http://stackoverflow.com/questions/1855095/how-to-create-a-zip-archive-of-a-directory
zipf = zipfile.ZipFile("/tmp/"+str(self.project_id)+".zip", 'w')
# walk through the output directory, compressing as we go
for root, dirs, files in os.walk("/tmp/"+str(self.project_id)+"/"):
for file in files:
zipf.write(os.path.join(root, file))
zipf.close()
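# A minimal sketch (hypothetical numbers) of the weighted-certainty computation
# used in __polygon_summary_output__ above: per-tool certainties are combined
# into a single score, weighted by the area each tool's polygons cover.
#
#     area = [120.0, 30.0]        # pixels covered per polygon tool
#     certainty = [0.9, 0.6]      # per-tool classification certainty
#     numpy.average(certainty, weights=area)  # -> 0.84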
|
|
"""Support for Eight Sleep sensors."""
from __future__ import annotations
import logging
from typing import Any
from pyeight.eight import EightSleep
from homeassistant.components.sensor import SensorEntity
from homeassistant.const import PERCENTAGE, TEMP_CELSIUS, TEMP_FAHRENHEIT
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType
from . import (
CONF_SENSORS,
DATA_API,
DATA_EIGHT,
DATA_HEAT,
DATA_USER,
EightSleepBaseEntity,
EightSleepHeatDataCoordinator,
EightSleepUserDataCoordinator,
EightSleepUserEntity,
)
ATTR_ROOM_TEMP = "Room Temperature"
ATTR_AVG_ROOM_TEMP = "Average Room Temperature"
ATTR_BED_TEMP = "Bed Temperature"
ATTR_AVG_BED_TEMP = "Average Bed Temperature"
ATTR_RESP_RATE = "Respiratory Rate"
ATTR_AVG_RESP_RATE = "Average Respiratory Rate"
ATTR_HEART_RATE = "Heart Rate"
ATTR_AVG_HEART_RATE = "Average Heart Rate"
ATTR_SLEEP_DUR = "Time Slept"
ATTR_LIGHT_PERC = f"Light Sleep {PERCENTAGE}"
ATTR_DEEP_PERC = f"Deep Sleep {PERCENTAGE}"
ATTR_REM_PERC = f"REM Sleep {PERCENTAGE}"
ATTR_TNT = "Tosses & Turns"
ATTR_SLEEP_STAGE = "Sleep Stage"
ATTR_TARGET_HEAT = "Target Heating Level"
ATTR_ACTIVE_HEAT = "Heating Active"
ATTR_DURATION_HEAT = "Heating Time Remaining"
ATTR_PROCESSING = "Processing"
ATTR_SESSION_START = "Session Start"
ATTR_FIT_DATE = "Fitness Date"
ATTR_FIT_DURATION_SCORE = "Fitness Duration Score"
ATTR_FIT_ASLEEP_SCORE = "Fitness Asleep Score"
ATTR_FIT_OUT_SCORE = "Fitness Out-of-Bed Score"
ATTR_FIT_WAKEUP_SCORE = "Fitness Wakeup Score"
_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(
hass: HomeAssistant,
config: ConfigType,
async_add_entities: AddEntitiesCallback,
    discovery_info: dict[str, list[tuple[str, str]]] | None = None,
) -> None:
"""Set up the eight sleep sensors."""
if discovery_info is None:
return
name = "Eight"
sensors = discovery_info[CONF_SENSORS]
eight: EightSleep = hass.data[DATA_EIGHT][DATA_API]
heat_coordinator: EightSleepHeatDataCoordinator = hass.data[DATA_EIGHT][DATA_HEAT]
user_coordinator: EightSleepUserDataCoordinator = hass.data[DATA_EIGHT][DATA_USER]
if hass.config.units.is_metric:
units = "si"
else:
units = "us"
all_sensors: list[SensorEntity] = []
for side, sensor in sensors:
if sensor == "bed_state":
all_sensors.append(
EightHeatSensor(name, heat_coordinator, eight, side, sensor)
)
elif sensor == "room_temperature":
all_sensors.append(
EightRoomSensor(name, user_coordinator, eight, side, sensor, units)
)
else:
all_sensors.append(
EightUserSensor(name, user_coordinator, eight, side, sensor, units)
)
async_add_entities(all_sensors)
class EightHeatSensor(EightSleepBaseEntity, SensorEntity):
"""Representation of an eight sleep heat-based sensor."""
def __init__(
self,
name: str,
coordinator: EightSleepHeatDataCoordinator,
eight: EightSleep,
side: str | None,
sensor: str,
) -> None:
"""Initialize the sensor."""
super().__init__(name, coordinator, eight, side, sensor)
self._attr_native_unit_of_measurement = PERCENTAGE
assert self._usrobj
_LOGGER.debug(
"Heat Sensor: %s, Side: %s, User: %s",
self._sensor,
self._side,
self._usrobj.userid,
)
@property
def native_value(self) -> int:
"""Return the state of the sensor."""
assert self._usrobj
return self._usrobj.heating_level
@property
def extra_state_attributes(self) -> dict[str, Any]:
"""Return device state attributes."""
assert self._usrobj
return {
ATTR_TARGET_HEAT: self._usrobj.target_heating_level,
ATTR_ACTIVE_HEAT: self._usrobj.now_heating,
ATTR_DURATION_HEAT: self._usrobj.heating_remaining,
}
def _get_breakdown_percent(
attr: dict[str, Any], key: str, denominator: int | float
) -> int | float:
"""Get a breakdown percent."""
try:
return round((attr["breakdown"][key] / denominator) * 100, 2)
except ZeroDivisionError:
return 0
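# For example (hypothetical breakdown data): with attr["breakdown"] ==
# {"light": 120, "deep": 60, "rem": 20, "awake": 10}, the time asleep is
# 210 - 10 == 200, so _get_breakdown_percent(attr, "light", 200) returns
# round(120 / 200 * 100, 2) == 60.0; a zero denominator yields 0 rather than
# raising ZeroDivisionError.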
class EightUserSensor(EightSleepUserEntity, SensorEntity):
"""Representation of an eight sleep user-based sensor."""
def __init__(
self,
name: str,
coordinator: EightSleepUserDataCoordinator,
eight: EightSleep,
side: str | None,
sensor: str,
units: str,
) -> None:
"""Initialize the sensor."""
super().__init__(name, coordinator, eight, side, sensor, units)
if self._sensor == "bed_temperature":
self._attr_icon = "mdi:thermometer"
_LOGGER.debug(
"User Sensor: %s, Side: %s, User: %s",
self._sensor,
self._side,
self._usrobj.userid if self._usrobj else None,
)
@property
def native_value(self) -> str | int | float | None:
"""Return the state of the sensor."""
if not self._usrobj:
return None
if "current" in self._sensor:
if "fitness" in self._sensor:
return self._usrobj.current_sleep_fitness_score
return self._usrobj.current_sleep_score
if "last" in self._sensor:
return self._usrobj.last_sleep_score
if self._sensor == "bed_temperature":
temp = self._usrobj.current_values["bed_temp"]
try:
if self._units == "si":
return round(temp, 2)
return round((temp * 1.8) + 32, 2)
except TypeError:
return None
if self._sensor == "sleep_stage":
return self._usrobj.current_values["stage"]
return None
@property
def native_unit_of_measurement(self) -> str | None:
"""Return the unit the value is expressed in."""
if self._sensor in ("current_sleep", "last_sleep", "current_sleep_fitness"):
return "Score"
if self._sensor == "bed_temperature":
if self._units == "si":
return TEMP_CELSIUS
return TEMP_FAHRENHEIT
return None
def _get_rounded_value(
self, attr: dict[str, Any], key: str, use_units: bool = True
) -> int | float | None:
"""Get rounded value based on units for given key."""
try:
if self._units == "si" or not use_units:
return round(attr["room_temp"], 2)
return round((attr["room_temp"] * 1.8) + 32, 2)
except TypeError:
return None
@property
def extra_state_attributes(self) -> dict[str, Any] | None:
"""Return device state attributes."""
attr = None
if "current" in self._sensor and self._usrobj:
if "fitness" in self._sensor:
attr = self._usrobj.current_fitness_values
else:
attr = self._usrobj.current_values
elif "last" in self._sensor and self._usrobj:
attr = self._usrobj.last_values
if attr is None:
# Skip attributes if sensor type doesn't support
return None
if "fitness" in self._sensor:
state_attr = {
ATTR_FIT_DATE: attr["date"],
ATTR_FIT_DURATION_SCORE: attr["duration"],
ATTR_FIT_ASLEEP_SCORE: attr["asleep"],
ATTR_FIT_OUT_SCORE: attr["out"],
ATTR_FIT_WAKEUP_SCORE: attr["wakeup"],
}
return state_attr
state_attr = {ATTR_SESSION_START: attr["date"]}
state_attr[ATTR_TNT] = attr["tnt"]
state_attr[ATTR_PROCESSING] = attr["processing"]
if attr.get("breakdown") is not None:
sleep_time = sum(attr["breakdown"].values()) - attr["breakdown"]["awake"]
state_attr[ATTR_SLEEP_DUR] = sleep_time
state_attr[ATTR_LIGHT_PERC] = _get_breakdown_percent(
attr, "light", sleep_time
)
state_attr[ATTR_DEEP_PERC] = _get_breakdown_percent(
attr, "deep", sleep_time
)
state_attr[ATTR_REM_PERC] = _get_breakdown_percent(attr, "rem", sleep_time)
room_temp = self._get_rounded_value(attr, "room_temp")
bed_temp = self._get_rounded_value(attr, "bed_temp")
if "current" in self._sensor:
state_attr[ATTR_RESP_RATE] = self._get_rounded_value(
attr, "resp_rate", False
)
state_attr[ATTR_HEART_RATE] = self._get_rounded_value(
attr, "heart_rate", False
)
state_attr[ATTR_SLEEP_STAGE] = attr["stage"]
state_attr[ATTR_ROOM_TEMP] = room_temp
state_attr[ATTR_BED_TEMP] = bed_temp
elif "last" in self._sensor:
state_attr[ATTR_AVG_RESP_RATE] = self._get_rounded_value(
attr, "resp_rate", False
)
state_attr[ATTR_AVG_HEART_RATE] = self._get_rounded_value(
attr, "heart_rate", False
)
state_attr[ATTR_AVG_ROOM_TEMP] = room_temp
state_attr[ATTR_AVG_BED_TEMP] = bed_temp
return state_attr
class EightRoomSensor(EightSleepUserEntity, SensorEntity):
"""Representation of an eight sleep room sensor."""
def __init__(
self,
name: str,
coordinator: EightSleepUserDataCoordinator,
eight: EightSleep,
side: str | None,
sensor: str,
units: str,
) -> None:
"""Initialize the sensor."""
super().__init__(name, coordinator, eight, side, sensor, units)
self._attr_icon = "mdi:thermometer"
self._attr_native_unit_of_measurement: str = (
TEMP_CELSIUS if self._units == "si" else TEMP_FAHRENHEIT
)
@property
def native_value(self) -> int | float | None:
"""Return the state of the sensor."""
        temp = self._eight.room_temperature
try:
if self._units == "si":
return round(temp, 2)
return round((temp * 1.8) + 32, 2)
except TypeError:
return None
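# A minimal sketch (hypothetical reading) of the unit handling shared by the
# sensors above: values arrive from the API in Celsius and are converted to
# Fahrenheit only when Home Assistant is not configured for metric units.
#
#     temp = 21.5                      # Celsius reading
#     round((temp * 1.8) + 32, 2)      # -> 70.7 when self._units != "si"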
|
|
# -*- coding: utf-8 -*-
"""Module providing views for a contentpage section"""
from AccessControl import Unauthorized
from Acquisition import aq_inner
from Acquisition import aq_parent
from plone import api
from plone.api.exc import InvalidParameterError
from plone.protect.utils import addTokenToUrl
from Products.CMFPlone.utils import safe_unicode
from Products.Five.browser import BrowserView
from zope.component import getMultiAdapter
from zope.component import getUtility
from aha.sitecontent.mailer import create_plaintext_message
from aha.sitecontent.mailer import prepare_email_message
from aha.sitecontent.mailer import get_mail_template
from aha.sitecontent.mailer import send_mail
from aha.sitecontent.interfaces import IResponsiveImagesTool
from aha.sitecontent import _
class PageSectionView(BrowserView):
""" Page Section default view """
def __call__(self):
return self.render()
def render(self):
return self.index()
def parent_page(self):
return aq_parent(aq_inner(self.context))
def rendered_page_snippet(self):
context = aq_inner(self.context)
snippet = context.restrictedTraverse('@@pagesection-snippet')()
if context.displayInquiryForm:
snippet = context.restrictedTraverse('@@page-section-form')()
return snippet
class PageSectionSnippet(BrowserView):
""" Embeddable section content snippet """
def field_has_data(self, fieldname):
""" Check wether a given schema key returns a value"""
context = aq_inner(self.context)
try:
video_link = getattr(context, fieldname, None)
except AttributeError:
video_link = None
if video_link is not None:
return True
return False
def has_video_link(self):
return self.field_has_data('videoLink')
def has_external_image(self):
return self.field_has_data('externalImage')
def show_image(self):
display = True
if self.has_video_link() or self.has_external_image():
display = False
return display
def get_image_data(self, uuid):
tool = getUtility(IResponsiveImagesTool)
return tool.create(uuid)
class PageSectionForm(BrowserView):
""" Embeddable section content snippet including inquiry form """
def __call__(self):
return self.render()
def update(self):
self.errors = {}
unwanted = ('_authenticator', 'form.button.Submit')
required = ['email']
required_boolean = ('privacy-policy-agreement', 'privacy-policy')
if 'form.button.Submit' in self.request:
authenticator = getMultiAdapter((self.context, self.request),
name=u"authenticator")
if not authenticator.verify():
raise Unauthorized
form = self.request.form
form_data = {}
form_errors = {}
error_idx = 0
if self.privacy_policy_enabled():
for field_name in required_boolean:
                    if field_name not in form:
form_errors[field_name] = self.required_field_error()
error_idx += 1
for value in form:
if value not in unwanted:
form_data[value] = safe_unicode(form[value])
if not form[value] and value in required:
form_errors[value] = self.required_field_error()
error_idx += 1
else:
error = {
'active': False,
'msg': form[value]
}
form_errors[value] = error
if error_idx > 0:
self.errors = form_errors
else:
self.send_inquiry(form)
def render(self):
self.update()
return self.index()
def default_value(self, error):
value = ''
if error['active'] is False:
value = error['msg']
return value
@staticmethod
def required_field_error():
translation_service = api.portal.get_tool(name="translation_service")
error = {}
error_msg = _(u"This field is required")
error['active'] = True
error['msg'] = translation_service.translate(
error_msg,
'aha.sitecontent',
target_language=api.portal.get_default_language()
)
return error
@staticmethod
def privacy_policy_enabled():
return True
@staticmethod
def privacy_policy_url():
portal = api.portal.get()
portal_url = portal.absolute_url()
url = '{0}/raum/datenschutzerklaerung'.format(portal_url)
return url
def send_inquiry(self, data):
context = aq_inner(self.context)
subject = _(u"Inquiry from website visitor")
email_subject = api.portal.translate(
"Inquiry from website visitor",
'aha.sitecontent',
api.portal.get_current_language())
data['subject'] = email_subject
mail_tpl = self._compose_message(data)
mail_plain = create_plaintext_message(mail_tpl)
msg = prepare_email_message(mail_tpl, mail_plain)
recipients = ['service@aha360.com', ]
send_mail(
msg,
recipients,
email_subject
)
context_parent = aq_parent(context)
next_url = '{0}/@@inquiry-form-dispatched/'.format(
context_parent.absolute_url()
)
url = addTokenToUrl(next_url)
return self.request.response.redirect(url)
def _compose_message(self, data):
portal = api.portal.get()
portal_url = portal.absolute_url()
template_vars = {
'email': data['email'],
'subject': str(data['subject']),
'fullname': data['fullname'],
'phone': data['phone'],
'message': data['comment'],
'url': portal_url
}
template_name = 'inquiry-mail.html'
message = get_mail_template(template_name, template_vars)
return message
def field_has_data(self, fieldname):
""" Check wether a given schema key returns a value"""
context = aq_inner(self.context)
try:
video_link = getattr(context, fieldname, None)
except AttributeError:
video_link = None
if video_link is not None:
return True
return False
def has_video_link(self):
return self.field_has_data('videoLink')
def has_external_image(self):
return self.field_has_data('externalImage')
def show_image(self):
display = True
if self.has_video_link() or self.has_external_image():
display = False
return display
def get_image_data(self, uuid):
tool = getUtility(IResponsiveImagesTool)
return tool.create(uuid)
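# A minimal sketch (hypothetical form input) of the error-dict convention used
# by PageSectionForm.update(): missing required fields get an active error with
# a translated message, while submitted values are kept with active=False so
# that default_value() can re-render them after a failed validation.
#
#     form = {'email': '', 'fullname': 'Jane Doe'}
#     # -> self.errors == {
#     #        'email': {'active': True, 'msg': u'This field is required'},
#     #        'fullname': {'active': False, 'msg': u'Jane Doe'},
#     #    }
#     # (the privacy-policy checkboxes are validated the same way when enabled)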
|
|
#!/usr/bin/env python
"""A class for handling 5C read data."""
import os
import sys
import numpy
import h5py
try:
    import pysam
except ImportError:
    pass
class FiveCData(object):
"""
This class handles interaction count data for 5C experiments.
The FiveCData class contains all of the interaction information for a 5C experiment, including pairs of fragment indices and their associated counts.
.. note::
This class is also available as hifive.FiveCData
When initialized, this class creates an h5dict in which to store all data associated with this object.
:param filename: The file name of the h5dict. This should end with the suffix '.hdf5'
:type filename: str.
:param mode: The mode to open the h5dict with. This should be 'w' for creating or overwriting an h5dict with name given in filename.
:type mode: str.
:param silent: Indicates whether to print information about function execution for this object.
:type silent: bool.
:returns: :class:`FiveCData` class object.
    :Attributes: * **file** (*str.*) - A string containing the name of the file passed during object creation for saving the object to.
* **silent** (*bool.*) - A boolean indicating whether to suppress all of the output messages.
* **history** (*str.*) - A string containing all of the commands executed on this object and their outcomes.
"""
def __init__(self, filename, mode='r', silent=False):
"""Create a :class:`FiveCData` object."""
self.file = os.path.abspath(filename)
self.silent = silent
self.history = ''
if mode != 'w':
self.load()
return None
def __getitem__(self, key):
"""Dictionary-like lookup."""
if key in self.__dict__:
return self.__dict__[key]
else:
return None
def __setitem__(self, key, value):
"""Dictionary-like value setting."""
self.__dict__[key] = value
return None
def save(self):
"""
Save analysis parameters to h5dict.
:returns: None
"""
        self.history = self.history.replace("'None'", "None")
datafile = h5py.File(self.file, 'w')
for key in self.__dict__.keys():
if key in ['file', 'chr2int', 'frags', 'silent']:
continue
elif isinstance(self[key], numpy.ndarray):
datafile.create_dataset(key, data=self[key])
elif not isinstance(self[key], dict):
datafile.attrs[key] = self[key]
datafile.close()
return None
def load(self):
"""
Load data from h5dict specified at object creation.
Any call of this function will overwrite current object data with values from the last :func:`save` call.
:returns: None
"""
datafile = h5py.File(self.file, 'r')
for key in datafile.keys():
self[key] = numpy.copy(datafile[key])
for key in datafile['/'].attrs.keys():
self[key] = datafile['/'].attrs[key]
        # ensure fragment h5dict exists
if 'fragfilename' in self.__dict__:
fragfilename = self.fragfilename
if fragfilename[:2] == './':
fragfilename = fragfilename[2:]
parent_count = fragfilename.count('../')
fragfilename = '/'.join(self.file.split('/')[:-(1 + parent_count)] +
fragfilename.lstrip('/').split('/')[parent_count:])
if not os.path.exists(fragfilename):
if not self.silent:
print >> sys.stderr, ("Could not find %s. No fragments loaded.\n") % (fragfilename),
else:
self.frags = h5py.File(fragfilename, 'r')
# create dictionary for converting chromosome names to indices
self.chr2int = {}
for i, chrom in enumerate(self.frags['chromosomes']):
self.chr2int[chrom] = i
datafile.close()
return None
def load_data_from_counts(self, fragfilename, filelist):
"""
Read interaction counts from a text file(s) and place in h5dict.
:param fragfilename: This specifies the file name of the :class:`Fragment` object to associate with the dataset.
:type fragfilename: str.
:param filelist: A list containing all of the file names of counts text files to be included in the dataset. If only one file is needed, this may be passed as a string.
:type filelist: list
:returns: None
:Attributes: * **fragfilename** (*str.*) - A string containing the relative path of the fragment file.
                     * **cis_data** (*ndarray*) - A numpy array of type int32 and shape N x 3 where N is the number of valid non-zero intra-regional fragment pairings observed in the data. The first column contains the fragment index (from the 'fragments' array in the Fragment object) of the upstream fragment, the second column contains the index of the downstream fragment, and the third column contains the number of reads observed for that fragment pair.
* **cis_indices** (*ndarray*) - A numpy array of type int64 and a length of the number of fragments + 1. Each position contains the first entry for the correspondingly-indexed fragment in the first column of 'cis_data'. For example, all of the downstream cis interactions for the fragment at index 5 in the Fragment object 'fragments' array are in cis_data[cis_indices[5]:cis_indices[6], :].
* **trans_data** (*ndarray*) - A numpy array of type int32 and shape N x 3 where N is the number of valid non-zero inter-regional fragment pairings observed in the data. The first column contains the fragment index (from the 'fragments' array in the Fragment object) of the upstream fragment (upstream also refers to the lower indexed chromosome in this context), the second column contains the index of the downstream fragment, and the third column contains the number of reads observed for that fragment pair.
                     * **trans_indices** (*ndarray*) - A numpy array of type int64 and a length of the number of fragments + 1. Each position contains the first entry for the correspondingly-indexed fragment in the first column of 'trans_data'. For example, all of the downstream trans interactions for the fragment at index 5 in the Fragment object 'fragments' array are in trans_data[trans_indices[5]:trans_indices[6], :].
                     * **frags** (*filestream*) - A filestream to the hdf5 Fragment file such that all saved Fragment attributes can be accessed through this class attribute.
When data is loaded the 'history' attribute is updated to include the history of the Fragment file that becomes associated with it.
"""
self.history += "FiveCData.load_data_from_counts(fragfilename='%s', filelist=%s) - " % (fragfilename, str(filelist))
# determine if fragment file exists and if so, load it
if not os.path.exists(fragfilename):
if not self.silent:
print >> sys.stderr, \
("The fragment file %s was not found. No data was loaded.\n") % (fragfilename),
self.history += "Error: '%s' not found\n" % fragfilename
return None
self.fragfilename = "%s/%s" % (os.path.relpath(os.path.dirname(os.path.abspath(fragfilename)),
os.path.dirname(self.file)), os.path.basename(fragfilename))
self.frags = h5py.File(fragfilename, 'r')
self.history = self.frags['/'].attrs['history'] + self.history
strands = self.frags['fragments']['strand'][...]
chr2int = {}
for i, j in enumerate(self.frags['chromosomes'][:]):
chr2int[j] = i
# create fragment name dictionary
names = {}
for i in range(self.frags['fragments'].shape[0]):
names[self.frags['fragments']['name'][i]] = i
# load data from all files, skipping if name not in the fragment file.
if isinstance(filelist, str):
filelist = [filelist]
total_reads = 0
data = {}
for fname in filelist:
reads = 0
if not os.path.exists(fname):
if not self.silent:
print >> sys.stderr, ("The file %s was not found...skipped.\n") % (fname.split('/')[-1]),
self.history += "Error: '%s' not found, " % fname
continue
if not self.silent:
print >> sys.stderr, ("Loading data from %s...") % (fname.split('/')[-1]),
input = open(fname, 'r')
for line in input:
temp = line.strip('\n').split('\t')
if temp[0] not in names or temp[1] not in names or temp[0] == temp[1]:
continue
frag1 = names[temp[0]]
frag2 = names[temp[1]]
# if both in same orientation, skip
if strands[frag1] == strands[frag2]:
continue
pair = (min(frag1, frag2), max(frag1, frag2))
if pair not in data:
data[pair] = 0
                data[pair] += int(temp[2])
reads += int(temp[2])
input.close()
if not self.silent:
print >> sys.stderr, ("%i validly-mapped reads loaded.\n") % (reads),
total_reads += reads
        if len(data) == 0:
            if not self.silent:
                print >> sys.stderr, ("No valid data was loaded.\n"),
            self.history += "Error: no valid data loaded\n"
            return None
if not self.silent:
print >> sys.stderr, ("%i total validly-mapped read pairs loaded. %i unique pairs\n") %\
(total_reads,len(data)),
# write fragment pairs to h5dict
self._parse_fragment_pairs(data)
self.history += "Success\n"
return None
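    # A sketch of the counts-file format load_data_from_counts() expects
    # (fragment names hypothetical): three tab-separated columns per line, two
    # fragment names followed by a read count. Pairs whose fragments share an
    # orientation, are unknown, or are identical are skipped.
    #
    #     FRAG_FOR_001    FRAG_REV_002    18
    #     FRAG_FOR_003    FRAG_REV_001    5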
def load_data_from_bam(self, fragfilename, filelist):
"""
Read interaction counts from pairs of BAM files and place in h5dict.
:param fragfilename: This specifies the file name of the :class:`Fragment` object to associate with the dataset.
:type fragfilename: str.
:param filelist: A list containing lists of paired read end files.
:type filelist: list
:returns: None
:Attributes: * **fragfilename** (*str.*) - A string containing the relative path of the fragment file.
                     * **cis_data** (*ndarray*) - A numpy array of type int32 and shape N x 3 where N is the number of valid non-zero intra-regional fragment pairings observed in the data. The first column contains the fragment index (from the 'fragments' array in the Fragment object) of the upstream fragment, the second column contains the index of the downstream fragment, and the third column contains the number of reads observed for that fragment pair.
* **cis_indices** (*ndarray*) - A numpy array of type int64 and a length of the number of fragments + 1. Each position contains the first entry for the correspondingly-indexed fragment in the first column of 'cis_data'. For example, all of the downstream cis interactions for the fragment at index 5 in the Fragment object 'fragments' array are in cis_data[cis_indices[5]:cis_indices[6], :].
                     * **trans_data** (*ndarray*) - A numpy array of type int32 and shape N x 3 where N is the number of valid non-zero inter-regional fragment pairings observed in the data. The first column contains the fragment index (from the 'fragments' array in the Fragment object) of the upstream fragment (upstream also refers to the lower indexed chromosome in this context), the second column contains the index of the downstream fragment, and the third column contains the number of reads observed for that fragment pair.
                     * **trans_indices** (*ndarray*) - A numpy array of type int64 and a length of the number of fragments + 1. Each position contains the first entry for the correspondingly-indexed fragment in the first column of 'trans_data'. For example, all of the downstream trans interactions for the fragment at index 5 in the Fragment object 'fragments' array are in trans_data[trans_indices[5]:trans_indices[6], :].
* **frags** (*filestream*) - A filestream to the hdf5 Fragment file such that all saved Fragment attributes can be accessed through this class attribute.
When data is loaded the 'history' attribute is updated to include the history of the Fragment file that becomes associated with it.
"""
self.history += "FiveCData.load_data_from_counts(fragfilename='%s', filelist=%s) - " % (fragfilename, str(filelist))
if 'pysam' not in sys.modules.keys():
if not self.silent:
print >> sys.stderr, ("The pysam module must be installed to use this function.")
self.history += 'Error: pysam module missing\n'
return None
# determine if fragment file exists and if so, load it
if not os.path.exists(fragfilename):
if not self.silent:
print >> sys.stderr, ("The fragment file %s was not found. No data was loaded.\n") % (fragfilename),
self.history += "Error: '%s' not found\n" % fragfilename
return None
self.fragfilename = "%s/%s" % (os.path.relpath(os.path.dirname(os.path.abspath(fragfilename)),
os.path.dirname(self.file)), os.path.basename(fragfilename))
self.frags = h5py.File(fragfilename, 'r')
strands = self.frags['fragments']['strand'][...]
chr2int = {}
for i, j in enumerate(self.frags['chromosomes'][:]):
chr2int[j] = i
# create fragment name dictionary
names = {}
for i in range(self.frags['fragments'].shape[0]):
names[self.frags['fragments']['name'][i]] = i
        # load data from all files, skipping if either fragment is not in the fragment file.
if isinstance(filelist[0], str):
filelist = [[filelist[0], filelist[1]]]
total_reads = 0
data = {}
for filepair in filelist:
# determine which files have both mapped ends present
present = True
if not os.path.exists(filepair[0]):
if not self.silent:
print >> sys.stderr, ("%s could not be located.") % (filepair[0]),
self.history += "'%s' not found, " % filepair[0]
present = False
if not os.path.exists(filepair[1]):
if not self.silent:
print >> sys.stderr, ("%s could not be located.") % (filepair[1]),
self.history += "'%s' not found, " % filepair[1]
present = False
            if not present:
                if not self.silent:
                    print >> sys.stderr, ("No data for one or both ends could be located. Skipping this run.\n")
                continue
reads = 0
unpaired = {}
# load first half of paired ends
if not self.silent:
print >> sys.stderr, ("Loading data from %s...") % (filepair[0].split('/')[-1]),
input = pysam.Samfile(filepair[0], 'rb')
for read in input.fetch(until_eof=True):
# Only consider reads with an alignment
if read.is_unmapped:
continue
# if mapping name not in fragment names, skip
seq_name = input.getrname(read.tid)
if seq_name not in names:
continue
# skip multiply-aligned reads
for tag in read.tags:
if tag[0] == 'XS':
break
else:
unpaired[read.qname] = names[seq_name]
input.close()
if not self.silent:
print >> sys.stderr, ("Done\n"),
# load second half of paired ends
if not self.silent:
print >> sys.stderr, ("Loading data from %s...") % (filepair[1].split('/')[-1]),
input = pysam.Samfile(filepair[1], 'rb')
for read in input.fetch(until_eof=True):
                # Only consider reads whose paired end was valid
if read.qname not in unpaired:
continue
# Only consider reads with an alignment
if read.is_unmapped:
continue
# if mapping name not in fragment names, skip
seq_name = input.getrname(read.tid)
if seq_name not in names:
continue
# skip multiply-aligned reads
for tag in read.tags:
if tag[0] == 'XS':
break
else:
# if both ends map to the same orientation, skip
if strands[unpaired[read.qname]] != strands[names[seq_name]]:
pair = (min(unpaired[read.qname], names[seq_name]),
max(unpaired[read.qname], names[seq_name]))
if pair not in data:
data[pair] = 0
data[pair] += 1
reads += 1
del unpaired[read.qname]
input.close()
if not self.silent:
print >> sys.stderr, ("Done\n"),
if not self.silent:
print >> sys.stderr, ("Read %i validly_mapped read paired.\n") % (reads),
total_reads += reads
if len(data) == 0:
if not self.silent:
print >> sys.stderr, ("No valid data was loaded.\n"),
self.history += "Error: no valid data loaded\n"
return None
if not self.silent:
print >> sys.stderr, ("%i total validly-mapped read pairs loaded. %i unique pairs\n") %\
(total_reads,len(data)),
self._parse_fragment_pairs(data)
self.history += 'Success\n'
return None
def _parse_fragment_pairs(self, frag_pairs):
"""Separate frag pairs into cis (within region) and trans (between region) interactions and write to h5dict with index arrays."""
if not self.silent:
print >> sys.stderr, ("Writing fragment pair data to file..."),
cis = {}
trans = {}
for i in range(self.frags['regions'].shape[0]):
# Find pairs from same region
for j in range(self.frags['regions']['start_frag'][i], self.frags['regions']['stop_frag'][i] - 1):
for k in range(j + 1, self.frags['regions']['stop_frag'][i]):
if (j, k) in frag_pairs:
cis[(j, k)] = frag_pairs[(j, k)]
# Find pairs from different regions
for j in range(self.frags['regions']['start_frag'][i], self.frags['regions']['stop_frag'][i]):
for k in range(self.frags['regions']['stop_frag'][i], self.frags['fragments'].shape[0]):
if (j, k) in frag_pairs:
trans[(j, k)] = frag_pairs[(j, k)]
# convert data into arrays
self.cis_data = numpy.empty((len(cis), 3), dtype=numpy.int32)
self.trans_data = numpy.empty((len(trans), 3), dtype=numpy.int32)
keys = cis.keys()
keys.sort()
for i in range(len(keys)):
self.cis_data[i, 0] = keys[i][0]
self.cis_data[i, 1] = keys[i][1]
self.cis_data[i, 2] = cis[keys[i]]
keys = trans.keys()
keys.sort()
for i in range(len(keys)):
self.trans_data[i, 0] = keys[i][0]
self.trans_data[i, 1] = keys[i][1]
self.trans_data[i, 2] = trans[keys[i]]
# find first instance of each fragment for cis and trans data
if self.cis_data.shape[0] > 0:
self.cis_indices = numpy.r_[0, numpy.bincount(self.cis_data[:, 0],
minlength=self.frags['fragments'].shape[0])].astype(numpy.int64)
for i in range(1, self.cis_indices.shape[0]):
self.cis_indices[i] += self.cis_indices[i - 1]
if self.trans_data.shape[0] > 0:
self.trans_indices = numpy.r_[0, numpy.bincount(self.trans_data[:, 0],
minlength=self.frags['fragments'].shape[0])].astype(numpy.int64)
for i in range(1, self.trans_indices.shape[0]):
self.trans_indices[i] += self.trans_indices[i - 1]
if not self.silent:
print >> sys.stderr, ("Done\n"),
return None
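# A minimal sketch (hypothetical arrays) of the index-array convention built by
# _parse_fragment_pairs(): cis_indices holds cumulative counts over the first
# column of cis_data (the in-place loop above is a cumulative sum), so all cis
# pairs whose upstream fragment is i occupy one contiguous slice.
#
#     counts = numpy.bincount(cis_data[:, 0], minlength=num_frags)
#     cis_indices = numpy.r_[0, counts].cumsum().astype(numpy.int64)
#     pairs_for_frag_5 = cis_data[cis_indices[5]:cis_indices[6], :]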
|
|
import copy
from hearthbreaker.tags.base import Status, Action, Aura, Condition, AuraUntil, CardQuery, \
CARD_SOURCE, Effect, Buff, BuffUntil, Amount, Picker, Selector
from hearthbreaker.tags.condition import IsSecret
from hearthbreaker.tags.selector import AllPicker, ConstantSelector
class Give(Action):
def __init__(self, buffs, picker=AllPicker()):
if isinstance(buffs, Status):
self.buffs = [Buff(buffs)]
elif isinstance(buffs, list):
self.buffs = buffs
if isinstance(buffs[0], Aura):
raise TypeError("Aura passed where buff was expected")
elif isinstance(buffs, Aura):
raise TypeError("Aura passed where buff was expected")
else:
self.buffs = [buffs]
self.picker = picker
def act(self, actor, target, other=None):
buffs = self.picker.pick(actor, self.buffs)
for buff in buffs:
target.add_buff(buff.to_instance(target))
def __to_json__(self):
if isinstance(self.picker, AllPicker):
return {
'name': 'give',
'buffs': self.buffs
}
return {
'name': 'give',
'buffs': self.buffs,
'picker': self.picker,
}
def __from_json__(self, buffs=None, effects=None, auras=None, picker=None):
if effects: # To allow for give to work with effects as well, we check at load time
return GiveEffect.__new__(GiveEffect).__from_json__(effects)
if auras: # To allow for give to work with auras as well, we check at load time
return GiveAura.__new__(GiveAura).__from_json__(auras)
self.buffs = []
for buff in buffs:
if "until" in buff:
self.buffs.append(BuffUntil.from_json(**buff))
else:
self.buffs.append(Buff.from_json(**buff))
if not picker:
self.picker = AllPicker()
else:
self.picker = Picker.from_json(**picker)
return self
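# A minimal sketch (payload hypothetical) of the JSON round-trip convention
# shared by these actions: __to_json__ emits a dict keyed by 'name', and
# __from_json__ rebuilds the action on a bare instance created via __new__.
#
#     data = {'name': 'give', 'buffs': [...]}
#     action = Give.__new__(Give).__from_json__(
#         **{k: v for k, v in data.items() if k != 'name'})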
class GiveAura(Action):
def __init__(self, auras):
if isinstance(auras, list):
self.auras = auras
else:
self.auras = [auras]
def act(self, actor, target, other=None):
for aura in self.auras:
target.add_aura(aura)
def __to_json__(self):
return {
'name': 'give',
'auras': self.auras
}
def __from_json__(self, auras):
self.auras = []
for aura in auras:
if "until" in aura:
self.auras.append(AuraUntil.from_json(**aura))
else:
self.auras.append(Aura.from_json(**aura))
return self
class GiveEffect(Action):
def __init__(self, effects):
if isinstance(effects, Effect):
self.effects = [effects]
else:
self.effects = effects
def act(self, actor, target, other=None):
for effect in self.effects:
for tag in effect.tags:
for action in tag.actions:
if hasattr(action, "selector"):
action.selector = ConstantSelector([obj.born for obj in
action.selector.choose_targets(actor, target)])
target.add_effect(effect)
def __to_json__(self):
return {
'name': 'give',
'effects': self.effects
}
def __from_json__(self, effects):
self.effects = [Effect.from_json(**effect) for effect in effects]
return self
class Summon(Action):
def __init__(self, card, count=1):
if isinstance(card, CardQuery):
self.card = card
else:
self.card = CardQuery(card.ref_name)
self.count = count
def act(self, actor, target, other=None):
card = self.card.get_card(target, target, actor)
if card is None:
return
if actor.is_minion() and actor.player is target:
# When a minion is summoned around another minion, they alternate between left and right,
# starting on the right
if actor.removed:
c = 0
else:
c = 1
for summon in range(self.count):
index = actor.index + (c % 2)
card.summon(target, target.game, index)
if not actor.removed:
c += 1
else:
for summon in range(self.count):
card.summon(target, target.game, len(target.minions))
def __to_json__(self):
if self.count > 1:
return {
'name': 'summon',
'card': self.card,
'count': self.count
}
return {
'name': 'summon',
'card': self.card
}
def __from_json__(self, card, count=1):
self.card = CardQuery.from_json(**card)
self.count = count
return self
class ReplaceHeroWithMinion(Action):
# Used only for Jaraxxus currently
def __init__(self, card):
if isinstance(card, CardQuery):
self.card = card
else:
self.card = CardQuery(card.ref_name)
def act(self, actor, target, other=None):
card = self.card.get_card(target, target.player, actor)
hero = card.create_hero(target.player)
hero.card = card
target.player.trigger("minion_played", actor)
hero.buffs = copy.deepcopy(actor.buffs)
hero.health = actor.health
target.replace(hero)
if hero.health <= 0:
hero.die(None)
def __to_json__(self):
return {
'name': 'replace_hero_with_minion',
'card': self.card
}
def __from_json__(self, card):
self.card = CardQuery.from_json(**card)
return self
class Transform(Action):
def __init__(self, card):
if isinstance(card, CardQuery):
self.card = card
else:
self.card = CardQuery(card.ref_name)
def act(self, actor, target, other=None):
card = self.card.get_card(target, target.player, actor)
if target.is_card():
target.replace(card)
elif target.is_minion():
minion = card.create_minion(target.player)
minion.card = card
target.replace(minion)
elif target.is_hero():
hero = card.create_hero(target.player)
target.replace(hero)
def __to_json__(self):
return {
'name': 'transform',
'card': self.card
}
def __from_json__(self, card):
self.card = CardQuery.from_json(**card)
return self
class Kill(Action):
def act(self, actor, target, other=None):
target.die(None)
def __to_json__(self):
return {
'name': 'kill'
}
class Heal(Action, metaclass=Amount):
def __init__(self):
super().__init__()
def act(self, actor, target, other=None):
target.heal(actor.player.effective_heal_power(self.get_amount(actor, target, other)), actor)
def __to_json__(self):
return {
'name': 'heal',
}
class SetHealth(Action, metaclass=Amount):
def __init__(self):
super().__init__()
def act(self, actor, target, other=None):
target.set_health_to(self.get_amount(actor, target, other))
def __to_json__(self):
return {
'name': 'set_health'
}
class Damage(Action, metaclass=Amount):
def __init__(self):
super().__init__()
def act(self, actor, target, other=None):
target.damage(self.get_amount(actor, target, other), actor)
def __to_json__(self):
return {
'name': 'damage',
}
class Draw(Action, metaclass=Amount):
def __init__(self):
super().__init__()
def act(self, actor, target, other=None):
for draw in range(0, self.get_amount(actor, target, other)):
target.draw()
def __to_json__(self):
return {
'name': 'draw',
}
class Discard(Action, metaclass=Amount):
def __init__(self, query=CardQuery(source=CARD_SOURCE.MY_HAND)):
super().__init__()
self.query = query
def act(self, actor, target, other=None):
for index in range(0, self.get_amount(actor, target, other)):
card = self.query.get_card(target, actor.player, actor)
if card:
actor.player.trigger("discard", card)
def __to_json__(self):
return {
'name': 'discard',
'query': self.query,
}
def __from_json__(self, query):
self.query = CardQuery.from_json(**query)
return self
class IncreaseArmor(Action, metaclass=Amount):
def __init__(self):
super().__init__()
def act(self, actor, target, other=None):
target.armor += self.get_amount(actor, target, other)
def __to_json__(self):
return {
'name': 'increase_armor'
}
class ChangeTarget(Action):
def __init__(self, selector):
self.selector = selector
def act(self, actor, target, other=None):
possible_targets = [t for t in self.selector.choose_targets(target, target.current_target)]
if len(possible_targets) > 0:
target.current_target = possible_targets[0]
def __to_json__(self):
return {
'name': 'change_target',
'selector': self.selector,
}
    def __from_json__(self, selector):
        self.selector = Selector.from_json(**selector)
        return self
return self
class AddCard(Action):
def __init__(self, card, count=1, add_to_deck=False):
if isinstance(card, CardQuery):
self.card = card
else:
self.card = CardQuery(card.ref_name)
self.add_to_deck = add_to_deck
self.count = count
def act(self, actor, target, other=None):
if self.add_to_deck:
for i in range(self.count):
target.deck.put_back(self.card.get_card(target, target, actor))
else:
for i in range(self.count):
if len(target.hand) < 10:
card = self.card.get_card(target, target, actor)
if card:
target.hand.append(copy.copy(card))
card.drawn = True
def __to_json__(self):
if self.add_to_deck:
return {
'name': 'add_card',
'card': self.card,
'count': self.count,
'add_to_deck': self.add_to_deck,
}
return {
'name': 'add_card',
'card': self.card,
'count': self.count
}
def __from_json__(self, card, count=1, add_to_deck=False):
self.card = CardQuery.from_json(**card)
self.count = count
self.add_to_deck = add_to_deck
return self
class ResurrectFriendly(Action):
def __to_json__(self):
return {
'name': 'resurrect_friendly'
}
def act(self, actor, target, other=None):
# Will be called once per Kel'Thuzad on the board
# http://www.hearthhead.com/card=1794/kelthuzad#comments
for minion in sorted(target.dead_this_turn, key=lambda m: m.born):
minion.card.summon(target, target.game, len(target.minions))
class Bounce(Action):
def __init__(self):
super().__init__()
def act(self, actor, target, other=None):
target.bounce()
def __to_json__(self):
return {
'name': 'bounce'
}
class SwapWithHand(Action):
def __init__(self, condition=None):
self.condition = condition
def act(self, actor, target, other=None):
if actor.is_valid():
if self.condition:
chosen_card = target.game.random_draw(target.hand,
lambda c: self.condition.evaluate(c) and c.is_minion())
else:
chosen_card = target.game.random_draw(target.hand, lambda c: c.is_minion())
if chosen_card:
chosen_card.summon(target, target.game, len(target.minions))
chosen_card.unattach()
target.hand.remove(chosen_card)
actor.bounce()
def __to_json__(self):
if self.condition:
return {
'name': 'swap_with_hand',
'condition': self.condition
}
return {
'name': 'swap_with_hand'
}
def __from_json__(self, condition=None):
if condition:
self.condition = Condition.from_json(**condition)
else:
self.condition = None
return self
class ApplySecret(Action):
def __init__(self, source):
self.source = source
self._query = CardQuery(conditions=[IsSecret()], source=source)
def act(self, actor, target, other=None):
secret = self._query.get_card(target, target, actor)
if secret:
target.secrets.append(secret)
secret.player = target
if target is target.game.other_player:
secret.player = target
# To allow for Mad Scientist not to be redeemed or duplicated as a result of its death,
# but still allow other minions that die during the same cycle to be duplicated.
# Based on testing for patch 2.1.0.7785
if actor.dead:
target.bind_once("after_death", secret.activate)
else:
secret.activate(target)
def __to_json__(self):
return {
'name': 'apply_secret',
'source': CARD_SOURCE.to_str(self.source)
}
def __from_json__(self, source):
self.source = CARD_SOURCE.from_str(source)
self._query = CardQuery(conditions=[IsSecret()], source=self.source)
return self
class Equip(Action):
def __init__(self, weapon):
if isinstance(weapon, CardQuery):
self.weapon = weapon
else:
self.weapon = CardQuery(weapon.ref_name)
def act(self, actor, target, other=None):
card = self.weapon.get_card(target, target, actor)
weapon = card.create_weapon(target)
weapon.card = card
weapon.equip(target)
def __to_json__(self):
return {
'name': 'equip',
'weapon': self.weapon
}
def __from_json__(self, weapon):
self.weapon = CardQuery.from_json(**weapon)
return self
class Destroy(Action):
def act(self, actor, target, other=None):
target.destroy()
def __to_json__(self):
return {
'name': 'destroy'
}
class Steal(Action):
def act(self, actor, target, other=None):
new_minion = target.copy(actor.player)
target.unattach()
target.remove_from_board()
new_minion.add_to_board(len(actor.player.minions))
def __to_json__(self):
return {
'name': 'steal'
}
class Duplicate(Action):
def __init__(self, selector):
super().__init__()
self.selector = selector
def act(self, actor, target, other=None):
for minion in self.selector.choose_targets(actor, target):
if len(minion.player.minions) < 7:
dup = minion.copy(minion.player)
dup.add_to_board(minion.index + 1)
def __to_json__(self):
return {
"name": "duplicate",
"selector": self.selector,
}
def __from_json__(self, selector):
self.selector = Selector.from_json(**selector)
return self
class Replace(Action):
def act(self, actor, target, other=None):
new_minion = target.copy(actor.player)
actor.replace(new_minion)
def __to_json__(self):
return {
'name': 'replace'
}
class Silence(Action):
def act(self, actor, target, other=None):
target.silence()
def __to_json__(self):
return {
'name': 'silence'
}
class DestroyManaCrystal(Action):
def act(self, actor, target, other=None):
target.max_mana -= 1
if target.mana > 0:
target.mana -= 1
def __to_json__(self):
return {
'name': 'destroy_mana_crystal'
}
class GiveManaCrystal(Action):
def __init__(self, count=1, empty=False):
self.count = count
self.empty = empty
def act(self, actor, target, other=None):
target.max_mana = min(self.count + target.max_mana, 10)
if not self.empty:
target.mana += self.count
def __to_json__(self):
return {
'name': 'give_mana_crystal',
'count': self.count,
'empty': self.empty,
}
class IncreaseDurability(Action):
def act(self, actor, target, other=None):
target.durability += 1
def __to_json__(self):
return {
'name': 'increase_durability',
}
class DecreaseDurability(Action):
def act(self, actor, target, other=None):
target.durability -= 1
if target.durability <= 0:
target.destroy()
def __to_json__(self):
return {
'name': 'decrease_durability',
}
class IncreaseWeaponAttack(Action, metaclass=Amount):
def __init__(self):
        super().__init__()
def act(self, actor, target, other=None):
target.base_attack += self.get_amount(actor, target, other)
def __to_json__(self):
return {
'name': 'increase_weapon_attack'
}
class RemoveDivineShields(Action):
def act(self, actor, target, other=None):
from hearthbreaker.tags.status import DivineShield
if target.divine_shield:
target.buffs = [buff for buff in target.buffs if not isinstance(buff.status, DivineShield)]
target.divine_shield = 0
def __to_json__(self):
return {
"name": "remove_divine_shields"
}
# class SwapStats(Action):
# def act(self, actor, target, other=None):
# temp_attack = target.calculate_attack()
# temp_health = target.health
# if temp_attack == 0:
# target.die(None)
# else:
# target.set_attack_to(temp_health)
# target.set_health_to(temp_attack)
#
# def __to_json__(self):
# return {
# 'name': 'swap_stats',
# }
class Remove(Action):
def act(self, actor, target, other=None):
target.unattach()
target.remove_from_board()
def __to_json__(self):
return {
'name': 'remove'
}
class SwapStats(Action):
def __init__(self, source_stat, dest_stat, swap_with_owner):
self.source_stat = source_stat
self.dest_stat = dest_stat
self.swap_with_owner = swap_with_owner
def act(self, actor, target, other=None):
if self.swap_with_owner:
source = actor
else:
source = target
temp = self.get_attribute(source, self.source_stat)
self.set_attribute(source, self.source_stat, self.get_attribute(target, self.dest_stat))
self.set_attribute(target, self.dest_stat, temp)
if source.health == 0:
source.die(None)
if target is not source and target.health == 0:
target.die(None)
actor.player.game.check_delayed()
@staticmethod
def get_attribute(obj, attribute):
if attribute == "damage":
return obj.calculate_max_health() - obj.health
elif attribute == 'mana':
return obj.card.mana
elif attribute == "attack":
return obj.calculate_attack()
elif attribute == "health":
return obj.health
@staticmethod
def set_attribute(obj, attribute, value):
from hearthbreaker.tags.status import ManaChange, SetAttack
if attribute == "damage":
was_enraged = obj.enraged
            obj.health = max(0, obj.calculate_max_health() - value)
if value > 0:
obj.enraged = True
if not was_enraged:
obj._do_enrage()
elif attribute == 'mana':
obj.add_buff(Buff(ManaChange(value - obj.mana_cost())))
elif attribute == "attack":
obj.add_buff(Buff(SetAttack(value)))
elif attribute == "health":
obj.set_health_to(value)
def __to_json__(self):
return {
'name': 'swap_stats',
'source_stat': self.source_stat,
'dest_stat': self.dest_stat,
'swap_with_owner': self.swap_with_owner,
}
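# A minimal sketch (board state hypothetical) of the placement rule in
# Summon.act(): when a live minion summons around itself, c starts at 1, so
# index = actor.index + (c % 2) alternates between the actor's right and left,
# starting on the right; if the actor has already been removed from the board,
# c starts at 0 and the first summon takes the actor's old index instead.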
|
|
#!/usr/bin/env python
"""This file contains utility functions used in ApiCallHandler classes."""
import cStringIO
import itertools
import os
import re
import sys
import zipfile
import yaml
import logging
from grr.lib import aff4
from grr.lib import rdfvalue
from grr.lib import utils
from grr.lib.flows.general import export as flow_export
from grr.lib.rdfvalues import crypto as rdf_crypto
from grr.lib.rdfvalues import structs as rdf_structs
from grr.proto import api_pb2
class CollectionArchiveGenerator(object):
"""Class that generates downloaded files archive from a collection."""
ZIP = "zip"
TAR_GZ = "tar.gz"
FILES_SKIPPED_WARNING = (
"# NOTE: Some files were skipped because they were referenced in the \n"
"# collection but were not downloaded by GRR, so there were no data \n"
"# blobs in the data store to archive.\n")
BATCH_SIZE = 1000
def __init__(self,
archive_format=ZIP,
prefix=None,
description=None,
predicate=None,
client_id=None):
"""CollectionArchiveGenerator constructor.
Args:
      archive_format: May be CollectionArchiveGenerator.ZIP or
          CollectionArchiveGenerator.TAR_GZ. Defaults to ZIP.
prefix: Name of the folder inside the archive that will contain all
the generated data.
description: String describing archive's contents. It will be included
into the auto-generated MANIFEST file. Defaults to
'Files archive collection'.
predicate: If not None, only the files matching the predicate will be
archived, all others will be skipped.
client_id: The client_id to use when exporting a flow results collection.
Raises:
ValueError: if prefix is None.
"""
super(CollectionArchiveGenerator, self).__init__()
if archive_format == self.ZIP:
self.archive_generator = utils.StreamingZipGenerator(
compression=zipfile.ZIP_DEFLATED)
elif archive_format == self.TAR_GZ:
self.archive_generator = utils.StreamingTarGenerator()
else:
raise ValueError("Unknown archive format: %s" % archive_format)
if not prefix:
raise ValueError("Prefix can't be None.")
self.prefix = prefix
self.description = description or "Files archive collection"
self.total_files = 0
self.archived_files = 0
self.ignored_files = []
self.failed_files = []
self.predicate = predicate or (lambda _: True)
self.client_id = client_id
@property
def output_size(self):
return self.archive_generator.output_size
def _ItemsToUrns(self, items):
"""Converts collection items to aff4 urns suitable for downloading."""
for item in items:
try:
yield flow_export.CollectionItemToAff4Path(item, self.client_id)
except flow_export.ItemNotExportableError:
pass
def _WriteDescription(self):
"""Writes description into a MANIFEST file in the archive."""
manifest = {
"description": self.description,
"processed_files": self.total_files,
"archived_files": self.archived_files,
"ignored_files": len(self.ignored_files),
"failed_files": len(self.failed_files)
}
if self.ignored_files:
manifest["ignored_files_list"] = self.ignored_files
if self.failed_files:
manifest["failed_files_list"] = self.failed_files
manifest_fd = cStringIO.StringIO()
if self.total_files != self.archived_files:
manifest_fd.write(self.FILES_SKIPPED_WARNING)
manifest_fd.write(yaml.safe_dump(manifest))
manifest_fd.seek(0)
st = os.stat_result((0644, 0, 0, 0, 0, 0, len(manifest_fd.getvalue()), 0, 0,
0))
for chunk in self.archive_generator.WriteFromFD(
manifest_fd, os.path.join(self.prefix, "MANIFEST"), st=st):
yield chunk
def Generate(self, collection, token=None):
"""Generates archive from a given collection.
Iterates the collection and generates an archive by yielding contents
of every referenced AFF4Stream.
Args:
collection: Iterable with items that point to aff4 paths.
token: User's ACLToken.
Yields:
Binary chunks comprising the generated archive.
"""
hashes = set()
for fd_urn_batch in utils.Grouper(
self._ItemsToUrns(collection), self.BATCH_SIZE):
fds_to_write = {}
for fd in aff4.FACTORY.MultiOpen(fd_urn_batch, token=token):
self.total_files += 1
if not self.predicate(fd):
self.ignored_files.append(utils.SmartUnicode(fd.urn))
continue
# Any file-like object with data in AFF4 should inherit AFF4Stream.
if isinstance(fd, aff4.AFF4Stream):
archive_path = os.path.join(self.prefix, *fd.urn.Split())
sha256_hash = fd.Get(fd.Schema.HASH, rdf_crypto.Hash()).sha256
if not sha256_hash:
continue
self.archived_files += 1
content_path = os.path.join(self.prefix, "hashes", str(sha256_hash))
if sha256_hash not in hashes:
            # Make sure the size of the original file is passed. It's required
            # when the archive generator is a StreamingTarGenerator.
st = os.stat_result((0644, 0, 0, 0, 0, 0, fd.size, 0, 0, 0))
fds_to_write[fd] = (content_path, st)
hashes.add(sha256_hash)
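          # Deduplicate contents: the file bytes are stored once under
          # <prefix>/hashes/<sha256> and every archive path is written as a
          # relative symlink back to that blob.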
up_prefix = "../" * len(fd.urn.Split())
yield self.archive_generator.WriteSymlink(up_prefix + content_path,
archive_path)
if fds_to_write:
prev_fd = None
for fd, chunk, exception in aff4.AFF4Stream.MultiStream(fds_to_write):
if exception:
logging.exception(exception)
self.archived_files -= 1
self.failed_files.append(utils.SmartUnicode(fd.urn))
continue
if prev_fd != fd:
if prev_fd:
yield self.archive_generator.WriteFileFooter()
prev_fd = fd
content_path, st = fds_to_write[fd]
yield self.archive_generator.WriteFileHeader(content_path, st=st)
yield self.archive_generator.WriteFileChunk(chunk)
if self.archive_generator.is_file_write_in_progress:
yield self.archive_generator.WriteFileFooter()
for chunk in self._WriteDescription():
yield chunk
yield self.archive_generator.Close()
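# A minimal usage sketch (illustrative only; `collection` and `token` are
# assumed to come from the surrounding GRR code, and the output path is
# hypothetical):
#
#   generator = CollectionArchiveGenerator(
#       archive_format=CollectionArchiveGenerator.ZIP,
#       prefix="hunt_results",
#       description="Example files archive")
#   with open("/tmp/results.zip", "wb") as out_fd:
#     for chunk in generator.Generate(collection, token=token):
#       out_fd.write(chunk)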
class ApiDataObject(rdf_structs.RDFProtoStruct):
"""Defines a proto for returning Data Objects over the API."""
protobuf = api_pb2.ApiDataObject
def InitFromDataObject(self, data_object):
for key, value in sorted(data_object.iteritems()):
item = ApiDataObjectKeyValuePair().InitFromKeyValue(key, value)
self.items.append(item)
return self
class ApiDataObjectKeyValuePair(rdf_structs.RDFProtoStruct):
"""Defines a proto for returning key value pairs of data objects."""
protobuf = api_pb2.ApiDataObjectKeyValuePair
def InitFromKeyValue(self, key, value):
self.key = key
# Convert primitive types to rdf values so they can be serialized.
if isinstance(value, float) and not value.is_integer():
# TODO(user): Do not convert float values here and mark them invalid
# later. ATM, we do not have means to properly represent floats. Change
# this part once we have a RDFFloat implementation.
pass
elif rdfvalue.RDFInteger.IsNumeric(value):
value = rdfvalue.RDFInteger(value)
elif isinstance(value, basestring):
value = rdfvalue.RDFString(value)
elif isinstance(value, bool):
value = rdfvalue.RDFBool(value)
if isinstance(value, rdfvalue.RDFValue):
self.type = value.__class__.__name__
self.value = value
else:
self.invalid = True
return self
def GetArgsClass(self):
try:
return rdfvalue.RDFValue.GetPlugin(self.type)
except KeyError:
raise ValueError("No class found for type %s." % self.type)
def FilterCollection(collection, offset, count=0, filter_value=None):
"""Filters an aff4 collection, getting count elements, starting at offset."""
if offset < 0:
raise ValueError("Offset needs to be greater than or equal to zero")
if count < 0:
raise ValueError("Count needs to be greater than or equal to zero")
count = count or sys.maxint
if filter_value:
index = 0
items = []
for item in collection.GenerateItems():
serialized_item = item.SerializeToString()
if re.search(re.escape(filter_value), serialized_item, re.I):
if index >= offset:
items.append(item)
index += 1
if len(items) >= count:
break
else:
items = list(itertools.islice(collection.GenerateItems(offset), count))
return items
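# Usage sketch (hypothetical collection): return at most 10 items, skipping
# the first 20 matches, keeping only items whose serialized form contains
# "foo" (matched case-insensitively):
#
#   items = FilterCollection(collection, offset=20, count=10, filter_value="foo")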
|
|
#!/usr/bin/env python3
# Written by John Hoffman
# see LICENSE.txt for license information
DOWNLOAD_SCROLL_RATE = 1
import sys
import os
import time
import signal
import threading
from BitTornado.Client.launchmanycore import LaunchMany
from BitTornado.Client.download_bt1 import defaults, get_usage
from BitTornado.Application.NumberFormats import formatSize, formatIntClock
from BitTornado.Application.parseargs import parseargs
from BitTornado import version, report_url
from BitTornado.Application.ConfigDir import ConfigDir
try:
import curses
import curses.panel
except ImportError:
print('Textmode GUI initialization failed, cannot proceed.')
print()
print('This download interface requires the standard Python module '
'"curses", which is unfortunately not available for the native '
'Windows port of Python. It is however available for the Cygwin '
'port of Python, running on all Win32 systems (www.cygwin.com).')
print()
print('You may still use "btdownloadheadless.py" to download.')
sys.exit(1)
Exceptions = []
def ljust(s, size):
return s[:size].ljust(size)
def rjust(s, size):
return s[:size].rjust(size)
class CursesDisplayer:
def __init__(self, scrwin):
self.messages = []
self.scroll_pos = 0
self.scroll_time = 0
self.scrwin = scrwin
signal.signal(signal.SIGWINCH, self.winch_handler)
self.changeflag = threading.Event()
self._remake_window()
def winch_handler(self, signum, stackframe):
self.changeflag.set()
curses.endwin()
self.scrwin.refresh()
self.scrwin = curses.newwin(0, 0, 0, 0)
self._remake_window()
self._display_messages()
def _remake_window(self):
self.scrh, self.scrw = self.scrwin.getmaxyx()
self.scrpan = curses.panel.new_panel(self.scrwin)
self.mainwinh = int(2 * (self.scrh) / 3)
self.mainwinw = self.scrw - 4 # - 2 (bars) - 2 (spaces)
self.mainwiny = 2 # + 1 (bar) + 1 (titles)
self.mainwinx = 2 # + 1 (bar) + 1 (space)
# + 1 to all windows so we can write at mainwinw
self.mainwin = curses.newwin(self.mainwinh, self.mainwinw + 1,
self.mainwiny, self.mainwinx)
self.mainpan = curses.panel.new_panel(self.mainwin)
self.mainwin.scrollok(0)
self.mainwin.nodelay(1)
self.headerwin = curses.newwin(1, self.mainwinw + 1,
1, self.mainwinx)
self.headerpan = curses.panel.new_panel(self.headerwin)
self.headerwin.scrollok(0)
self.totalwin = curses.newwin(1, self.mainwinw + 1,
self.mainwinh + 1, self.mainwinx)
self.totalpan = curses.panel.new_panel(self.totalwin)
self.totalwin.scrollok(0)
self.statuswinh = self.scrh - 4 - self.mainwinh
self.statuswin = curses.newwin(self.statuswinh, self.mainwinw + 1,
self.mainwinh + 3, self.mainwinx)
self.statuspan = curses.panel.new_panel(self.statuswin)
self.statuswin.scrollok(0)
try:
self.scrwin.border(*map(ord, '||-- '))
except Exception:
pass
self.headerwin.addnstr(0, 2, '#', self.mainwinw - 25, curses.A_BOLD)
self.headerwin.addnstr(0, 4, 'Filename', self.mainwinw - 25,
curses.A_BOLD)
self.headerwin.addnstr(0, self.mainwinw - 24, 'Size', 4, curses.A_BOLD)
self.headerwin.addnstr(0, self.mainwinw - 18, 'Download', 8,
curses.A_BOLD)
self.headerwin.addnstr(0, self.mainwinw - 6, 'Upload', 6,
curses.A_BOLD)
self.totalwin.addnstr(0, self.mainwinw - 27, 'Totals:', 7,
curses.A_BOLD)
self._display_messages()
curses.panel.update_panels()
curses.doupdate()
self.changeflag.clear()
def _display_line(self, s, bold=False):
if self.disp_end:
return True
line = self.disp_line
self.disp_line += 1
if line < 0:
return False
if bold:
self.mainwin.addnstr(line, 0, s, self.mainwinw, curses.A_BOLD)
else:
self.mainwin.addnstr(line, 0, s, self.mainwinw)
if self.disp_line >= self.mainwinh:
self.disp_end = True
return self.disp_end
def _display_data(self, data):
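        # Each torrent takes three display lines (title/stats, detail,
        # message). If 3 * len(data) doesn't fit in the main window, the list
        # scrolls by one line every DOWNLOAD_SCROLL_RATE seconds.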
if 3 * len(data) <= self.mainwinh:
self.scroll_pos = 0
self.scrolling = False
elif self.scroll_time + DOWNLOAD_SCROLL_RATE < time.time():
self.scroll_time = time.time()
self.scroll_pos += 1
self.scrolling = True
if self.scroll_pos >= 3 * len(data) + 2:
self.scroll_pos = 0
i = int(self.scroll_pos / 3)
self.disp_line = (3 * i) - self.scroll_pos
self.disp_end = False
while not self.disp_end:
ii = i % len(data)
if i and not ii:
if not self.scrolling:
break
self._display_line('')
if self._display_line(''):
break
(name, status, progress, peers, seeds, _, dist, uprate, dnrate,
upamt, dnamt, size, t, msg) = data[ii]
if t is not None and t > 0:
status = 'ETA in ' + formatIntClock(t)
name = ljust(name, self.mainwinw - 32)
size = rjust(formatSize(size), 8)
uprate = rjust('%s/s' % formatSize(uprate), 10)
dnrate = rjust('%s/s' % formatSize(dnrate), 10)
line = "%3d %s%s%s%s" % (ii + 1, name, size, dnrate, uprate)
self._display_line(line, True)
if peers + seeds:
datastr = ' ({}) {} - {} up {} dn - {} peers {} seeds ' \
'{:.3f} dist copies'.format(
progress, status, formatSize(upamt), formatSize(dnamt),
peers, seeds, dist)
else:
datastr = ' ({}) {} - {} up {} dn'.format(
progress, status, formatSize(upamt), formatSize(dnamt))
self._display_line(datastr)
self._display_line(' ' + ljust(msg, self.mainwinw - 4))
i += 1
def display(self, data):
if self.changeflag.is_set():
return
inchar = self.mainwin.getch()
if inchar == 12: # ^L
self._remake_window()
self.mainwin.erase()
if data:
self._display_data(data)
else:
self.mainwin.addnstr(1, int(self.mainwinw / 2) - 5,
'no torrents', 12, curses.A_BOLD)
totalup = 0
totaldn = 0
for entry in data:
#entry = (name, status, progress, peers, seeds, seedsmsg, dist,
# uprate, downrate, upamount, downamount, size, t, msg)
totalup += entry[7]
totaldn += entry[8]
totalup = '%s/s' % formatSize(totalup)
totaldn = '%s/s' % formatSize(totaldn)
self.totalwin.erase()
self.totalwin.addnstr(0, self.mainwinw - 27, 'Totals:', 7,
curses.A_BOLD)
self.totalwin.addnstr(0, self.mainwinw - 20 + (10 - len(totaldn)),
totaldn, 10, curses.A_BOLD)
self.totalwin.addnstr(0, self.mainwinw - 10 + (10 - len(totalup)),
totalup, 10, curses.A_BOLD)
curses.panel.update_panels()
curses.doupdate()
return inchar in (ord('q'), ord('Q'))
def message(self, s):
self.messages.append(time.strftime('%x %X - ', time.localtime()) + s)
self._display_messages()
def _display_messages(self):
self.statuswin.erase()
winpos = 0
for s in self.messages[-self.statuswinh:]:
self.statuswin.addnstr(winpos, 0, s, self.mainwinw)
winpos += 1
curses.panel.update_panels()
curses.doupdate()
def exception(self, s):
Exceptions.append(s)
self.message('SYSTEM ERROR - EXCEPTION GENERATED')
def LaunchManyWrapper(scrwin, config):
LaunchMany(config, CursesDisplayer(scrwin))
if __name__ == '__main__':
if sys.argv[1:] == ['--version']:
print(version)
sys.exit(0)
defaults.extend([
('parse_dir_interval', 60,
'how often to rescan the torrent directory, in seconds'),
('saveas_style', 2, 'How to name torrent downloads (1 = rename to '
'torrent name, 2 = save under name in torrent, 3 = save in directory '
'under torrent name)'),
('display_path', 0, 'whether to display the full path or the torrent '
'contents for each torrent'),
])
try:
configdir = ConfigDir('launchmanycurses')
defaultsToIgnore = ['responsefile', 'url', 'priority']
configdir.setDefaults(defaults, defaultsToIgnore)
configdefaults = configdir.loadConfig()
defaults.append(('save_options', 0, 'whether to save the current '
'options as the new default configuration (only for '
'btlaunchmanycurses.py)'))
if len(sys.argv) < 2:
print("Usage: btlaunchmanycurses.py <directory> <global options>\n"
"<directory> - directory to look for .torrent files "
"(semi-recursive)")
print(get_usage(defaults, 80, configdefaults))
sys.exit(1)
config, args = parseargs(sys.argv[1:], defaults, 1, 1, configdefaults)
if config['save_options']:
configdir.saveConfig(config)
configdir.deleteOldCacheData(config['expire_cache_data'])
if not os.path.isdir(args[0]):
raise ValueError("Warning: " + args[0] + " is not a directory")
config['torrent_dir'] = args[0]
except ValueError as e:
print('error: {}\nrun with no args for parameter explanations'
''.format(e))
sys.exit(1)
curses.wrapper(LaunchManyWrapper, config)
if Exceptions:
print('\nEXCEPTION:')
print(Exceptions[0])
print('please report this to ' + report_url)
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
use 2nd order friedmann equations + the continuity equation
numerically integrate the cosmological equations for standard cosmology where
omega = 1 for matter universe
also
omega = 1 with 30% matter, 70% dark energy
luminosity distance plots
https://arxiv.org/pdf/astro-ph/9905116.pdf
pl.plot(sampler.flatchain[:,0])
pl.plot(sampler.flatchain[:,1])
pl.plot(sampler.flatchain[:,0],sampler.flatchain[:,1],'.')
interaction term next
>>ODE solver (ODE int)
integrate both scale factor and continuity equation at the same time
change units
use time = 1/h0 and distance c/h0
AND
integrate backwards from today, so from 0 to -t
ok to ask Luke
later: modify to include co-moving distance (Ryden?)
+ Luminosity distance
find out how old a universe with only matter and at
critical density would be today
integrate over time, t0 will be now, where a crosses 0 is the beginning
so find time in years
then tell how a changes with time and the age of the universe in a universe
with critical density today, for a single fluid with an equation of state
w=-1/3
Don't always ask what to do
event catcher - stop integration once a reaches 0
put in more than one fluid in the universe
maybe http://www.ni.gsu.edu/~rclewley/PyDSTool/FrontPage.html
m + DE universe (age)
Create plot of omega lambda vs omega matter across age of universe
0.3 and 0.7 today, what were they in the past
Krysztoff, Luke
[try a_dot as equation and not just a_dot, i.e. ditch a_dotdot, but that is if
the equations are correct. if the solver doesn't have a problem when feeding
data in at each calculation then a_dot as a_dot and not equation shouldn't
matter] - worked out in the end
what interaction terms could there be?
NOT AN ASSIGNMENT
"""
from scipy.integrate import odeint
import numpy as np
from pylab import figure, plot, xlabel, grid, legend, title
from matplotlib.font_manager import FontProperties
def vectorfield(v, t, w):
a, a_dot, e_dash_m, e_dash_de, omega0m, omega0de = v
w_m, w_de = w
# Create f = [a_dot, a_dotdot, e'_dotm, e'_dotde, omegam_dot, omegade_dot]:
f = [a_dot,
(-a/2)*(e_dash_m*(1+3*w_m)+e_dash_de*(1+3*w_de)),
-3*(a_dot/a)*(e_dash_m*(1+w_m)),
-3*(a_dot/a)*(e_dash_de*(1+w_de)),
(H0**2)*e_dash_m*(2*a/a_dot)*(1-a*(-a/2)*(e_dash_m*(1+3*w_m)+
e_dash_de*(1+3*w_de))/a_dot**2)+(a/a_dot)**2*
(-3*a_dot/a*e_dash_m*(1+w_m)),
(H0**2)*e_dash_de*(2*a/a_dot)*(1-a*(-a/2)*(e_dash_m*(1+3*w_m)+
e_dash_de*(1+3*w_de))/a_dot**2)+(a/a_dot)**2*
(-3*a_dot/a*e_dash_de*(1+w_de))]
return f
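# For reference, the dimensionless system integrated above (time in units of
# 1/H0, e' = e/e_crit; a sketch of the standard acceleration and continuity
# equations this vector field is meant to implement):
#   a_dotdot = -(a/2) * [e'_m (1 + 3 w_m) + e'_de (1 + 3 w_de)]
#   e'_dot_i = -3 (a_dot/a) e'_i (1 + w_i)    for i in {m, de}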
# scale factor value below which to discard results, chosen by looking at the plot
# set arbitrarily - sometimes jumps over the result(?)
a_d = 10e-6
# time in 1/H0 to integrate until; if this time isn't long enough to reach a_d
# then "time" will be added to integration time until a_d is reached
# 0.665 matter only, 0.96 m+de, -0.49
time = 0.6
# Parameters
H0 = 1 # Hubble parameter at t=now
#Dh = c/H0 # Hubble distance
tH = 1.0/H0 # Hubble time
# G = 1
# c = 1
# Eq of state parameter
w_m = 0.0 # matter
w_de = -1.0 # cosmological constant (dark energy)
w_r = 1/3 # radiation
# Initial conditions
# a0 = scale factor, a_dot = speed, e_dash0 = e0/ec0
a0 = 1.0
a_dot0 = 1.0
e_dash0m = 0.3
e_dash0de = 0.7
omega0m = 0.3
omega0de = 0.7
# ODE solver parameters
abserr = 1.0e-8
relerr = 1.0e-6
numpoints = 250
stoptime = -time
# Plot the solution
figure()
xlabel('t in 1/H0')
grid(True)
lw = 1
while True:
# print('stoptime is:',str(stoptime),'1/H0')
# Create the time samples for the output of the ODE solver.
t = [stoptime*tH * float(i) / (numpoints - 1) for i in range(numpoints)]
# Pack up the parameters and initial conditions:
v0 = [a0, a_dot0, e_dash0m, e_dash0de, omega0m, omega0de]
w = [w_m, w_de]
# Call the ODE solver.
vsol = odeint(vectorfield, v0, t, args=(w,), atol=abserr, rtol=relerr)
# Remove unwanted results from the plot
# Separate results into their own arrays
a = vsol[:,0]
a_dot = vsol[:,1]
e_dashm = vsol[:,2]
e_dashde = vsol[:,3]
omegam = vsol[:,4]
omegade = vsol[:,5]
# Find where results start to get strange
    blowups = np.where(a < a_d) # tuple with indices of a
# so small it blows up a_dot
blowups = np.asarray(blowups) # converting to np array
if blowups.any():
blowup = blowups[0,0] # first instance of a being too small
else:
stoptime -= time
continue
# Remove the values after the index when a is too small
t_cut = np.asarray(t)
t_cut = t_cut[:blowup]
a = a[:blowup]
a_dot = a_dot[:blowup]
e_dashm = e_dashm[:blowup]
e_dashde = e_dashde[:blowup]
omegam = omegam[:blowup]
omegade = omegade[:blowup]
# Age
age = t_cut[np.argmin(t_cut)]
age = round(age, 2)
    # plotting selected results
# a and a_dot
xlabel('t in 1/H0')
lw = 1
plot(t_cut, a, 'r', linewidth=lw)
plot(t_cut, a_dot, 'b', linewidth=lw)
legend((r'$a$', r'$\.a$'), prop=FontProperties(size=16))
title('Cut results for $\omega$ = %s, age = %s 1/H0'%(w,age))
    while False:  # flip to True to also plot the cut energy densities (runs once)
# e_dashm
figure()
xlabel('t in 1/H0')
lw = 1
plot(t_cut, e_dashm, 'g', linewidth=lw)
title('Cut $\epsilon_m \'$ for $\omega$ = %s'%(w))
# e_dashde
figure()
xlabel('t in 1/H0')
lw = 1
plot(t_cut, e_dashde, 'm', linewidth=lw)
title('Cut $\epsilon_{DE} \'$ for $\omega$ = %s'%(w))
break
# omegam and omegade
figure()
plot(t_cut, omegam, 'c', linewidth=lw)
plot(t_cut, omegade, 'k', linewidth=lw)
legend((r'$\Omega_m$', r'$\Omega_{DE}$'), prop=FontProperties(size=16))
title('Cut results for $\omega$ = %s, age = %s 1/H0'%(w,age))
break
while False:  # flip to True to plot the complete, uncut results (runs once)
figure()
xlabel('t in 1/H0')
grid(True)
lw = 1
# plotting all results
plot(t, vsol[:,0], 'r', linewidth=lw)
plot(t, vsol[:,1], 'b', linewidth=lw)
legend((r'$a$', r'$\.a$', r'$\'\epsilon$'), prop=FontProperties(size=16))
title('Complete results for $\omega$ = %s'%(w))
break
|
|
# Copyright 2013-2015 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import unittest2 as unittest
except ImportError:
import unittest # noqa
from mock import Mock
import cassandra
from cassandra.cqltypes import IntegerType, AsciiType, TupleType
from cassandra.metadata import (Murmur3Token, MD5Token,
BytesToken, ReplicationStrategy,
NetworkTopologyStrategy, SimpleStrategy,
LocalStrategy, NoMurmur3, protect_name,
protect_names, protect_value, is_valid_name,
UserType, KeyspaceMetadata, Metadata,
_UnknownStrategy)
from cassandra.policies import SimpleConvictionPolicy
from cassandra.pool import Host
class StrategiesTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
"Hook method for setting up class fixture before running tests in the class."
if not hasattr(cls, 'assertItemsEqual'):
cls.assertItemsEqual = cls.assertCountEqual
def test_replication_strategy(self):
"""
Basic code coverage testing that ensures different ReplicationStrategies
can be initiated using parameters correctly.
"""
rs = ReplicationStrategy()
self.assertEqual(rs.create('OldNetworkTopologyStrategy', None), _UnknownStrategy('OldNetworkTopologyStrategy', None))
fake_options_map = {'options': 'map'}
uks = rs.create('OldNetworkTopologyStrategy', fake_options_map)
self.assertEqual(uks, _UnknownStrategy('OldNetworkTopologyStrategy', fake_options_map))
self.assertEqual(uks.make_token_replica_map({}, []), {})
fake_options_map = {'dc1': '3'}
self.assertIsInstance(rs.create('NetworkTopologyStrategy', fake_options_map), NetworkTopologyStrategy)
self.assertEqual(rs.create('NetworkTopologyStrategy', fake_options_map).dc_replication_factors,
NetworkTopologyStrategy(fake_options_map).dc_replication_factors)
fake_options_map = {'options': 'map'}
self.assertIsNone(rs.create('SimpleStrategy', fake_options_map))
fake_options_map = {'options': 'map'}
self.assertIsInstance(rs.create('LocalStrategy', fake_options_map), LocalStrategy)
fake_options_map = {'options': 'map', 'replication_factor': 3}
self.assertIsInstance(rs.create('SimpleStrategy', fake_options_map), SimpleStrategy)
self.assertEqual(rs.create('SimpleStrategy', fake_options_map).replication_factor,
SimpleStrategy(fake_options_map).replication_factor)
self.assertEqual(rs.create('xxxxxxxx', fake_options_map), _UnknownStrategy('xxxxxxxx', fake_options_map))
self.assertRaises(NotImplementedError, rs.make_token_replica_map, None, None)
self.assertRaises(NotImplementedError, rs.export_for_schema)
def test_nts_make_token_replica_map(self):
token_to_host_owner = {}
dc1_1 = Host('dc1.1', SimpleConvictionPolicy)
dc1_2 = Host('dc1.2', SimpleConvictionPolicy)
dc1_3 = Host('dc1.3', SimpleConvictionPolicy)
for host in (dc1_1, dc1_2, dc1_3):
host.set_location_info('dc1', 'rack1')
token_to_host_owner[MD5Token(0)] = dc1_1
token_to_host_owner[MD5Token(100)] = dc1_2
token_to_host_owner[MD5Token(200)] = dc1_3
dc2_1 = Host('dc2.1', SimpleConvictionPolicy)
dc2_2 = Host('dc2.2', SimpleConvictionPolicy)
dc2_1.set_location_info('dc2', 'rack1')
dc2_2.set_location_info('dc2', 'rack1')
token_to_host_owner[MD5Token(1)] = dc2_1
token_to_host_owner[MD5Token(101)] = dc2_2
dc3_1 = Host('dc3.1', SimpleConvictionPolicy)
dc3_1.set_location_info('dc3', 'rack3')
token_to_host_owner[MD5Token(2)] = dc3_1
ring = [MD5Token(0),
MD5Token(1),
MD5Token(2),
MD5Token(100),
MD5Token(101),
MD5Token(200)]
nts = NetworkTopologyStrategy({'dc1': 2, 'dc2': 2, 'dc3': 1})
replica_map = nts.make_token_replica_map(token_to_host_owner, ring)
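        # With RF {'dc1': 2, 'dc2': 2, 'dc3': 1}, walking the ring from token
        # 0 should yield two distinct hosts in dc1, two in dc2 and the single
        # dc3 host, hence the five replicas asserted below.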
self.assertItemsEqual(replica_map[MD5Token(0)], (dc1_1, dc1_2, dc2_1, dc2_2, dc3_1))
def test_nts_make_token_replica_map_empty_dc(self):
host = Host('1', SimpleConvictionPolicy)
host.set_location_info('dc1', 'rack1')
token_to_host_owner = {MD5Token(0): host}
ring = [MD5Token(0)]
nts = NetworkTopologyStrategy({'dc1': 1, 'dc2': 0})
replica_map = nts.make_token_replica_map(token_to_host_owner, ring)
self.assertEqual(set(replica_map[MD5Token(0)]), set([host]))
def test_nts_export_for_schema(self):
strategy = NetworkTopologyStrategy({'dc1': '1', 'dc2': '2'})
self.assertEqual("{'class': 'NetworkTopologyStrategy', 'dc1': '1', 'dc2': '2'}",
strategy.export_for_schema())
def test_simple_strategy_make_token_replica_map(self):
host1 = Host('1', SimpleConvictionPolicy)
host2 = Host('2', SimpleConvictionPolicy)
host3 = Host('3', SimpleConvictionPolicy)
token_to_host_owner = {
MD5Token(0): host1,
MD5Token(100): host2,
MD5Token(200): host3
}
ring = [MD5Token(0), MD5Token(100), MD5Token(200)]
rf1_replicas = SimpleStrategy({'replication_factor': '1'}).make_token_replica_map(token_to_host_owner, ring)
self.assertItemsEqual(rf1_replicas[MD5Token(0)], [host1])
self.assertItemsEqual(rf1_replicas[MD5Token(100)], [host2])
self.assertItemsEqual(rf1_replicas[MD5Token(200)], [host3])
rf2_replicas = SimpleStrategy({'replication_factor': '2'}).make_token_replica_map(token_to_host_owner, ring)
self.assertItemsEqual(rf2_replicas[MD5Token(0)], [host1, host2])
self.assertItemsEqual(rf2_replicas[MD5Token(100)], [host2, host3])
self.assertItemsEqual(rf2_replicas[MD5Token(200)], [host3, host1])
rf3_replicas = SimpleStrategy({'replication_factor': '3'}).make_token_replica_map(token_to_host_owner, ring)
self.assertItemsEqual(rf3_replicas[MD5Token(0)], [host1, host2, host3])
self.assertItemsEqual(rf3_replicas[MD5Token(100)], [host2, host3, host1])
self.assertItemsEqual(rf3_replicas[MD5Token(200)], [host3, host1, host2])
def test_ss_equals(self):
self.assertNotEqual(SimpleStrategy({'replication_factor': '1'}), NetworkTopologyStrategy({'dc1': 2}))
class NameEscapingTest(unittest.TestCase):
def test_protect_name(self):
"""
Test cassandra.metadata.protect_name output
"""
self.assertEqual(protect_name('tests'), 'tests')
self.assertEqual(protect_name('test\'s'), '"test\'s"')
self.assertEqual(protect_name('tests ?!@#$%^&*()'), '"tests ?!@#$%^&*()"')
self.assertEqual(protect_name('1'), '"1"')
self.assertEqual(protect_name('1test'), '"1test"')
def test_protect_names(self):
"""
Test cassandra.metadata.protect_names output
"""
self.assertEqual(protect_names(['tests']), ['tests'])
self.assertEqual(protect_names(
[
'tests',
'test\'s',
'tests ?!@#$%^&*()',
'1'
]),
[
'tests',
"\"test's\"",
'"tests ?!@#$%^&*()"',
'"1"'
])
def test_protect_value(self):
"""
Test cassandra.metadata.protect_value output
"""
self.assertEqual(protect_value(True), "true")
self.assertEqual(protect_value(False), "false")
self.assertEqual(protect_value(3.14), '3.14')
self.assertEqual(protect_value(3), '3')
self.assertEqual(protect_value('test'), "'test'")
self.assertEqual(protect_value('test\'s'), "'test''s'")
self.assertEqual(protect_value(None), 'NULL')
def test_is_valid_name(self):
"""
Test cassandra.metadata.is_valid_name output
"""
self.assertEqual(is_valid_name(None), False)
self.assertEqual(is_valid_name('test'), True)
self.assertEqual(is_valid_name('Test'), False)
self.assertEqual(is_valid_name('t_____1'), True)
self.assertEqual(is_valid_name('test1'), True)
self.assertEqual(is_valid_name('1test1'), False)
non_valid_keywords = cassandra.metadata._keywords - cassandra.metadata._unreserved_keywords
for keyword in non_valid_keywords:
self.assertEqual(is_valid_name(keyword), False)
class TokensTest(unittest.TestCase):
def test_murmur3_tokens(self):
try:
murmur3_token = Murmur3Token(cassandra.metadata.MIN_LONG - 1)
self.assertEqual(murmur3_token.hash_fn('123'), -7468325962851647638)
self.assertEqual(murmur3_token.hash_fn(b'\x00\xff\x10\xfa\x99' * 10), 5837342703291459765)
self.assertEqual(murmur3_token.hash_fn(b'\xfe' * 8), -8927430733708461935)
self.assertEqual(murmur3_token.hash_fn(b'\x10' * 8), 1446172840243228796)
self.assertEqual(murmur3_token.hash_fn(str(cassandra.metadata.MAX_LONG)), 7162290910810015547)
self.assertEqual(str(murmur3_token), '<Murmur3Token: -9223372036854775809>')
except NoMurmur3:
raise unittest.SkipTest('The murmur3 extension is not available')
def test_md5_tokens(self):
md5_token = MD5Token(cassandra.metadata.MIN_LONG - 1)
self.assertEqual(md5_token.hash_fn('123'), 42767516990368493138776584305024125808)
self.assertEqual(md5_token.hash_fn(str(cassandra.metadata.MAX_LONG)), 28528976619278518853815276204542453639)
self.assertEqual(str(md5_token), '<MD5Token: %s>' % -9223372036854775809)
def test_bytes_tokens(self):
bytes_token = BytesToken(str(cassandra.metadata.MIN_LONG - 1))
self.assertEqual(bytes_token.hash_fn('123'), '123')
self.assertEqual(bytes_token.hash_fn(123), 123)
self.assertEqual(bytes_token.hash_fn(str(cassandra.metadata.MAX_LONG)), str(cassandra.metadata.MAX_LONG))
self.assertEqual(str(bytes_token), "<BytesToken: -9223372036854775809>")
try:
bytes_token = BytesToken(cassandra.metadata.MIN_LONG - 1)
self.fail('Tokens for ByteOrderedPartitioner should be only strings')
except TypeError:
pass
class KeyspaceMetadataTest(unittest.TestCase):
def test_export_as_string_user_types(self):
keyspace_name = 'test'
keyspace = KeyspaceMetadata(keyspace_name, True, 'SimpleStrategy', dict(replication_factor=3))
keyspace.user_types['a'] = UserType(keyspace_name, 'a', ['one', 'two'],
[self.mock_user_type('UserType', 'c'),
self.mock_user_type('IntType', 'int')])
keyspace.user_types['b'] = UserType(keyspace_name, 'b', ['one', 'two', 'three'],
[self.mock_user_type('UserType', 'd'),
self.mock_user_type('IntType', 'int'),
self.mock_user_type('UserType', 'a')])
keyspace.user_types['c'] = UserType(keyspace_name, 'c', ['one'],
[self.mock_user_type('IntType', 'int')])
keyspace.user_types['d'] = UserType(keyspace_name, 'd', ['one'],
[self.mock_user_type('UserType', 'c')])
self.assertEqual("""CREATE KEYSPACE test WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '3'} AND durable_writes = true;
CREATE TYPE test.c (
one int
);
CREATE TYPE test.a (
one c,
two int
);
CREATE TYPE test.d (
one c
);
CREATE TYPE test.b (
one d,
two int,
three a
);""", keyspace.export_as_string())
def mock_user_type(self, cassname, typename):
return Mock(**{'cassname': cassname, 'typename': typename, 'cql_parameterized_type.return_value': typename})
class UserTypesTest(unittest.TestCase):
def test_as_cql_query(self):
field_types = [IntegerType, AsciiType, TupleType.apply_parameters([IntegerType, AsciiType])]
udt = UserType("ks1", "mytype", ["a", "b", "c"], field_types)
self.assertEqual("CREATE TYPE ks1.mytype (a varint, b ascii, c frozen<tuple<varint, ascii>>);", udt.as_cql_query(formatted=False))
self.assertEqual("""CREATE TYPE ks1.mytype (
a varint,
b ascii,
c frozen<tuple<varint, ascii>>
);""", udt.as_cql_query(formatted=True))
def test_as_cql_query_name_escaping(self):
udt = UserType("MyKeyspace", "MyType", ["AbA", "keyspace"], [AsciiType, AsciiType])
self.assertEqual('CREATE TYPE "MyKeyspace"."MyType" ("AbA" ascii, "keyspace" ascii);', udt.as_cql_query(formatted=False))
class IndexTest(unittest.TestCase):
def test_build_index_as_cql(self):
column_meta = Mock()
column_meta.name = 'column_name_here'
column_meta.table.name = 'table_name_here'
column_meta.table.keyspace.name = 'keyspace_name_here'
meta_model = Metadata()
row = {'index_name': 'index_name_here', 'index_type': 'index_type_here'}
index_meta = meta_model._build_index_metadata(column_meta, row)
self.assertEqual(index_meta.as_cql_query(),
'CREATE INDEX index_name_here ON keyspace_name_here.table_name_here (column_name_here)')
row['index_options'] = '{ "class_name": "class_name_here" }'
row['index_type'] = 'CUSTOM'
index_meta = meta_model._build_index_metadata(column_meta, row)
self.assertEqual(index_meta.as_cql_query(),
"CREATE CUSTOM INDEX index_name_here ON keyspace_name_here.table_name_here (column_name_here) USING 'class_name_here'")
|
|
#! /usr/bin/env python
"""
Module with cube cosmetic functions for SDI datasets.
"""
from __future__ import division
from __future__ import print_function
__author__ = 'V. Christiaens @ UChile/ULg, C. Gomez @ ULg'
__all__ = ['cube_correct_nan',
'approx_stellar_position']
import copy
import numpy as np
from skimage.draw import circle
from astropy.stats import sigma_clipped_stats
from ..stats import sigma_filter
def cube_correct_nan(cube, neighbor_box=3, min_neighbors=3, verbose=False,
half_res_y=False):
"""Sigma filtering of nan pixels in a whole frame or cube. Intended for
SINFONI data.
Parameters
----------
cube : cube_like
Input 3d or 2d array.
neighbor_box : int, optional
The side of the square window around each pixel where the sigma and
median are calculated for the nan pixel correction.
min_neighbors : int, optional
Minimum number of good neighboring pixels to be able to correct the
bad/nan pixels.
    verbose: {False,True} bool, optional
        Whether to print more information or not during processing.
    half_res_y: bool, {True,False}, optional
        Whether the input data have every pair of consecutive rows identical,
        i.e. half the angular resolution vertically compared to horizontally
        (e.g. SINFONI data). The algorithm runs twice as fast if this option
        is rightly set to True.
Returns
-------
obj_tmp : array_like
Output cube with corrected nan pixels in each frame
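    Examples
    --------
    A minimal sketch with hypothetical data (a 2-channel cube containing one
    nan pixel); the actual correction is delegated to ``sigma_filter``:
    >>> import numpy as np
    >>> cube = np.ones((2, 6, 6))
    >>> cube[0, 2, 3] = np.nan
    >>> cleaned = cube_correct_nan(cube, neighbor_box=3, min_neighbors=3)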
"""
obj_tmp= cube.copy()
ndims = obj_tmp.ndim
if ndims != 2 and ndims != 3:
raise TypeError("Input object is not two or three dimensional")
if neighbor_box < 3 or neighbor_box%2 == 0:
        raise ValueError('neighbor_box should be an odd value greater than or equal to 3')
max_neigh = sum(range(3,neighbor_box+2,2))
if min_neighbors > max_neigh:
min_neighbors = max_neigh
msg = "Warning! min_neighbors was reduced to "+str(max_neigh) +\
" to avoid bugs. \n"
print(msg)
def nan_corr_2d(obj_tmp):
n_x = obj_tmp.shape[1]
n_y = obj_tmp.shape[0]
if half_res_y:
if n_y%2 != 0:
msg = 'The input frames do not have an even number of rows. '
msg2 = 'Hence, you should probably not be using the option '
msg3 = 'half_res_y = True.'
raise ValueError(msg+msg2+msg3)
n_y = int(n_y/2)
frame = obj_tmp
obj_tmp = np.zeros([n_y,n_x])
for yy in range(n_y):
obj_tmp[yy] = frame[2*yy]
# tuple with the 2D indices of each nan value of the frame
nan_indices = np.where(np.isnan(obj_tmp))
nan_map = np.zeros_like(obj_tmp)
nan_map[nan_indices] = 1
nnanpix = int(np.sum(nan_map))
# Correct nan with iterative sigma filter
obj_tmp = sigma_filter(obj_tmp, nan_map, neighbor_box=neighbor_box,
min_neighbors=min_neighbors, verbose=verbose)
if half_res_y:
frame = obj_tmp
n_y = 2*n_y
obj_tmp = np.zeros([n_y,n_x])
for yy in range(n_y):
obj_tmp[yy] = frame[int(yy/2)]
return obj_tmp, nnanpix
if ndims == 2:
obj_tmp, nnanpix = nan_corr_2d(obj_tmp)
if verbose:
print('\n There were ', nnanpix, ' nan pixels corrected.')
elif ndims == 3:
n_z = obj_tmp.shape[0]
for zz in range(n_z):
obj_tmp[zz], nnanpix = nan_corr_2d(obj_tmp[zz])
if verbose:
msg = 'In channel '+str(zz)+', there were '+str(nnanpix)
msg2 = ' nan pixels corrected.'
print(msg+msg2)
if verbose:
print('All nan pixels are corrected.')
return obj_tmp
def approx_stellar_position(cube, fwhm, return_test=False, verbose=False):
"""FIND THE APPROX COORDS OF THE STAR IN EACH CHANNEL (even the ones
dominated by noise)
Parameters
----------
    cube : array_like
Input 3d cube
fwhm : float or array 1D
Input full width half maximum value of the PSF for each channel.
This will be used as the standard deviation for Gaussian kernel
of the Gaussian filtering.
If float, it is assumed the same for all channels.
return_test: bool, {False,True}, optional
        Whether the test result vector (a bool vector telling whether the star
        centroid could be found in the corresponding channel) should be
        returned as well, along with the approx stellar coordinates.
    verbose: {True, False}, bool, optional
        Chooses whether to print some additional information.
    Returns
    -------
    Array of y and x approx coordinates of the star in each channel of the
    cube. If return_test is True, the test result vector is also returned.
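    Examples
    --------
    A hedged sketch (assumes a 3d ``cube`` is available and that the PSF fwhm
    is roughly 4 pixels in every channel):
    >>> coords = approx_stellar_position(cube, fwhm=4.0)
    >>> coords, ok = approx_stellar_position(cube, fwhm=4.0, return_test=True)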
"""
from ..phot import peak_coordinates
obj_tmp = cube.copy()
n_z = obj_tmp.shape[0]
    if isinstance(fwhm, (float, int)):
fwhm_scal = fwhm
fwhm = np.zeros((n_z))
fwhm[:] = fwhm_scal
    #1/ Write a 2-column array with indices of all max pixel values in the cube
star_tmp_idx = np.zeros([n_z,2])
star_approx_idx = np.zeros([n_z,2])
test_result = np.ones(n_z)
for zz in range(n_z):
star_tmp_idx[zz] = peak_coordinates(obj_tmp[zz], fwhm[zz])
#2/ Detect the outliers in each column
_, med_y, stddev_y = sigma_clipped_stats(star_tmp_idx[:,0],sigma=2.5)
_, med_x, stddev_x = sigma_clipped_stats(star_tmp_idx[:,1],sigma=2.5)
lim_inf_y = med_y-3*stddev_y
lim_sup_y = med_y+3*stddev_y
lim_inf_x = med_x-3*stddev_x
lim_sup_x = med_x+3*stddev_x
if verbose:
print("median y of star - 3sigma = ", lim_inf_y)
print("median y of star + 3sigma = ", lim_sup_y)
print("median x of star - 3sigma = ", lim_inf_x)
print("median x of star + 3sigma = ", lim_sup_x)
for zz in range(n_z):
if ((star_tmp_idx[zz,0]<lim_inf_y) or (star_tmp_idx[zz,0]>lim_sup_y) or
(star_tmp_idx[zz,1]<lim_inf_x) or (star_tmp_idx[zz,1]>lim_sup_x)):
test_result[zz] = 0
    #3/ Replace by the mean of the nearest good coordinates if need be
for zz in range(n_z):
if test_result[zz] == 0:
ii= 1
inf_neigh = max(0,zz-ii)
sup_neigh = min(n_z-1,zz+ii)
while test_result[inf_neigh] == 0 and test_result[sup_neigh] == 0:
ii=ii+1
inf_neigh = max(0,zz-ii)
sup_neigh = min(n_z-1,zz+ii)
if test_result[inf_neigh] == 1 and test_result[sup_neigh] == 1:
star_approx_idx[zz] = np.floor((star_tmp_idx[sup_neigh]+ \
star_tmp_idx[inf_neigh])/2.)
elif test_result[inf_neigh] == 1:
star_approx_idx[zz] = star_tmp_idx[inf_neigh]
else: star_approx_idx[zz] = star_tmp_idx[sup_neigh]
else: star_approx_idx[zz] = star_tmp_idx[zz]
if return_test:
return star_approx_idx, test_result.astype(bool)
else:
return star_approx_idx
|
|
#!/usr/bin/env python
# encoding: utf-8
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import json
import mock
from ec2stack.helpers import read_file, generate_signature
from . import Ec2StackAppTestCase
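# Common pattern in the tests below: build EC2-style form parameters, sign
# them with generate_signature, stub the CloudStack backend by patching
# requests.get with canned JSON fixtures, POST to '/', then assert on the
# rendered EC2 response.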
class InstancesTestCase(Ec2StackAppTestCase):
def test_describe_instance_attribute(self):
data = self.get_example_data()
data['Action'] = 'DescribeInstanceAttribute'
data['InstanceId'] = '43791f77-26f8-48ca-b557-3a9392f735ae'
data['Attribute'] = 'instanceType'
data['Signature'] = generate_signature(data, 'POST', 'localhost', '/')
get = mock.Mock()
get.return_value.text = read_file(
'tests/data/valid_describe_instance.json'
)
get.return_value.status_code = 200
with mock.patch('requests.get', get):
response = self.post(
'/',
data=data
)
self.assert_ok(response)
assert 'DescribeInstanceAttributeResponse' in response.data
def test_describe_invalid_instance_attribute(self):
data = self.get_example_data()
data['Action'] = 'DescribeInstanceAttribute'
data['InstanceId'] = '43791f77-26f8-48ca-b557-3a9392f735ae'
data['Attribute'] = 'invalid_attribute'
data['Signature'] = generate_signature(data, 'POST', 'localhost', '/')
get = mock.Mock()
get.return_value.text = read_file(
'tests/data/valid_describe_instance.json'
)
get.return_value.status_code = 200
with mock.patch('requests.get', get):
response = self.post(
'/',
data=data
)
self.assert_bad_request(response)
assert 'InvalidParameterValue' in response.data
def test_describe_instances(self):
data = self.get_example_data()
data['Action'] = 'DescribeInstances'
data['Signature'] = generate_signature(data, 'POST', 'localhost', '/')
get = mock.Mock()
get.return_value.text = read_file(
'tests/data/valid_describe_instances.json'
)
get.return_value.status_code = 200
with mock.patch('requests.get', get):
response = self.post(
'/',
data=data
)
self.assert_ok(response)
assert 'DescribeInstancesResponse' in response.data
def test_empty_response_describe_instances(self):
data = self.get_example_data()
data['Action'] = 'DescribeInstances'
data['Signature'] = generate_signature(data, 'POST', 'localhost', '/')
get = mock.Mock()
get.return_value.text = read_file(
'tests/data/empty_describe_instances.json'
)
get.return_value.status_code = 200
with mock.patch('requests.get', get):
response = self.post(
'/',
data=data
)
self.assert_ok(response)
assert 'DescribeInstancesResponse' in response.data
def test_describe_instance_by_id(self):
data = self.get_example_data()
data['Action'] = 'DescribeInstances'
data['InstanceId.1'] = 'aa10a43e-56db-4a34-88bd-1c2a51c0bc04'
data['Signature'] = generate_signature(data, 'POST', 'localhost', '/')
get = mock.Mock()
get.return_value.text = read_file(
'tests/data/valid_describe_instances.json'
)
get.return_value.status_code = 200
with mock.patch('requests.get', get):
response = self.post(
'/',
data=data
)
self.assert_ok(response)
assert 'DescribeInstancesResponse' in response.data
assert 'aa10a43e-56db-4a34-88bd-1c2a51c0bc04' in response.data
def test_invalid_describe_instance_by_id(self):
data = self.get_example_data()
data['Action'] = 'DescribeInstances'
data['InstanceId.1'] = 'invalid-instance-id'
data['Signature'] = generate_signature(data, 'POST', 'localhost', '/')
get = mock.Mock()
get.return_value.text = read_file(
'tests/data/valid_describe_instances.json'
)
get.return_value.status_code = 200
with mock.patch('requests.get', get):
response = self.post(
'/',
data=data
)
self.assert_bad_request(response)
assert 'InvalidInstanceId.NotFound' in response.data
def test_empty_response_describe_instance_by_id(self):
data = self.get_example_data()
data['Action'] = 'DescribeInstances'
data['InstanceId.1'] = 'invalid-instance-id'
data['Signature'] = generate_signature(data, 'POST', 'localhost', '/')
get = mock.Mock()
get.return_value.text = read_file(
'tests/data/empty_describe_instances.json'
)
get.return_value.status_code = 200
with mock.patch('requests.get', get):
response = self.post(
'/',
data=data
)
self.assert_bad_request(response)
assert 'InvalidInstanceId.NotFound' in response.data
def test_reboot_instance(self):
data = self.get_example_data()
data['Action'] = 'RebootInstances'
data['InstanceId.1'] = '076166a1-9f6e-11e3-b8df-3c075456b21a'
data['Signature'] = generate_signature(data, 'POST', 'localhost', '/')
get = mock.Mock()
get.return_value.text = read_file(
'tests/data/valid_reboot_instance.json'
)
get.return_value.status_code = 200
with mock.patch('requests.get', get):
response = self.post(
'/',
data=data
)
self.assert_ok(response)
assert 'RebootInstancesResponse' in response.data
def test_start_instance(self):
data = self.get_example_data()
data['Action'] = 'StartInstances'
data['InstanceId.1'] = '076166a1-9f6e-11e3-b8df-3c075456b21a'
data['Signature'] = generate_signature(data, 'POST', 'localhost', '/')
get = mock.Mock()
get.return_value.text = read_file(
'tests/data/valid_start_instance.json'
)
get.return_value.status_code = 200
get_instance = mock.Mock()
get_instance.return_value = json.loads(read_file(
'tests/data/valid_get_instance_by_id.json'
))
with mock.patch('requests.get', get):
with mock.patch(
'ec2stack.providers.cloudstack.instances.describe_instance_by_id',
get_instance
):
response = self.post(
'/',
data=data
)
self.assert_ok(response)
assert 'StartInstancesResponse' in response.data
def test_stop_instance(self):
data = self.get_example_data()
data['Action'] = 'StopInstances'
data['InstanceId.1'] = '076166a1-9f6e-11e3-b8df-3c075456b21a'
data['Signature'] = generate_signature(data, 'POST', 'localhost', '/')
get = mock.Mock()
get.return_value.text = read_file(
'tests/data/valid_stop_instance.json'
)
get.return_value.status_code = 200
get_instance = mock.Mock()
get_instance.return_value = json.loads(read_file(
'tests/data/valid_get_instance_by_id.json'
))
with mock.patch('requests.get', get):
with mock.patch(
'ec2stack.providers.cloudstack.instances.describe_instance_by_id',
get_instance
):
response = self.post(
'/',
data=data
)
self.assert_ok(response)
assert 'StopInstancesResponse' in response.data
def test_terminate_instance(self):
data = self.get_example_data()
data['Action'] = 'TerminateInstances'
data['InstanceId.1'] = '076166a1-9f6e-11e3-b8df-3c075456b21a'
data['Signature'] = generate_signature(data, 'POST', 'localhost', '/')
get = mock.Mock()
get.return_value.text = read_file(
'tests/data/valid_terminate_instance.json'
)
get.return_value.status_code = 200
get_instance = mock.Mock()
get_instance.return_value = json.loads(read_file(
'tests/data/valid_get_instance_by_id.json'
))
with mock.patch('requests.get', get):
with mock.patch(
'ec2stack.providers.cloudstack.instances.describe_instance_by_id',
get_instance
):
response = self.post(
'/',
data=data
)
self.assert_ok(response)
assert 'TerminateInstancesResponse' in response.data
def test_run_instance(self):
data = self.get_example_data()
data['Action'] = 'RunInstances'
data['ImageId'] = 'a32d70ee-95e4-11e3-b2e4-d19c9d3e5e1d'
data['MinCount'] = '0'
data['MaxCount'] = '0'
data['SecurityGroupId.1'] = 'example-security-group-id'
data['SecurityGroup.1'] = 'example-security-group-name'
data['KeyName'] = 'example-ssh-key-name'
data['UserData'] = 'example-user-data'
data['Signature'] = generate_signature(data, 'POST', 'localhost', '/')
get = mock.Mock()
get.return_value.text = read_file(
'tests/data/valid_run_instance.json'
)
get.return_value.status_code = 200
get_service_offering = mock.Mock()
get_service_offering.return_value = json.loads(read_file(
'tests/data/service_offering_search.json'
))
get_zone = mock.Mock()
get_zone.return_value = json.loads(read_file(
'tests/data/zones_search.json'
))
with mock.patch('requests.get', get):
with mock.patch(
'ec2stack.providers.cloudstack.service_offerings.get_service_offering',
get_service_offering
):
with mock.patch(
'ec2stack.providers.cloudstack.zones.get_zone',
get_zone
):
response = self.post(
'/',
data=data
)
self.assert_ok(response)
assert 'RunInstancesResponse' in response.data
def test_run_instance_gp2(self):
data = self.get_example_data()
data['Action'] = 'RunInstances'
data['ImageId'] = 'a32d70ee-95e4-11e3-b2e4-d19c9d3e5e1d'
data['MinCount'] = '0'
data['MaxCount'] = '0'
data['SecurityGroupId.1'] = 'example-security-group-id'
data['SecurityGroup.1'] = 'example-security-group-name'
data['KeyName'] = 'example-ssh-key-name'
data['UserData'] = 'example-user-data'
data['BlockDeviceMapping.1.Ebs.VolumeType'] = 'gp2'
data['BlockDeviceMapping.1.Ebs.VolumeSize'] = '20'
data['Signature'] = generate_signature(data, 'POST', 'localhost', '/')
get = mock.Mock()
get.return_value.text = read_file(
'tests/data/valid_run_instance.json'
)
get.return_value.status_code = 200
get_disk_offering = mock.Mock()
get_disk_offering.return_value = json.loads(read_file(
'tests/data/disk_offering_search.json'
))
get_service_offering = mock.Mock()
get_service_offering.return_value = json.loads(read_file(
'tests/data/service_offering_search.json'
))
get_zone = mock.Mock()
get_zone.return_value = json.loads(read_file(
'tests/data/zones_search.json'
))
with mock.patch('requests.get', get):
with mock.patch(
'ec2stack.providers.cloudstack.disk_offerings.get_disk_offering',
get_disk_offering
):
with mock.patch(
'ec2stack.providers.cloudstack.service_offerings.get_service_offering',
get_service_offering
):
with mock.patch(
'ec2stack.providers.cloudstack.zones.get_zone',
get_zone
):
response = self.post(
'/',
data=data
)
self.assert_ok(response)
assert 'RunInstancesResponse' in response.data
def test_run_instance_gp2_no_volume_size(self):
data = self.get_example_data()
data['Action'] = 'RunInstances'
data['ImageId'] = 'a32d70ee-95e4-11e3-b2e4-d19c9d3e5e1d'
data['MinCount'] = '0'
data['MaxCount'] = '0'
data['SecurityGroupId.1'] = 'example-security-group-id'
data['SecurityGroup.1'] = 'example-security-group-name'
data['KeyName'] = 'example-ssh-key-name'
data['UserData'] = 'example-user-data'
data['BlockDeviceMapping.1.Ebs.VolumeType'] = 'gp2'
data['Signature'] = generate_signature(data, 'POST', 'localhost', '/')
get = mock.Mock()
get.return_value.text = read_file(
'tests/data/valid_run_instance.json'
)
get.return_value.status_code = 200
get_disk_offering = mock.Mock()
get_disk_offering.return_value = json.loads(read_file(
'tests/data/disk_offering_search.json'
))
get_service_offering = mock.Mock()
get_service_offering.return_value = json.loads(read_file(
'tests/data/service_offering_search.json'
))
get_zone = mock.Mock()
get_zone.return_value = json.loads(read_file(
'tests/data/zones_search.json'
))
with mock.patch('requests.get', get):
with mock.patch(
'ec2stack.providers.cloudstack.disk_offerings.get_disk_offering',
get_disk_offering
):
with mock.patch(
'ec2stack.providers.cloudstack.service_offerings.get_service_offering',
get_service_offering
):
with mock.patch(
'ec2stack.providers.cloudstack.zones.get_zone',
get_zone
):
response = self.post(
'/',
data=data
)
self.assert_bad_request(response)
assert 'VolumeSize not found in BlockDeviceMapping' in response.data
    def test_run_instance_with_zone_and_type(self):
data = self.get_example_data()
data['Action'] = 'RunInstances'
data['InstanceType'] = 'micro'
data['Placement.AvailabilityZone'] = 'example-zone'
data['ImageId'] = 'a32d70ee-95e4-11e3-b2e4-d19c9d3e5e1d'
data['MinCount'] = '0'
data['MaxCount'] = '0'
data['SecurityGroupId.1'] = 'example-security-group-id'
data['SecurityGroup.1'] = 'example-security-group-name'
data['KeyName'] = 'example-ssh-key-name'
data['UserData'] = 'example-user-data'
data['Signature'] = generate_signature(data, 'POST', 'localhost', '/')
get = mock.Mock()
get.return_value.text = read_file(
'tests/data/valid_run_instance.json'
)
get.return_value.status_code = 200
get_service_offering = mock.Mock()
get_service_offering.return_value = json.loads(read_file(
'tests/data/service_offering_search.json'
))
get_zone = mock.Mock()
get_zone.return_value = json.loads(read_file(
'tests/data/zones_search.json'
))
with mock.patch('requests.get', get):
with mock.patch(
'ec2stack.providers.cloudstack.service_offerings.get_service_offering',
get_service_offering
):
with mock.patch(
'ec2stack.providers.cloudstack.zones.get_zone',
get_zone
):
response = self.post(
'/',
data=data
)
self.assert_ok(response)
assert 'RunInstancesResponse' in response.data
def test_run_instance_invalid_image_id(self):
data = self.get_example_data()
data['Action'] = 'RunInstances'
data['ImageId'] = 'invalid-id'
data['MinCount'] = '0'
data['MaxCount'] = '0'
data['SecurityGroupId.1'] = 'example-security-group-id'
data['SecurityGroup.1'] = 'example-security-group-name'
data['KeyName'] = 'example-ssh-key-name'
data['UserData'] = 'example-user-data'
data['Signature'] = generate_signature(data, 'POST', 'localhost', '/')
get = mock.Mock()
get.return_value.text = read_file(
'tests/data/invalid_run_instance_image_not_found.json'
)
get.return_value.status_code = 431
get_service_offering = mock.Mock()
get_service_offering.return_value = json.loads(read_file(
'tests/data/service_offering_search.json'
))
get_zone = mock.Mock()
get_zone.return_value = json.loads(read_file(
'tests/data/zones_search.json'
))
with mock.patch('requests.get', get):
with mock.patch(
'ec2stack.providers.cloudstack.service_offerings.get_service_offering',
get_service_offering
):
with mock.patch(
'ec2stack.providers.cloudstack.zones.get_zone',
get_zone
):
response = self.post(
'/',
data=data
)
self.assert_bad_request(response)
assert 'InvalidAMIID.NotFound' in response.data
def test_run_instance_invalid_security_group(self):
data = self.get_example_data()
data['Action'] = 'RunInstances'
data['ImageId'] = 'a32d70ee-95e4-11e3-b2e4-d19c9d3e5e1d'
data['MinCount'] = '0'
data['MaxCount'] = '0'
data['SecurityGroupId.1'] = 'invalid-security-group-id'
data['SecurityGroup.1'] = 'invalid-security-group-name'
data['KeyName'] = 'example-ssh-key-name'
data['UserData'] = 'example-user-data'
data['Signature'] = generate_signature(data, 'POST', 'localhost', '/')
get = mock.Mock()
get.return_value.text = read_file(
'tests/data/invalid_run_instance_security_group_not_found.json'
)
get.return_value.status_code = 431
get_service_offering = mock.Mock()
get_service_offering.return_value = json.loads(read_file(
'tests/data/service_offering_search.json'
))
get_zone = mock.Mock()
get_zone.return_value = json.loads(read_file(
'tests/data/zones_search.json'
))
with mock.patch('requests.get', get):
with mock.patch(
'ec2stack.providers.cloudstack.service_offerings.get_service_offering',
get_service_offering
):
with mock.patch(
'ec2stack.providers.cloudstack.zones.get_zone',
get_zone
):
response = self.post(
'/',
data=data
)
self.assert_bad_request(response)
assert 'InvalidGroup.NotFound' in response.data
def test_run_instance_invalid_keyname(self):
data = self.get_example_data()
data['Action'] = 'RunInstances'
data['ImageId'] = 'a32d70ee-95e4-11e3-b2e4-d19c9d3e5e1d'
data['MinCount'] = '0'
data['MaxCount'] = '0'
data['SecurityGroupId.1'] = 'example-security-group-id'
data['SecurityGroup.1'] = 'example-security-group-name'
data['KeyName'] = 'invalid-ssh-key-name'
data['UserData'] = 'example-user-data'
data['Signature'] = generate_signature(data, 'POST', 'localhost', '/')
get = mock.Mock()
get.return_value.text = read_file(
'tests/data/invalid_run_instance_keypair_not_found.json'
)
get.return_value.status_code = 431
get_service_offering = mock.Mock()
get_service_offering.return_value = json.loads(read_file(
'tests/data/service_offering_search.json'
))
get_zone = mock.Mock()
get_zone.return_value = json.loads(read_file(
'tests/data/zones_search.json'
))
with mock.patch('requests.get', get):
with mock.patch(
'ec2stack.providers.cloudstack.service_offerings.get_service_offering',
get_service_offering
):
with mock.patch(
'ec2stack.providers.cloudstack.zones.get_zone',
get_zone
):
response = self.post(
'/',
data=data
)
self.assert_bad_request(response)
assert 'InvalidKeyPair.NotFound' in response.data
def test_run_instance_unknown_issue(self):
data = self.get_example_data()
data['Action'] = 'RunInstances'
data['ImageId'] = 'a32d70ee-95e4-11e3-b2e4-d19c9d3e5e1d'
data['MinCount'] = '0'
data['MaxCount'] = '0'
data['SecurityGroupId.1'] = 'example-security-group-id'
data['SecurityGroup.1'] = 'example-security-group-name'
data['KeyName'] = 'example-ssh-key-name'
data['UserData'] = 'example-user-data'
data['Signature'] = generate_signature(data, 'POST', 'localhost', '/')
get = mock.Mock()
get.return_value.text = read_file(
'tests/data/invalid_run_instance_unknown_issue.json'
)
get.return_value.status_code = 431
get_service_offering = mock.Mock()
get_service_offering.return_value = json.loads(read_file(
'tests/data/service_offering_search.json'
))
get_zone = mock.Mock()
get_zone.return_value = json.loads(read_file(
'tests/data/zones_search.json'
))
with mock.patch('requests.get', get):
with mock.patch(
'ec2stack.providers.cloudstack.service_offerings.get_service_offering',
get_service_offering
):
with mock.patch(
'ec2stack.providers.cloudstack.zones.get_zone',
get_zone
):
response = self.post(
'/',
data=data
)
self.assert_bad_request(response)
assert 'InvalidRequest' in response.data
|
|
"""
Reference-physical domain mappings.
"""
import numpy as nm
from sfepy.base.base import Struct
class PhysicalQPs(Struct):
"""
Physical quadrature points in a region.
"""
def __init__(self, num=0):
Struct.__init__(self, num=num, shape=(0, 0, 0))
self.values = nm.empty(self.shape, dtype=nm.float64)
def get_shape(self, rshape):
"""
Get shape from raveled shape.
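        For instance (a sketch): with ``self.shape[1] == 4`` quadrature points
        per element, ``rshape == (12, 3)`` unravels to ``(3, 4, 3)``.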
"""
n_qp = self.shape[1]
if n_qp > 0:
            if rshape[0] % n_qp != 0:
raise ValueError('incompatible shapes! (n_qp: %d, %s)'
% (n_qp, rshape))
shape = (rshape[0] // n_qp, n_qp) + rshape[1:]
else:
shape = (rshape[0], 0, 0, 0)
return shape
class Mapping(Struct):
"""
Base class for mappings.
"""
@staticmethod
def from_args(region, kind='v'):
"""
Create mapping from reference to physical entities in a given
region, given the integration kind ('v' or 's').
This mapping can be used to compute the physical quadrature
points.
Parameters
----------
region : Region instance
The region defining the entities.
kind : 'v' or 's'
The kind of the entities: 'v' - cells, 's' - facets.
Returns
-------
mapping : VolumeMapping or SurfaceMapping instance
The requested mapping.
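        Examples
        --------
        A hedged sketch, assuming ``region`` belongs to an FEDomain-based
        problem and ``qp_coors`` holds reference quadrature point coordinates:
        >>> mapping = Mapping.from_args(region, kind='v')
        >>> qps = mapping.get_physical_qps(qp_coors)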
"""
from sfepy.discrete.fem.domain import FEDomain
from sfepy.discrete.iga.domain import IGDomain
if isinstance(region.domain, FEDomain):
import sfepy.discrete.fem.mappings as mm
coors = region.domain.get_mesh_coors()
if kind == 's':
coors = coors[region.vertices]
conn, gel = region.domain.get_conn(ret_gel=True)
if kind == 'v':
cells = region.get_cells()
mapping = mm.VolumeMapping(coors, conn[cells], gel=gel)
elif kind == 's':
from sfepy.discrete.fem.fe_surface import FESurface
aux = FESurface('aux', region, gel.get_surface_entities(),
conn)
mapping = mm.SurfaceMapping(coors, aux.leconn,
gel=gel.surface_facet)
elif isinstance(region.domain, IGDomain):
import sfepy.discrete.iga.mappings as mm
mapping = mm.IGMapping(region.domain, region.cells)
else:
raise ValueError('unknown domain class! (%s)' % type(region.domain))
return mapping
def get_physical_qps(region, integral, map_kind=None):
"""
Get physical quadrature points corresponding to the given region
and integral.
"""
phys_qps = PhysicalQPs()
if map_kind is None:
map_kind = 'v' if region.can_cells else 's'
gmap = Mapping.from_args(region, map_kind)
gel = gmap.get_geometry()
qp_coors, _ = integral.get_qp(gel.name)
qps = gmap.get_physical_qps(qp_coors)
n_el, n_qp = qps.shape[0], qps.shape[1]
phys_qps.num = n_el * n_qp
phys_qps.shape = qps.shape
qps.shape = (phys_qps.num, qps.shape[2])
phys_qps.values = qps
return phys_qps
def get_mapping_data(name, field, integral, region=None, integration='volume'):
"""
General helper function for accessing reference mapping data.
Get data attribute `name` from reference mapping corresponding to
`field` in `region` in quadrature points of the given `integral` and
`integration` type.
Parameters
----------
name : str
The reference mapping attribute name.
field : Field instance
The field defining the reference mapping.
integral : Integral instance
The integral defining quadrature points.
region : Region instance, optional
If given, use the given region instead of `field` region.
integration : one of ('volume', 'surface', 'surface_extra')
The integration type.
Returns
-------
data : array
The required data merged for all element groups.
Notes
-----
Assumes the same element geometry in all element groups of the field!
"""
data = None
if region is None:
region = field.region
geo, _ = field.get_mapping(region, integral, integration)
data = getattr(geo, name)
return data
def get_jacobian(field, integral, region=None, integration='volume'):
"""
    Get the jacobian of the reference mapping corresponding to `field`.
Parameters
----------
field : Field instance
The field defining the reference mapping.
integral : Integral instance
The integral defining quadrature points.
region : Region instance, optional
If given, use the given region instead of `field` region.
integration : one of ('volume', 'surface', 'surface_extra')
The integration type.
Returns
-------
jac : array
The jacobian merged for all element groups.
See Also
--------
get_mapping_data()
Notes
-----
Assumes the same element geometry in all element groups of the field!
"""
jac = get_mapping_data('det', field, integral, region=region,
integration=integration)
return jac
def get_normals(field, integral, region):
"""
Get the normals of element faces in `region`.
Parameters
----------
field : Field instance
The field defining the reference mapping.
integral : Integral instance
The integral defining quadrature points.
region : Region instance
        The region of the element faces.
Returns
-------
normals : array
The normals merged for all element groups.
See Also
--------
get_mapping_data()
Notes
-----
Assumes the same element geometry in all element groups of the field!
"""
normals = get_mapping_data('normal', field, integral, region=region,
integration='surface')
return normals
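# Hedged example tying the helpers together; `field`, `integral` and
# `region` are placeholders for objects set up elsewhere.
#
#     jac = get_jacobian(field, integral)             # |J| in volume QPs
#     normals = get_normals(field, integral, region)  # facet normals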
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class SecurityRulesOperations(object):
"""SecurityRulesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2016_09_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
network_security_group_name, # type: str
security_rule_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-09-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'securityRuleName': self._serialize.url("security_rule_name", security_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
network_security_group_name, # type: str
security_rule_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified network security rule.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_security_group_name: The name of the network security group.
:type network_security_group_name: str
:param security_rule_name: The name of the security rule.
:type security_rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
network_security_group_name=network_security_group_name,
security_rule_name=security_rule_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'securityRuleName': self._serialize.url("security_rule_name", security_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}'} # type: ignore
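    # Hedged usage sketch: client construction is an assumption here; any
    # azure-identity credential would do.
    #
    #     from azure.identity import DefaultAzureCredential
    #     from azure.mgmt.network import NetworkManagementClient
    #     client = NetworkManagementClient(DefaultAzureCredential(),
    #                                      "<subscription-id>")
    #     client.security_rules.begin_delete("my-rg", "my-nsg",
    #                                        "rule-1").result()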
def get(
self,
resource_group_name, # type: str
network_security_group_name, # type: str
security_rule_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.SecurityRule"
"""Get the specified network security rule.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_security_group_name: The name of the network security group.
:type network_security_group_name: str
:param security_rule_name: The name of the security rule.
:type security_rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SecurityRule, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2016_09_01.models.SecurityRule
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SecurityRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-09-01"
accept = "application/json, text/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'securityRuleName': self._serialize.url("security_rule_name", security_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('SecurityRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
network_security_group_name, # type: str
security_rule_name, # type: str
security_rule_parameters, # type: "_models.SecurityRule"
**kwargs # type: Any
):
# type: (...) -> "_models.SecurityRule"
cls = kwargs.pop('cls', None) # type: ClsType["_models.SecurityRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-09-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json, text/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'securityRuleName': self._serialize.url("security_rule_name", security_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(security_rule_parameters, 'SecurityRule')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('SecurityRule', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('SecurityRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
network_security_group_name, # type: str
security_rule_name, # type: str
security_rule_parameters, # type: "_models.SecurityRule"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.SecurityRule"]
"""Creates or updates a security rule in the specified network security group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_security_group_name: The name of the network security group.
:type network_security_group_name: str
:param security_rule_name: The name of the security rule.
:type security_rule_name: str
:param security_rule_parameters: Parameters supplied to the create or update network security
rule operation.
:type security_rule_parameters: ~azure.mgmt.network.v2016_09_01.models.SecurityRule
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either SecurityRule or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2016_09_01.models.SecurityRule]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.SecurityRule"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
network_security_group_name=network_security_group_name,
security_rule_name=security_rule_name,
security_rule_parameters=security_rule_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('SecurityRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'securityRuleName': self._serialize.url("security_rule_name", security_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}'} # type: ignore
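    # Hedged sketch of a create call; the field names follow the
    # SecurityRule model and a plain dict is serialized for you, but the
    # values are purely illustrative.
    #
    #     rule = client.security_rules.begin_create_or_update(
    #         "my-rg", "my-nsg", "allow-ssh",
    #         {"protocol": "Tcp", "access": "Allow", "direction": "Inbound",
    #          "priority": 100, "source_address_prefix": "*",
    #          "source_port_range": "*", "destination_address_prefix": "*",
    #          "destination_port_range": "22"},
    #     ).result()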
def list(
self,
resource_group_name, # type: str
network_security_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.SecurityRuleListResult"]
"""Gets all security rules in a network security group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_security_group_name: The name of the network security group.
:type network_security_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator-like instance of either SecurityRuleListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2016_09_01.models.SecurityRuleListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SecurityRuleListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-09-01"
accept = "application/json, text/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('SecurityRuleListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules'} # type: ignore
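    # Hedged paging sketch, reusing the `client` from the sketch above:
    #
    #     for rule in client.security_rules.list("my-rg", "my-nsg"):
    #         print(rule.name, rule.priority)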
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Script to train neural networks for NLP tagging tasks.
Author: Erick Rocha Fonseca
"""
import logging
import numpy as np
import nlpnet.config as config
import nlpnet.utils as utils
import nlpnet.taggers as taggers
import nlpnet.metadata as metadata
import nlpnet.srl as srl
import nlpnet.pos as pos
import nlpnet.parse as parse
import nlpnet.arguments as arguments
import nlpnet.reader as reader
import nlpnet.attributes as attributes
from nlpnet.network import Network, ConvolutionalNetwork, ConvolutionalDependencyNetwork
############################
### FUNCTION DEFINITIONS ###
############################
def create_reader(args, md, validation=False):
"""
Creates and returns a TextReader object according to the task at hand.
:param args: the object containing the program arguments
:param md: metadata for the task
:param validation: whether the reader should read the validation data
from `args`
"""
if validation:
msg = "validation"
filename = args.dev
else:
msg = "training"
filename = args.gold
logger.info("Reading %s data..." % msg)
if args.task == 'pos':
text_reader = pos.POSReader(md, filename=filename)
if args.suffix:
text_reader.create_affix_list('suffix', args.suffix_size, 5)
if args.prefix:
text_reader.create_affix_list('prefix', args.prefix_size, 5)
elif args.task.startswith('srl'):
text_reader = srl.SRLReader(md, filename=filename, only_boundaries=args.identify,
only_classify=args.classify,
only_predicates=args.predicates)
if args.identify:
# only identify arguments
text_reader.convert_tags('iobes', only_boundaries=True)
elif not args.classify and not args.predicates:
# this is SRL as one step, we use IOB
text_reader.convert_tags('iob', update_tag_dict=False)
elif 'dependency' in args.task:
text_reader = parse.DependencyReader(md, filename)
else:
raise ValueError("Unknown task: %s" % args.task)
text_reader.codify_sentences()
return text_reader
def create_network(args, text_reader, feature_tables, md):
"""Creates and returns the neural network according to the task at hand."""
logger = logging.getLogger("Logger")
is_dependency = 'dependency' in args.task
is_convolution_srl = args.task.startswith('srl') and args.task != 'srl_predicates'
is_convolution = is_convolution_srl or is_dependency
if is_convolution:
# get some data structures used both by dep parsing and SRL
distance_tables = utils.set_distance_features(args.max_dist, args.target_features,
args.pred_features)
padding_left = text_reader.converter.get_padding_left(False)
padding_right = text_reader.converter.get_padding_right(False)
if is_dependency:
output_size = 1 if not args.labeled else len(text_reader.tag_dict)
nn = ConvolutionalDependencyNetwork.create_new(feature_tables, distance_tables[0],
distance_tables[1], args.window,
args.convolution, args.hidden, output_size)
else:
# not dependency (SRL)
num_tags = len(text_reader.tag_dict)
nn = ConvolutionalNetwork.create_new(feature_tables, distance_tables[0],
distance_tables[1], args.window,
args.convolution, args.hidden, num_tags)
if args.identify:
logger.info("Loading initial transition scores table for argument identification")
transitions = srl.init_transitions_simplified(text_reader.tag_dict)
nn.transitions = transitions
nn.learning_rate_trans = args.learning_rate_transitions
elif not args.classify:
logger.info("Loading initial IOB transition scores table")
transitions = srl.init_transitions(text_reader.tag_dict, 'iob')
nn.transitions = transitions
nn.learning_rate_trans = args.learning_rate_transitions
else:
# not convolution
num_tags = len(text_reader.tag_dict)
nn = Network.create_new(feature_tables, args.window, args.hidden, num_tags)
if args.learning_rate_transitions > 0:
            transitions = np.zeros((num_tags + 1, num_tags), float)
nn.transitions = transitions
nn.learning_rate_trans = args.learning_rate_transitions
padding_left = text_reader.converter.get_padding_left(args.task == 'pos')
padding_right = text_reader.converter.get_padding_right(args.task == 'pos')
nn.padding_left = np.array(padding_left)
nn.padding_right = np.array(padding_right)
nn.learning_rate = args.learning_rate
nn.learning_rate_features = args.learning_rate_features
if 'convolution' in args and args.convolution > 0 and args.hidden > 0:
layer_sizes = (nn.input_size, nn.hidden_size, nn.hidden2_size, nn.output_size)
else:
layer_sizes = (nn.input_size, nn.hidden_size, nn.output_size)
logger.info("Created new network with the following layer sizes: %s"
% ', '.join(str(x) for x in layer_sizes))
nn.network_filename = config.FILES[md.network]
return nn
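# Hedged sketch of the simplest (window-only) branch above; the feature
# tables are assumed to be 2D numpy embedding arrays, and all sizes are
# illustrative.
#
#     tables = [np.random.normal(size=(10000, 50))]   # one lookup table
#     nn = Network.create_new(tables, 5, 100, 10)     # window, hidden, tags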
def load_network_train(args, md):
"""Loads and returns a neural network with all the necessary data."""
nn = taggers.load_network(md)
logger.info("Loaded network with following parameters:")
logger.info(nn.description())
nn.learning_rate = args.learning_rate
nn.learning_rate_features = args.learning_rate_features
if md.task.startswith('srl') or md.task == 'pos':
nn.learning_rate_trans = args.learning_rate_transitions
return nn
def create_metadata(args):
"""Creates a Metadata object from the given arguments."""
# using getattr because the SRL args object doesn't have a "suffix" attribute
use_caps = getattr(args, 'caps', False)
use_suffix = getattr(args, 'suffix', False)
use_prefix = getattr(args, 'prefix', False)
use_pos = getattr(args, 'pos', False)
use_chunk = getattr(args, 'chunk', False)
return metadata.Metadata(args.task, None, use_caps, use_suffix, use_prefix,
use_pos, use_chunk)
def set_validation_data(nn, task, reader):
"""Sets the neural network validation data."""
if task == 'pos' or task == 'srl_predicates':
nn.set_validation_data(reader.sentences, reader.tags)
elif task.startswith('srl') and task != 'srl_predicates':
arg_limits = None if task != 'srl_classify' else reader.arg_limits
nn.set_validation_data(reader.sentences, reader.predicates,
reader.tags, arg_limits)
elif task.endswith('dependency'):
if task.startswith('unlabeled'):
nn.set_validation_data(reader.sentences, reader.heads)
else:
nn.set_validation_data(reader.sentences, reader.heads, reader.labels)
else:
raise ValueError('Unknown task: %s' % task)
def load_or_create_metadata(args):
"""
Loads or creates a metadata object, depending on command line arguments.
"""
if not args.load_network:
# if we are about to create a new network, create the metadata too
md = create_metadata(args)
md.save_to_file()
else:
md = metadata.Metadata.load_from_file(args.task)
return md
def train(nn, reader, args):
"""Trains a neural network for the selected task."""
num_sents = len(reader.sentences)
logger.info("Starting training with %d sentences" % num_sents)
    avg_len = sum(len(x) for x in reader.sentences) / float(num_sents)
logger.debug("Average sentence length is %f tokens" % avg_len)
logger.debug("Network connection learning rate: %f" % nn.learning_rate)
logger.debug("Feature vectors learning rate: %f" % nn.learning_rate_features)
logger.debug("Tag transition matrix learning rate: %f\n" % nn.learning_rate_trans)
    intervals = max(args.iterations // 200, 1)
np.seterr(over='raise')
if args.decay:
nn.set_learning_rate_decay(args.decay)
if args.task.startswith('srl') and args.task != 'srl_predicates':
        arg_limits = None if args.task != 'srl_classify' else reader.arg_limits
nn.train(reader.sentences, reader.predicates, reader.tags,
args.iterations, intervals, args.accuracy, arg_limits)
elif args.task.endswith('dependency'):
if args.labeled:
nn.train(reader.sentences, reader.heads, args.iterations,
                     intervals, args.accuracy, reader.labels)
else:
nn.train(reader.sentences, reader.heads, args.iterations,
intervals, args.accuracy)
else:
nn.train(reader.sentences, reader.tags,
args.iterations, intervals, args.accuracy)
if __name__ == '__main__':
args = arguments.get_args()
logging_level = logging.DEBUG if args.verbose else logging.INFO
utils.set_logger(logging_level)
logger = logging.getLogger("Logger")
config.set_data_dir(args.data)
md = load_or_create_metadata(args)
text_reader = create_reader(args, md)
if args.load_network:
logger.info("Loading provided network...")
nn = load_network_train(args, md)
else:
logger.info('Creating new network...')
feature_tables = utils.create_feature_tables(args, md, text_reader)
nn = create_network(args, text_reader, feature_tables, md)
if args.dev is not None:
validation_reader = create_reader(args, md, True)
set_validation_data(nn, args.task, validation_reader)
train(nn, text_reader, args)
logger.info("Finished training")
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for upsampling"""
import numpy as np
import tvm
from tvm import te
from tvm import topi
import tvm.testing
import tvm.topi.testing
import math
from tvm.topi.util import nchw_pack_layout
def verify_upsampling(
batch,
in_channel,
in_height,
in_width,
scale_h,
scale_w,
layout="NCHW",
method="nearest_neighbor",
in_batch_block=0,
in_channel_block=0,
):
if layout == "NCHW":
A = te.placeholder((batch, in_channel, in_height, in_width), name="A")
dtype = A.dtype
out_shape = (
batch,
in_channel,
int(round(in_height * scale_h)),
int(round(in_width * scale_w)),
)
a_np = np.random.uniform(size=(batch, in_channel, in_height, in_width)).astype(dtype)
elif nchw_pack_layout(layout):
A = te.placeholder(
(batch, in_channel, in_height, in_width, in_batch_block, in_channel_block), name="A"
)
dtype = A.dtype
out_shape = (
batch,
in_channel,
int(round(in_height * scale_h)),
int(round(in_width * scale_w)),
in_batch_block,
in_channel_block,
)
a_np = np.random.uniform(
size=(batch, in_channel, in_height, in_width, in_batch_block, in_channel_block)
).astype(dtype)
elif layout == "NHWC":
A = te.placeholder((batch, in_height, in_width, in_channel), name="A")
dtype = A.dtype
out_shape = (
batch,
int(round(in_height * scale_h)),
int(round(in_width * scale_w)),
in_channel,
)
a_np = np.random.uniform(size=(batch, in_height, in_width, in_channel)).astype(dtype)
else:
        raise NotImplementedError("Layout not supported: {}".format(layout))
B = topi.nn.upsampling(A, scale_h, scale_w, layout=layout, method=method, align_corners=False)
if method == "bilinear":
out_size = (int(round(in_height * scale_h)), int(round(in_width * scale_w)))
b_np = tvm.topi.testing.bilinear_resize_python(a_np, out_size, layout, "asymmetric")
else:
b_np = tvm.topi.testing.upsampling_python(a_np, (scale_h, scale_w), layout)
def check_device(device, ctx):
print("Running on target: %s" % device)
with tvm.target.Target(device):
s = tvm.topi.testing.get_injective_schedule(device)(B)
a = tvm.nd.array(a_np, ctx)
b = tvm.nd.array(np.zeros(out_shape, dtype=dtype), ctx)
f = tvm.build(s, [A, B], device)
f(a, b)
tvm.testing.assert_allclose(b.asnumpy(), b_np, rtol=1e-5, atol=1e-5)
for device, ctx in tvm.testing.enabled_targets():
check_device(device, ctx)
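# Hedged standalone sketch of the op under test; shapes are illustrative
# and a plain TE schedule is used instead of the per-target injective one.
#
#     A = te.placeholder((1, 3, 8, 8), name="A")
#     B = topi.nn.upsampling(A, 2.0, 2.0, layout="NCHW")
#     f = tvm.build(te.create_schedule(B.op), [A, B], "llvm")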
@tvm.testing.uses_gpu
def test_upsampling():
# nearest_neighbor - NCHW
verify_upsampling(8, 16, 32, 32, 2.0, 2.0)
verify_upsampling(2, 32, 64, 64, 3.0, 3.0)
verify_upsampling(1, 64, 22, 32, 1.954545497894287, 2.0)
## nearest_neighbor - NHWC
verify_upsampling(8, 16, 32, 32, 2.0, 2.0, layout="NHWC")
verify_upsampling(2, 32, 64, 64, 3.0, 3.0, layout="NHWC")
verify_upsampling(1, 64, 22, 32, 1.954545497894287, 2.0, layout="NHWC")
# bilinear - NCHW
verify_upsampling(2, 2, 32, 32, 2.0, 2.0, method="bilinear")
verify_upsampling(2, 2, 32, 32, 3.0, 3.0, method="bilinear")
verify_upsampling(1, 64, 22, 32, 1.954545497894287, 2.0, method="bilinear")
# nearest_neighbor - NCHWinic
verify_upsampling(2, 2, 32, 32, in_batch_block=4, in_channel_block=8, scale_h=2.0, scale_w=2.0)
verify_upsampling(2, 2, 64, 64, in_batch_block=1, in_channel_block=16, scale_h=3.0, scale_w=3.0)
verify_upsampling(
1, 4, 22, 32, in_batch_block=1, in_channel_block=16, scale_h=1.954545497894287, scale_w=2.0
)
# bilinear - NCHWinic
verify_upsampling(
2,
2,
32,
32,
in_batch_block=1,
in_channel_block=1,
scale_h=2.0,
scale_w=2.0,
method="bilinear",
)
verify_upsampling(
2,
2,
32,
32,
in_batch_block=1,
in_channel_block=1,
scale_h=3.0,
scale_w=3.0,
method="bilinear",
)
verify_upsampling(
2,
4,
22,
32,
in_batch_block=1,
in_channel_block=16,
scale_h=1.954545497894287,
scale_w=2.0,
layout="NCHW1n16c",
method="bilinear",
)
# bilinear - NHWC
verify_upsampling(2, 2, 32, 32, 2.0, 2.0, layout="NHWC", method="bilinear")
verify_upsampling(2, 2, 32, 32, 3.0, 3.0, layout="NHWC", method="bilinear")
verify_upsampling(1, 64, 22, 32, 3.0, 3.0, layout="NHWC", method="bilinear")
def verify_upsampling3d(
batch,
in_channel,
in_depth,
in_height,
in_width,
scale_d,
scale_h,
scale_w,
layout="NCDHW",
method="nearest_neighbor",
):
if layout == "NCDHW":
A = te.placeholder((batch, in_channel, in_depth, in_height, in_width), name="A")
dtype = A.dtype
out_shape = (
batch,
in_channel,
int(round(in_depth * scale_d)),
int(round(in_height * scale_h)),
int(round(in_width * scale_w)),
)
a_np = np.random.uniform(size=(batch, in_channel, in_depth, in_height, in_width)).astype(
dtype
)
elif layout == "NDHWC":
A = te.placeholder((batch, in_depth, in_height, in_width, in_channel), name="A")
dtype = A.dtype
out_shape = (
batch,
int(round(in_depth * scale_d)),
int(round(in_height * scale_h)),
int(round(in_width * scale_w)),
in_channel,
)
a_np = np.random.uniform(size=(batch, in_depth, in_height, in_width, in_channel)).astype(
dtype
)
else:
        raise NotImplementedError("Layout not supported: {}".format(layout))
B = topi.nn.upsampling3d(
A,
scale_d,
scale_h,
scale_w,
layout=layout,
method=method,
coordinate_transformation_mode="half_pixel",
)
if method == "trilinear":
out_size = (
int(round(in_depth * scale_d)),
int(round(in_height * scale_h)),
int(round(in_width * scale_w)),
)
b_np = tvm.topi.testing.trilinear_resize3d_python(
a_np, out_size, layout, coordinate_transformation_mode="half_pixel"
)
else:
b_np = tvm.topi.testing.upsampling3d_python(a_np, (scale_d, scale_h, scale_w), layout)
def check_device(device, ctx):
print("Running on target: %s" % device)
with tvm.target.Target(device):
s = tvm.topi.testing.get_injective_schedule(device)(B)
a = tvm.nd.array(a_np, ctx)
b = tvm.nd.array(np.zeros(out_shape, dtype=dtype), ctx)
f = tvm.build(s, [A, B], device)
f(a, b)
tvm.testing.assert_allclose(b.asnumpy(), b_np, rtol=1e-5, atol=1e-5)
for device, ctx in tvm.testing.enabled_targets():
check_device(device, ctx)
@tvm.testing.uses_gpu
def test_upsampling3d():
# nearest_neighbor - NCDHW
verify_upsampling3d(8, 8, 16, 16, 16, 2.0, 2.0, 2.0)
verify_upsampling3d(2, 16, 32, 32, 32, 3.0, 3.0, 3.0)
verify_upsampling3d(1, 8, 11, 16, 6, 1.954545497894287, 2.0, 1.5)
## nearest_neighbor - NDHWC
verify_upsampling3d(8, 8, 16, 16, 16, 2.0, 2.0, 2.0, layout="NDHWC")
verify_upsampling3d(2, 16, 32, 32, 32, 3.0, 3.0, 3.0, layout="NDHWC")
verify_upsampling3d(1, 8, 11, 16, 6, 1.954545497894287, 2.0, 1.5, layout="NDHWC")
# trilinear - NCDHW
verify_upsampling3d(2, 2, 16, 16, 16, 2.0, 2.0, 2.0, method="trilinear")
verify_upsampling3d(2, 2, 32, 32, 32, 3.0, 3.0, 3.0, method="trilinear")
verify_upsampling3d(1, 2, 11, 16, 6, 1.954545497894287, 2.0, 1.5, method="trilinear")
# trilinear - NDHWC
verify_upsampling3d(2, 2, 16, 16, 16, 2.0, 2.0, 2.0, layout="NDHWC", method="trilinear")
verify_upsampling3d(2, 2, 32, 32, 32, 3.0, 3.0, 3.0, layout="NDHWC", method="trilinear")
verify_upsampling3d(
1, 2, 11, 16, 6, 1.954545497894287, 2.0, 1.5, layout="NDHWC", method="trilinear"
)
if __name__ == "__main__":
test_upsampling()
test_upsampling3d()
|
|
# Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
# Copyright (c) 2009 The Hewlett-Packard Development Company
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from m5.util import orderdict
from slicc.util import PairContainer
from slicc.symbols.Symbol import Symbol
from slicc.symbols.Var import Var
class DataMember(Var):
def __init__(self, symtab, ident, location, type, code, pairs,
machine, init_code):
super(DataMember, self).__init__(symtab, ident, location, type,
code, pairs, machine)
self.init_code = init_code
class Enumeration(PairContainer):
def __init__(self, ident, pairs):
super(Enumeration, self).__init__(pairs)
self.ident = ident
self.primary = False
class Type(Symbol):
def __init__(self, table, ident, location, pairs, machine=None):
super(Type, self).__init__(table, ident, location, pairs)
self.c_ident = ident
self.abstract_ident = ""
if machine:
if self.isExternal or self.isPrimitive:
if "external_name" in self:
self.c_ident = self["external_name"]
else:
# Append with machine name
self.c_ident = "%s_%s" % (machine, ident)
        self.pairs.setdefault("desc", "No description available")
# check for interface that this Type implements
if "interface" in self:
interface = self["interface"]
            if interface == "Message":
                self["message"] = "yes"
        # FIXME - all of the following ident comparisons are fragile hacks
        if self.ident == "CacheMemory":
            self["cache"] = "yes"
        if self.ident == "TBETable":
            self["tbe"] = "yes"
if self.ident == "TimerTable":
self["timer"] = "yes"
if self.ident == "DirectoryMemory":
self["dir"] = "yes"
if self.ident == "PersistentTable":
self["persistent"] = "yes"
if self.ident == "Prefetcher":
self["prefetcher"] = "yes"
self.isMachineType = (ident == "MachineType")
self.isStateDecl = ("state_decl" in self)
self.statePermPairs = []
self.data_members = orderdict()
self.methods = {}
self.enums = orderdict()
@property
def isPrimitive(self):
return "primitive" in self
@property
def isMessage(self):
return "message" in self
@property
def isBuffer(self):
return "buffer" in self
@property
def isInPort(self):
return "inport" in self
@property
def isOutPort(self):
return "outport" in self
@property
def isEnumeration(self):
return "enumeration" in self
@property
def isExternal(self):
return "external" in self
@property
def isGlobal(self):
return "global" in self
@property
def isInterface(self):
return "interface" in self
# Return false on error
def addDataMember(self, ident, type, pairs, init_code):
if ident in self.data_members:
return False
member = DataMember(self.symtab, ident, self.location, type,
"m_%s" % ident, pairs, None, init_code)
self.data_members[ident] = member
self.symtab.registerSym(ident, member)
return True
def dataMemberType(self, ident):
return self.data_members[ident].type
def methodId(self, name, param_type_vec):
return '_'.join([name] + [ pt.c_ident for pt in param_type_vec ])
def methodIdAbstract(self, name, param_type_vec):
return '_'.join([name] + [ pt.abstract_ident for pt in param_type_vec ])
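    # e.g. methodId("lookup", [addr_type]) -> "lookup_Addr" when
    # addr_type.c_ident == "Addr" (a hypothetical parameter type), so
    # overloads get distinct keys in self.methods.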
def statePermPairAdd(self, state_name, perm_name):
self.statePermPairs.append([state_name, perm_name])
def addFunc(self, func):
ident = self.methodId(func.ident, func.param_types)
if ident in self.methods:
return False
self.methods[ident] = func
return True
def addEnum(self, ident, pairs):
if ident in self.enums:
return False
self.enums[ident] = Enumeration(ident, pairs)
# Add default
if "default" not in self:
self["default"] = "%s_NUM" % self.c_ident
return True
    ## Check whether an enum value is still unclaimed; if so, mark it as
    ## used (primary) and return True, otherwise return False.
def checkEnum(self, ident):
if ident in self.enums and not self.enums[ident].primary:
self.enums[ident].primary = True
return True
return False
def writeCodeFiles(self, path, includes):
if self.isExternal:
# Do nothing
pass
elif self.isEnumeration:
self.printEnumHH(path)
self.printEnumCC(path)
else:
# User defined structs and messages
self.printTypeHH(path)
self.printTypeCC(path)
def printTypeHH(self, path):
code = self.symtab.codeFormatter()
code('''
/** \\file ${{self.c_ident}}.hh
 *
* Auto generated C++ code started by $__file__:$__line__
*/
#ifndef __${{self.c_ident}}_HH__
#define __${{self.c_ident}}_HH__
#include <iostream>
#include "mem/ruby/slicc_interface/RubySlicc_Util.hh"
''')
for dm in self.data_members.values():
if not dm.type.isPrimitive:
code('#include "mem/protocol/$0.hh"', dm.type.c_ident)
parent = ""
if "interface" in self:
code('#include "mem/protocol/$0.hh"', self["interface"])
parent = " : public %s" % self["interface"]
code('''
$klass ${{self.c_ident}}$parent
{
public:
${{self.c_ident}}
''', klass="class")
if self.isMessage:
code('(Tick curTime) : %s(curTime) {' % self["interface"])
else:
code('()\n\t\t{')
code.indent()
if not self.isGlobal:
code.indent()
for dm in self.data_members.values():
ident = dm.ident
if "default" in dm:
# look for default value
code('m_$ident = ${{dm["default"]}}; // default for this field')
elif "default" in dm.type:
# Look for the type default
tid = dm.type.c_ident
code('m_$ident = ${{dm.type["default"]}}; // default value of $tid')
else:
code('// m_$ident has no default')
code.dedent()
code('}')
# ******** Copy constructor ********
if not self.isGlobal:
code('${{self.c_ident}}(const ${{self.c_ident}}&other)')
# Call superclass constructor
if "interface" in self:
code(' : ${{self["interface"]}}(other)')
code('{')
code.indent()
for dm in self.data_members.values():
code('m_${{dm.ident}} = other.m_${{dm.ident}};')
code.dedent()
code('}')
# ******** Full init constructor ********
if not self.isGlobal:
params = [ 'const %s& local_%s' % (dm.type.c_ident, dm.ident) \
for dm in self.data_members.itervalues() ]
params = ', '.join(params)
if self.isMessage:
params = "const Tick curTime, " + params
code('${{self.c_ident}}($params)')
# Call superclass constructor
if "interface" in self:
if self.isMessage:
code(' : ${{self["interface"]}}(curTime)')
else:
code(' : ${{self["interface"]}}()')
code('{')
code.indent()
for dm in self.data_members.values():
code('m_${{dm.ident}} = local_${{dm.ident}};')
code.dedent()
code('}')
# create a clone member
if self.isMessage:
code('''
MsgPtr
clone() const
{
return std::shared_ptr<Message>(new ${{self.c_ident}}(*this));
}
''')
else:
code('''
${{self.c_ident}}*
clone() const
{
return new ${{self.c_ident}}(*this);
}
''')
if not self.isGlobal:
# const Get methods for each field
code('// Const accessors methods for each field')
for dm in self.data_members.values():
code('''
/** \\brief Const accessor method for ${{dm.ident}} field.
* \\return ${{dm.ident}} field
*/
const ${{dm.type.c_ident}}&
get${{dm.ident}}() const
{
return m_${{dm.ident}};
}
''')
# Non-const Get methods for each field
code('// Non const Accessors methods for each field')
for dm in self.data_members.values():
code('''
/** \\brief Non-const accessor method for ${{dm.ident}} field.
* \\return ${{dm.ident}} field
*/
${{dm.type.c_ident}}&
get${{dm.ident}}()
{
return m_${{dm.ident}};
}
''')
#Set methods for each field
code('// Mutator methods for each field')
for dm in self.data_members.values():
code('''
/** \\brief Mutator method for ${{dm.ident}} field */
void
set${{dm.ident}}(const ${{dm.type.c_ident}}& local_${{dm.ident}})
{
m_${{dm.ident}} = local_${{dm.ident}};
}
''')
code('void print(std::ostream& out) const;')
code.dedent()
code(' //private:')
code.indent()
# Data members for each field
for dm in self.data_members.values():
if "abstract" not in dm:
const = ""
init = ""
# global structure
if self.isGlobal:
const = "static const "
# init value
if dm.init_code:
# only global structure can have init value here
assert self.isGlobal
init = " = %s" % (dm.init_code)
if "desc" in dm:
code('/** ${{dm["desc"]}} */')
code('$const${{dm.type.c_ident}} m_${{dm.ident}}$init;')
# Prototypes for methods defined for the Type
for item in self.methods:
proto = self.methods[item].prototype
if proto:
code('$proto')
code.dedent()
code('};')
code('''
inline std::ostream&
operator<<(std::ostream& out, const ${{self.c_ident}}& obj)
{
obj.print(out);
out << std::flush;
return out;
}
#endif // __${{self.c_ident}}_HH__
''')
code.write(path, "%s.hh" % self.c_ident)
def printTypeCC(self, path):
code = self.symtab.codeFormatter()
code('''
/** \\file ${{self.c_ident}}.cc
*
* Auto generated C++ code started by $__file__:$__line__
*/
#include <iostream>
#include <memory>
#include "mem/protocol/${{self.c_ident}}.hh"
#include "mem/ruby/system/RubySystem.hh"
using namespace std;
''')
code('''
/** \\brief Print the state of this object */
void
${{self.c_ident}}::print(ostream& out) const
{
out << "[${{self.c_ident}}: ";
''')
# For each field
code.indent()
for dm in self.data_members.values():
if dm.type.c_ident == "Addr":
code('''
out << "${{dm.ident}} = " << printAddress(m_${{dm.ident}}) << " ";''')
else:
                code('out << "${{dm.ident}} = " << m_${{dm.ident}} << " ";')
code.dedent()
# Trailer
code('''
out << "]";
}''')
# print the code for the methods in the type
for item in self.methods:
code(self.methods[item].generateCode())
code.write(path, "%s.cc" % self.c_ident)
def printEnumHH(self, path):
code = self.symtab.codeFormatter()
code('''
/** \\file ${{self.c_ident}}.hh
*
* Auto generated C++ code started by $__file__:$__line__
*/
#ifndef __${{self.c_ident}}_HH__
#define __${{self.c_ident}}_HH__
#include <iostream>
#include <string>
''')
if self.isStateDecl:
code('#include "mem/protocol/AccessPermission.hh"')
if self.isMachineType:
code('#include "base/misc.hh"')
code('#include "mem/ruby/common/Address.hh"')
code('#include "mem/ruby/common/TypeDefines.hh"')
code('struct MachineID;')
code('''
// Class definition
/** \\enum ${{self.c_ident}}
* \\brief ${{self.desc}}
*/
enum ${{self.c_ident}} {
${{self.c_ident}}_FIRST,
''')
code.indent()
# For each field
for i,(ident,enum) in enumerate(self.enums.iteritems()):
            desc = enum.get("desc", "No description available")
if i == 0:
init = ' = %s_FIRST' % self.c_ident
else:
init = ''
code('${{self.c_ident}}_${{enum.ident}}$init, /**< $desc */')
code.dedent()
code('''
${{self.c_ident}}_NUM
};
// Code to convert from a string to the enumeration
${{self.c_ident}} string_to_${{self.c_ident}}(const std::string& str);
// Code to convert state to a string
std::string ${{self.c_ident}}_to_string(const ${{self.c_ident}}& obj);
// Code to increment an enumeration type
${{self.c_ident}} &operator++(${{self.c_ident}} &e);
''')
# MachineType hack used to set the base component id for each Machine
if self.isMachineType:
code('''
int ${{self.c_ident}}_base_level(const ${{self.c_ident}}& obj);
MachineType ${{self.c_ident}}_from_base_level(int);
int ${{self.c_ident}}_base_number(const ${{self.c_ident}}& obj);
int ${{self.c_ident}}_base_count(const ${{self.c_ident}}& obj);
''')
for enum in self.enums.itervalues():
if enum.ident == "DMA":
code('''
MachineID map_Address_to_DMA(const Addr &addr);
''')
code('''
MachineID get${{enum.ident}}MachineID(NodeID RubyNode);
''')
if self.isStateDecl:
code('''
// Code to convert the current state to an access permission
AccessPermission ${{self.c_ident}}_to_permission(const ${{self.c_ident}}& obj);
''')
# Trailer
code('''
std::ostream& operator<<(std::ostream& out, const ${{self.c_ident}}& obj);
#endif // __${{self.c_ident}}_HH__
''')
code.write(path, "%s.hh" % self.c_ident)
def printEnumCC(self, path):
code = self.symtab.codeFormatter()
code('''
/** \\file ${{self.c_ident}}.cc
*
* Auto generated C++ code started by $__file__:$__line__
*/
#include <cassert>
#include <iostream>
#include <string>
#include "base/misc.hh"
#include "mem/protocol/${{self.c_ident}}.hh"
using namespace std;
''')
if self.isStateDecl:
code('''
// Code to convert the current state to an access permission
AccessPermission ${{self.c_ident}}_to_permission(const ${{self.c_ident}}& obj)
{
switch(obj) {
''')
# For each case
code.indent()
for statePerm in self.statePermPairs:
code(' case ${{self.c_ident}}_${{statePerm[0]}}:')
code(' return AccessPermission_${{statePerm[1]}};')
code.dedent()
code ('''
default:
panic("Unknown state access permission converstion for ${{self.c_ident}}");
}
}
''')
if self.isMachineType:
for enum in self.enums.itervalues():
if enum.primary:
code('#include "mem/protocol/${{enum.ident}}_Controller.hh"')
code('#include "mem/ruby/common/MachineID.hh"')
code('''
// Code for output operator
ostream&
operator<<(ostream& out, const ${{self.c_ident}}& obj)
{
out << ${{self.c_ident}}_to_string(obj);
out << flush;
return out;
}
// Code to convert state to a string
string
${{self.c_ident}}_to_string(const ${{self.c_ident}}& obj)
{
switch(obj) {
''')
# For each field
code.indent()
for enum in self.enums.itervalues():
code(' case ${{self.c_ident}}_${{enum.ident}}:')
code(' return "${{enum.ident}}";')
code.dedent()
# Trailer
code('''
default:
panic("Invalid range for type ${{self.c_ident}}");
}
}
// Code to convert from a string to the enumeration
${{self.c_ident}}
string_to_${{self.c_ident}}(const string& str)
{
''')
# For each field
start = ""
code.indent()
for enum in self.enums.itervalues():
code('${start}if (str == "${{enum.ident}}") {')
code(' return ${{self.c_ident}}_${{enum.ident}};')
start = "} else "
code.dedent()
code('''
} else {
panic("Invalid string conversion for %s, type ${{self.c_ident}}", str);
}
}
// Code to increment an enumeration type
${{self.c_ident}}&
operator++(${{self.c_ident}}& e)
{
assert(e < ${{self.c_ident}}_NUM);
return e = ${{self.c_ident}}(e+1);
}
''')
# MachineType hack used to set the base level and number of
# components for each Machine
if self.isMachineType:
code('''
/** \\brief returns the base vector index for each machine type to be
* used by NetDest
*
* \\return the base vector index for each machine type to be used by NetDest
* \\see NetDest.hh
*/
int
${{self.c_ident}}_base_level(const ${{self.c_ident}}& obj)
{
switch(obj) {
''')
# For each field
code.indent()
for i,enum in enumerate(self.enums.itervalues()):
code(' case ${{self.c_ident}}_${{enum.ident}}:')
code(' return $i;')
code.dedent()
# total num
code('''
case ${{self.c_ident}}_NUM:
return ${{len(self.enums)}};
default:
panic("Invalid range for type ${{self.c_ident}}");
}
}
/** \\brief returns the machine type for each base vector index used by NetDest
*
* \\return the MachineType
*/
MachineType
${{self.c_ident}}_from_base_level(int type)
{
switch(type) {
''')
# For each field
code.indent()
for i,enum in enumerate(self.enums.itervalues()):
code(' case $i:')
code(' return ${{self.c_ident}}_${{enum.ident}};')
code.dedent()
# Trailer
code('''
default:
panic("Invalid range for type ${{self.c_ident}}");
}
}
/** \\brief The return value indicates the number of components created
* before a particular machine\'s components
*
* \\return the base number of components for each machine
*/
int
${{self.c_ident}}_base_number(const ${{self.c_ident}}& obj)
{
int base = 0;
switch(obj) {
''')
# For each field
code.indent()
code(' case ${{self.c_ident}}_NUM:')
for enum in reversed(self.enums.values()):
# Check if there is a defined machine with this type
if enum.primary:
code(' base += ${{enum.ident}}_Controller::getNumControllers();')
else:
code(' base += 0;')
code(' case ${{self.c_ident}}_${{enum.ident}}:')
code(' break;')
code.dedent()
code('''
default:
panic("Invalid range for type ${{self.c_ident}}");
}
return base;
}
/** \\brief returns the total number of components for each machine
* \\return the total number of components for each machine
*/
int
${{self.c_ident}}_base_count(const ${{self.c_ident}}& obj)
{
switch(obj) {
''')
# For each field
for enum in self.enums.itervalues():
code('case ${{self.c_ident}}_${{enum.ident}}:')
if enum.primary:
code('return ${{enum.ident}}_Controller::getNumControllers();')
else:
code('return 0;')
# total num
code('''
case ${{self.c_ident}}_NUM:
default:
panic("Invalid range for type ${{self.c_ident}}");
}
}
''')
for enum in self.enums.itervalues():
if enum.ident == "DMA":
code('''
MachineID
map_Address_to_DMA(const Addr &addr)
{
MachineID dma = {MachineType_DMA, 0};
return dma;
}
''')
code('''
MachineID
get${{enum.ident}}MachineID(NodeID RubyNode)
{
MachineID mach = {MachineType_${{enum.ident}}, RubyNode};
return mach;
}
''')
# Write the file
code.write(path, "%s.cc" % self.c_ident)
__all__ = [ "Type" ]
|
|
# Copyright 2015 Cisco Systems, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import sys
import mock
from neutronclient.common import exceptions
from neutronclient import shell
from neutronclient.tests.unit import test_cli20
from networking_cisco.neutronclient import hostingdevice
class CLITestV20HostingDevice(test_cli20.CLITestV20Base):
def setUp(self):
# need to mock before super because extensions loaded on instantiation
self._mock_extension_loading()
super(CLITestV20HostingDevice, self).setUp()
def _create_patch(self, name, func=None):
patcher = mock.patch(name)
thing = patcher.start()
self.addCleanup(patcher.stop)
return thing
def _mock_extension_loading(self):
ext_pkg = 'neutronclient.common.extension'
contrib = self._create_patch(ext_pkg + '._discover_via_entry_points')
contrib.return_value = [("hostingdevice", hostingdevice)]
return contrib
def test_ext_cmd_loaded(self):
shell.NeutronShell('2.0')
ext_cmd = {
'cisco-hosting-device-list': hostingdevice.HostingDeviceList,
'cisco-hosting-device-create': hostingdevice.HostingDeviceCreate,
'cisco-hosting-device-update': hostingdevice.HostingDeviceUpdate,
'cisco-hosting-device-delete': hostingdevice.HostingDeviceDelete,
'cisco-hosting-device-show': hostingdevice.HostingDeviceShow,
'cisco-hosting-device-get-config':
hostingdevice.HostingDeviceGetConfig}
self.assertDictContainsSubset(ext_cmd, shell.COMMANDS['2.0'])
def test_ext_cmd_help_doc_with_extension_name(self):
shell.NeutronShell('2.0')
ext_cmd = {
'cisco-hosting-device-list': hostingdevice.HostingDeviceList,
'cisco-hosting-device-create': hostingdevice.HostingDeviceCreate,
'cisco-hosting-device-update': hostingdevice.HostingDeviceUpdate,
'cisco-hosting-device-delete': hostingdevice.HostingDeviceDelete,
'cisco-hosting-device-show': hostingdevice.HostingDeviceShow,
'cisco-hosting-device-get-config':
hostingdevice.HostingDeviceGetConfig}
self.assertDictContainsSubset(ext_cmd, shell.COMMANDS['2.0'])
for item in ext_cmd:
cmdcls = shell.COMMANDS['2.0'].get(item)
self.assertTrue(cmdcls.__doc__.startswith("[hostingdevice]"))
def test_create_hosting_device(self):
"""Create hosting device."""
resource = 'hosting_device'
cmd = hostingdevice.HostingDeviceCreate(test_cli20.MyApp(sys.stdout),
None)
name = 'Device 1'
template = 'Template 1'
myid = 'myid'
args = [name, template]
position_names = ['name', 'template_id']
position_values = [name, template]
self._test_create_resource(resource, cmd, None, myid, args,
position_names, position_values)
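    # The CLI call this test models (command name taken from the ext_cmd
    # mapping above; the quoted values are illustrative):
    #
    #     neutron cisco-hosting-device-create "Device 1" "Template 1"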
def test_create_hosting_device_id(self):
"""Create hosting device: --id this_id "Device 1" "Template 1"."""
resource = 'hosting_device'
cmd = hostingdevice.HostingDeviceCreate(test_cli20.MyApp(sys.stdout),
None)
name = 'Device 1'
template = 'Template 1'
myid = 'myid'
args = ['--id', myid, name, template]
position_names = ['name', 'template_id']
position_values = [name, template]
self._test_create_resource(resource, cmd, None, myid, args,
position_names, position_values,
id=myid)
def test_create_hosting_device_tenant(self):
"""Create hosting device: --tenant_id tenantid "Device 1" "Template
1".
"""
resource = 'hosting_device'
cmd = hostingdevice.HostingDeviceCreate(test_cli20.MyApp(sys.stdout),
None)
name = 'Device 1'
template = 'Template 1'
myid = 'myid'
args = ['--tenant_id', 'tenantid', name, template]
position_names = ['name', 'template_id']
position_values = [name, template]
self._test_create_resource(resource, cmd, None, myid, args,
position_names, position_values,
tenant_id='tenantid')
def _test_create_hosting_device_optional_args(
self, creds_id=None, credsid=None, desc=None, device_id=None,
deviceid=None, mgmt_ip=None, mgmtip=None, mgmt_port=None,
mgmtport=None, proto_port=None, protoport=None, admin_down=None,
admindown=None, cfg_agt=None, cfgagt=None, tenant_bound=None,
tenantbound=None, auto_delete=None, autodelete=None):
resource = 'hosting_device'
cmd = hostingdevice.HostingDeviceCreate(test_cli20.MyApp(sys.stdout),
None)
myid = 'myid'
name = 'Device 1'
template = 'Template 1'
args = []
expected = {}
if creds_id is not None:
args += ['--credentials_id', creds_id]
expected['credentials_id'] = creds_id
if credsid is not None:
args += ['--credentials-id', credsid]
expected['credentials_id'] = credsid
if desc is not None:
args += ['--description', desc]
expected['description'] = desc
if device_id is not None:
args += ['--device_id', device_id]
expected['device_id'] = device_id
if deviceid is not None:
args += ['--device-id', deviceid]
expected['device_id'] = deviceid
if mgmt_ip is not None:
args += ['--management_ip_address', mgmt_ip]
expected['management_ip_address'] = mgmt_ip
if mgmtip is not None:
args += ['--management-ip-address', mgmtip]
expected['management_ip_address'] = mgmtip
if mgmt_port is not None:
args += ['--management_port', mgmt_port]
expected['management_port_id'] = mgmt_port
if mgmtport is not None:
args += ['--management-port', mgmtport]
expected['management_port_id'] = mgmtport
if proto_port is not None:
args += ['--protocol_port', proto_port]
expected['protocol_port'] = proto_port
if protoport is not None:
args += ['--protocol-port', protoport]
expected['protocol_port'] = protoport
if admin_down is not None:
args += ['--admin_state_down']
expected['admin_state_up'] = False
if admindown is not None:
args += ['--admin-state-down']
expected['admin_state_up'] = False
if cfg_agt is not None:
args += ['--cfg_agent_id', cfg_agt]
expected['cfg_agent_id'] = cfg_agt
if cfgagt is not None:
args += ['--cfg-agent-id', cfgagt]
expected['cfg_agent_id'] = cfgagt
if tenant_bound is not None:
args += ['--tenant_bound', tenant_bound]
expected['tenant_bound'] = (tenant_bound
if tenant_bound != "None" else None)
if tenantbound is not None:
args += ['--tenant-bound', tenantbound]
expected['tenant_bound'] = (tenantbound
if tenantbound != "None" else None)
if auto_delete is not None:
args += ['--auto_delete']
expected['auto_delete'] = True
if autodelete is not None:
args += ['--auto-delete']
expected['auto_delete'] = True
position_names = ['name', 'template_id']
position_values = [name, template]
for p_v in position_values:
args.append(p_v)
self._test_create_resource(resource, cmd, None, myid, args,
position_names, position_values,
**expected)
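    # Editorial note: every optional argument accepts two spellings
    # (underscore and dash, e.g. --credentials_id / --credentials-id), and
    # both are expected to populate the same request field; the paired calls
    # in the tests below exercise each spelling in turn.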
def test_create_hosting_device_creds(self):
self._test_create_hosting_device_optional_args(creds_id='some id')
self._test_create_hosting_device_optional_args(credsid='some id')
def test_create_hosting_device_description(self):
self._test_create_hosting_device_optional_args(desc='some description')
def test_create_hosting_device_device_id(self):
self._test_create_hosting_device_optional_args(device_id='ABC123abc')
self._test_create_hosting_device_optional_args(deviceid='ABC123abc')
def test_create_hosting_device_mgmt_ip(self):
self._test_create_hosting_device_optional_args(mgmt_ip='192.168.0.1')
self._test_create_hosting_device_optional_args(mgmtip='192.168.0.1')
def test_create_hosting_device_mgmt_port(self):
self._test_create_hosting_device_optional_args(mgmt_port='a_port_id')
self._test_create_hosting_device_optional_args(mgmtport='a_port_id')
def test_create_hosting_device_proto_port(self):
self._test_create_hosting_device_optional_args(proto_port='22')
self._test_create_hosting_device_optional_args(protoport='22')
def test_create_hosting_device_admin_down(self):
self._test_create_hosting_device_optional_args(admin_down=True)
self._test_create_hosting_device_optional_args(admindown=True)
def test_create_hosting_device_cfg_agent(self):
self._test_create_hosting_device_optional_args(cfg_agt='agent_1_id')
self._test_create_hosting_device_optional_args(cfgagt='agent_1_id')
def test_create_hosting_device_tenant_bound(self):
self._test_create_hosting_device_optional_args(tenant_bound='None')
self._test_create_hosting_device_optional_args(tenantbound='None')
self._test_create_hosting_device_optional_args(tenant_bound='')
self._test_create_hosting_device_optional_args(tenantbound='')
self._test_create_hosting_device_optional_args(tenant_bound='some id')
self._test_create_hosting_device_optional_args(tenantbound='some id')
def test_create_hosting_device_auto_delete(self):
self._test_create_hosting_device_optional_args(auto_delete=True)
self._test_create_hosting_device_optional_args(autodelete=True)
def test_create_hosting_device_full(self):
self._test_create_hosting_device_optional_args(
creds_id='some id', desc='some description', device_id='ABC123abc',
mgmt_ip='192.168.0.1', mgmt_port='a_port_id', proto_port='22',
admin_down=True, cfg_agt='agent_1_id', tenant_bound='None',
auto_delete=True)
self._test_create_hosting_device_optional_args(
credsid='some id', desc='some description', deviceid='ABC123abc',
mgmtip='192.168.0.1', mgmtport='a_port_id', protoport='22',
admindown=True, cfgagt='agent_1_id', tenantbound='None',
autodelete=True)
def test_list_hosting_devices_detail(self):
"""list hosting devices: -D."""
resources = "hosting_devices"
cmd = hostingdevice.HostingDeviceList(test_cli20.MyApp(sys.stdout),
None)
response_contents = [{
self.id_field: 'myid1', 'name': 'hosting_device_1_name',
'description': 'fast router device', 'template_id': 'templ_id_1',
'admin_state_up': True, 'status': 'ACTIVE'}, {
self.id_field: 'myid2', 'name': 'hosting_device_2_name',
'description': 'faster router device', 'template_id': 'templ_id_2',
'admin_state_up': True, 'status': 'ACTIVE'}]
self._test_list_resources(resources, cmd, True,
response_contents=response_contents)
def test_list_hosting_devices_sort(self):
"""list hosting devices: --sort-key name --sort-key id --sort-key asc
--sort-key desc
"""
resources = "hosting_devices"
cmd = hostingdevice.HostingDeviceList(test_cli20.MyApp(sys.stdout),
None)
response_contents = [{
self.id_field: 'myid1', 'name': 'hosting_device_1_name',
'description': 'fast router device', 'template_id': 'templ_id_1',
'admin_state_up': True, 'status': 'ACTIVE'}, {
self.id_field: 'myid2', 'name': 'hosting_device_2_name',
'description': 'faster router device', 'template_id': 'templ_id_2',
'admin_state_up': True, 'status': 'ACTIVE'}]
self._test_list_resources(resources, cmd,
sort_key=["name", "id"],
sort_dir=["asc", "desc"],
response_contents=response_contents)
def test_list_hosting_devices_limit(self):
"""list hosting devices: -P."""
resources = "hosting_devices"
cmd = hostingdevice.HostingDeviceList(test_cli20.MyApp(sys.stdout),
None)
response_contents = [{
self.id_field: 'myid1', 'name': 'hosting_device_1_name',
'description': 'fast router device', 'template_id': 'templ_id_1',
'admin_state_up': True, 'status': 'ACTIVE'}, {
self.id_field: 'myid2', 'name': 'hosting_device_2_name',
'description': 'faster router device', 'template_id': 'templ_id_2',
'admin_state_up': True, 'status': 'ACTIVE'}]
self._test_list_resources(resources, cmd, page_size=1000,
response_contents=response_contents)
def test_update_hosting_device_exception(self):
"""Update hosting device: myid."""
resource = 'hosting_device'
cmd = hostingdevice.HostingDeviceUpdate(test_cli20.MyApp(sys.stdout),
None)
self.assertRaises(exceptions.CommandError, self._test_update_resource,
resource, cmd, 'myid', ['myid'], {})
def _test_update_hosting_device(
self, creds_id=None, credsid=None, name=None, desc=None,
device_id=None, deviceid=None, mgmt_ip=None, mgmtip=None,
proto_port=None, protoport=None, admin_up=None, adminup=None,
admin_down=None, admindown=None, tenant_bound=None,
tenantbound=None, auto_delete=None, autodelete=None,
no_auto_delete=None, noautodelete=None):
resource = 'hosting_device'
cmd = hostingdevice.HostingDeviceUpdate(test_cli20.MyApp(sys.stdout),
None)
myid = 'myid'
args = [myid]
expected = {}
if creds_id is not None:
args += ['--credentials_id', creds_id]
expected['credentials_id'] = creds_id
if credsid is not None:
args += ['--credentials-id', credsid]
expected['credentials_id'] = credsid
if name is not None:
args += ['--name', name]
expected['name'] = name
if desc is not None:
args += ['--description', desc]
expected['description'] = desc
if device_id is not None:
args += ['--device_id', device_id]
expected['device_id'] = device_id
if deviceid is not None:
args += ['--device-id', deviceid]
expected['device_id'] = deviceid
if mgmt_ip is not None:
args += ['--management_ip_address', mgmt_ip]
expected['management_ip_address'] = mgmt_ip
if mgmtip is not None:
args += ['--management-ip-address', mgmtip]
expected['management_ip_address'] = mgmtip
if proto_port is not None:
args += ['--protocol_port', proto_port]
expected['protocol_port'] = proto_port
if protoport is not None:
args += ['--protocol-port', protoport]
expected['protocol_port'] = protoport
if admin_up is not None:
args += ['--admin_state_up']
expected['admin_state_up'] = True
if adminup is not None:
args += ['--admin-state-up']
expected['admin_state_up'] = True
if admin_down is not None:
args += ['--admin_state_down']
expected['admin_state_up'] = False
if admindown is not None:
args += ['--admin-state-down']
expected['admin_state_up'] = False
if tenant_bound is not None:
args += ['--tenant_bound', tenant_bound]
expected['tenant_bound'] = (tenant_bound
if tenant_bound != "None" else None)
if tenantbound is not None:
args += ['--tenant-bound', tenantbound]
expected['tenant_bound'] = (tenantbound
if tenantbound != "None" else None)
if auto_delete is not None:
args += ['--auto_delete']
expected['auto_delete'] = True
if autodelete is not None:
args += ['--auto-delete']
expected['auto_delete'] = True
if no_auto_delete is not None:
args += ['--no_auto_delete']
expected['auto_delete'] = False
if noautodelete is not None:
args += ['--no-auto-delete']
expected['auto_delete'] = False
self._test_update_resource(resource, cmd, myid, args, expected)
def test_update_hosting_device_creds(self):
self._test_update_hosting_device(creds_id='some id')
self._test_update_hosting_device(credsid='some id')
def test_update_hosting_device_name(self):
"""Update hosting device: myid --name myname."""
self._test_update_hosting_device(name='myname')
def test_update_hosting_device_description(self):
self._test_update_hosting_device(desc='some description')
def test_update_hosting_device_device_id(self):
self._test_update_hosting_device(device_id='ABC123abc')
self._test_update_hosting_device(deviceid='ABC123abc')
def test_update_hosting_device_mgmt_ip(self):
self._test_update_hosting_device(mgmt_ip='192.168.0.1')
self._test_update_hosting_device(mgmtip='192.168.0.1')
def test_update_hosting_device_proto_port(self):
self._test_update_hosting_device(proto_port='22')
self._test_update_hosting_device(protoport='22')
def test_update_hosting_device_admin_state(self):
self._test_update_hosting_device(admin_up=True)
self._test_update_hosting_device(adminup=True)
self._test_update_hosting_device(admin_down=True)
self._test_update_hosting_device(admindown=True)
def test_update_hosting_device_tenant_bound(self):
self._test_update_hosting_device(tenant_bound='None')
self._test_update_hosting_device(tenantbound='None')
self._test_update_hosting_device(tenant_bound='')
self._test_update_hosting_device(tenantbound='')
self._test_update_hosting_device(tenant_bound='some id')
self._test_update_hosting_device(tenantbound='some id')
def test_update_hosting_device_auto_delete(self):
self._test_update_hosting_device(no_auto_delete=True)
self._test_update_hosting_device(noautodelete=True)
self._test_update_hosting_device(auto_delete=True)
self._test_update_hosting_device(autodelete=True)
def test_delete_hosting_device(self):
"""Delete hosting device: myid."""
resource = 'hosting_device'
cmd = hostingdevice.HostingDeviceDelete(test_cli20.MyApp(sys.stdout),
None)
myid = 'myid'
args = [myid]
self._test_delete_resource(resource, cmd, myid, args)
def test_show_hosting_device(self):
"""Show hosting device: myid."""
resource = 'hosting_device'
cmd = hostingdevice.HostingDeviceShow(test_cli20.MyApp(sys.stdout),
None)
args = ['--fields', 'id', '--fields', 'name', self.test_id]
self._test_show_resource(resource, cmd, self.test_id, args,
['id', 'name'])
def test_get_hosting_device_config(self):
"""Get config of hosting device: myid."""
resource = 'hosting_device'
cmd = hostingdevice.HostingDeviceGetConfig(
test_cli20.MyApp(sys.stdout), None)
args = [self.test_id]
attr = resource + "_path"
p = getattr(self.client, attr)
setattr(self.client, attr, p + hostingdevice.HOSTING_DEVICE_CONFIG)
self._test_show_resource(resource, cmd, self.test_id, args)
|
|
"""
SoftLayer.tests.api_tests
~~~~~~~~~~~~~~~~~~~~~~~~~
:license: MIT, see LICENSE for more details.
"""
import mock
import SoftLayer
import SoftLayer.API
from SoftLayer import testing
from SoftLayer import transports
class Initialization(testing.TestCase):
def test_init(self):
client = SoftLayer.Client(username='doesnotexist',
api_key='issurelywrong',
timeout=10)
self.assertIsInstance(client.auth, SoftLayer.BasicAuthentication)
self.assertEqual(client.auth.username, 'doesnotexist')
self.assertEqual(client.auth.api_key, 'issurelywrong')
self.assertIsNotNone(client.transport)
self.assertIsInstance(client.transport, transports.XmlRpcTransport)
self.assertEqual(client.transport.timeout, 10)
@mock.patch('SoftLayer.config.get_client_settings')
def test_env(self, get_client_settings):
auth = mock.Mock()
get_client_settings.return_value = {
'timeout': 10,
'endpoint_url': 'http://endpoint_url/',
}
client = SoftLayer.Client(auth=auth)
self.assertEqual(client.auth.get_headers(), auth.get_headers())
self.assertEqual(client.transport.timeout, 10)
self.assertEqual(client.transport.endpoint_url, 'http://endpoint_url')
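        # Editorial note: get_client_settings is the single point where
        # file/environment configuration is read, so mocking it lets the test
        # inject endpoint_url and timeout without touching real settings.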
class ClientMethods(testing.TestCase):
def test_repr(self):
client = SoftLayer.Client(
username='doesnotexist',
api_key='issurelywrong'
)
self.assertIn("Client", repr(client))
def test_service_repr(self):
client = SoftLayer.Client(
username='doesnotexist',
api_key='issurelywrong'
)
self.assertIn("Service", repr(client['SERVICE']))
def test_len(self):
client = SoftLayer.Client(
username='doesnotexist',
api_key='issurelywrong'
)
self.assertEqual(len(client), 0)
class APIClient(testing.TestCase):
def test_simple_call(self):
mock = self.set_mock('SoftLayer_SERVICE', 'METHOD')
mock.return_value = {"test": "result"}
resp = self.client['SERVICE'].METHOD()
self.assertEqual(resp, {"test": "result"})
self.assert_called_with('SoftLayer_SERVICE', 'METHOD',
mask=None,
filter=None,
identifier=None,
args=tuple(),
limit=None,
offset=None,
)
def test_complex(self):
mock = self.set_mock('SoftLayer_SERVICE', 'METHOD')
mock.return_value = {"test": "result"}
_filter = {'TYPE': {'attribute': {'operation': '^= prefix'}}}
resp = self.client['SERVICE'].METHOD(
1234,
id=5678,
mask={'object': {'attribute': ''}},
headers={'header': 'value'},
raw_headers={'RAW': 'HEADER'},
filter=_filter,
limit=9,
offset=10)
self.assertEqual(resp, {"test": "result"})
self.assert_called_with('SoftLayer_SERVICE', 'METHOD',
mask={'object': {'attribute': ''}},
filter=_filter,
identifier=5678,
args=(1234,),
limit=9,
offset=10,
)
calls = self.calls('SoftLayer_SERVICE', 'METHOD')
self.assertEqual(len(calls), 1)
self.assertIn('header', calls[0].headers)
self.assertEqual(calls[0].headers['header'], 'value')
@mock.patch('SoftLayer.API.BaseClient.iter_call')
def test_iterate(self, _iter_call):
self.client['SERVICE'].METHOD(iter=True)
_iter_call.assert_called_with('SERVICE', 'METHOD')
@mock.patch('SoftLayer.API.BaseClient.iter_call')
def test_service_iter_call(self, _iter_call):
self.client['SERVICE'].iter_call('METHOD', 'ARG')
_iter_call.assert_called_with('SERVICE', 'METHOD', 'ARG')
@mock.patch('SoftLayer.API.BaseClient.iter_call')
def test_service_iter_call_with_chunk(self, _iter_call):
self.client['SERVICE'].iter_call('METHOD', 'ARG', chunk=2)
_iter_call.assert_called_with('SERVICE', 'METHOD', 'ARG', chunk=2)
@mock.patch('SoftLayer.API.BaseClient.call')
def test_iter_call(self, _call):
# chunk=100, no limit
_call.side_effect = [list(range(100)), list(range(100, 125))]
result = list(self.client.iter_call('SERVICE', 'METHOD', iter=True))
self.assertEqual(list(range(125)), result)
_call.assert_has_calls([
mock.call('SERVICE', 'METHOD', limit=100, iter=False, offset=0),
mock.call('SERVICE', 'METHOD', limit=100, iter=False, offset=100),
])
_call.reset_mock()
# chunk=100, no limit. Requires one extra request.
_call.side_effect = [list(range(100)), list(range(100, 200)), []]
result = list(self.client.iter_call('SERVICE', 'METHOD', iter=True))
self.assertEqual(list(range(200)), result)
_call.assert_has_calls([
mock.call('SERVICE', 'METHOD', limit=100, iter=False, offset=0),
mock.call('SERVICE', 'METHOD', limit=100, iter=False, offset=100),
mock.call('SERVICE', 'METHOD', limit=100, iter=False, offset=200),
])
_call.reset_mock()
# chunk=25, limit=30
_call.side_effect = [list(range(0, 25)), list(range(25, 30))]
result = list(self.client.iter_call(
'SERVICE', 'METHOD', iter=True, limit=30, chunk=25))
self.assertEqual(list(range(30)), result)
_call.assert_has_calls([
mock.call('SERVICE', 'METHOD', iter=False, limit=25, offset=0),
mock.call('SERVICE', 'METHOD', iter=False, limit=5, offset=25),
])
_call.reset_mock()
# A non-list was returned
_call.side_effect = ["test"]
result = list(self.client.iter_call('SERVICE', 'METHOD', iter=True))
self.assertEqual(["test"], result)
_call.assert_has_calls([
mock.call('SERVICE', 'METHOD', iter=False, limit=100, offset=0),
])
_call.reset_mock()
# chunk=25, limit=30, offset=12
_call.side_effect = [list(range(0, 25)), list(range(25, 30))]
result = list(self.client.iter_call('SERVICE', 'METHOD', 'ARG',
iter=True,
limit=30,
chunk=25,
offset=12))
self.assertEqual(list(range(30)), result)
_call.assert_has_calls([
mock.call('SERVICE', 'METHOD', 'ARG',
iter=False, limit=25, offset=12),
mock.call('SERVICE', 'METHOD', 'ARG',
iter=False, limit=5, offset=37),
])
# Chunk size of 0 is invalid
self.assertRaises(
AttributeError,
lambda: list(self.client.iter_call('SERVICE', 'METHOD',
iter=True, chunk=0)))
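        # Editorial sketch of the pagination contract implied by the
        # assertions above (not the library's actual code): iter_call keeps
        # invoking call() with iter=False, advancing offset by the page size,
        # shrinking limit so the final page never exceeds the requested
        # total, and stopping once a short or empty page comes back.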
def test_call_invalid_arguments(self):
self.assertRaises(
TypeError,
self.client.call, 'SERVICE', 'METHOD', invalid_kwarg='invalid')
def test_call_compression_disabled(self):
mocked = self.set_mock('SoftLayer_SERVICE', 'METHOD')
mocked.return_value = {}
self.client['SERVICE'].METHOD(compress=False)
calls = self.calls('SoftLayer_SERVICE', 'METHOD')
self.assertEqual(len(calls), 1)
headers = calls[0].transport_headers
self.assertEqual(headers.get('accept-encoding'), 'identity')
def test_call_compression_enabled(self):
mocked = self.set_mock('SoftLayer_SERVICE', 'METHOD')
mocked.return_value = {}
self.client['SERVICE'].METHOD(compress=True)
calls = self.calls('SoftLayer_SERVICE', 'METHOD')
self.assertEqual(len(calls), 1)
headers = calls[0].transport_headers
self.assertEqual(headers.get('accept-encoding'),
'gzip, deflate, compress')
def test_call_compression_override(self):
# raw_headers should override compress=False
mocked = self.set_mock('SoftLayer_SERVICE', 'METHOD')
mocked.return_value = {}
self.client['SERVICE'].METHOD(
compress=False,
raw_headers={'Accept-Encoding': 'gzip'})
calls = self.calls('SoftLayer_SERVICE', 'METHOD')
self.assertEqual(len(calls), 1)
headers = calls[0].transport_headers
self.assertEqual(headers.get('accept-encoding'), 'gzip')
class UnauthenticatedAPIClient(testing.TestCase):
def set_up(self):
self.client = SoftLayer.Client(endpoint_url="ENDPOINT")
@mock.patch('SoftLayer.config.get_client_settings')
def test_init(self, get_client_settings):
get_client_settings.return_value = {}
client = SoftLayer.Client()
self.assertIsNone(client.auth)
@mock.patch('SoftLayer.config.get_client_settings')
def test_init_with_proxy(self, get_client_settings):
get_client_settings.return_value = {'proxy': 'http://localhost:3128'}
client = SoftLayer.Client()
self.assertEqual(client.transport.proxy, 'http://localhost:3128')
@mock.patch('SoftLayer.API.BaseClient.call')
def test_authenticate_with_password(self, _call):
_call.return_value = {
'userId': 12345,
'hash': 'TOKEN',
}
self.client.authenticate_with_password('USERNAME', 'PASSWORD')
_call.assert_called_with(
'User_Customer',
'getPortalLoginToken',
'USERNAME',
'PASSWORD',
None,
None)
self.assertIsNotNone(self.client.auth)
self.assertEqual(self.client.auth.user_id, 12345)
self.assertEqual(self.client.auth.auth_token, 'TOKEN')
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2013 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file contains a parser for the Mac OS X MacKeeper cache database."""
import json
from plaso.lib import event
from plaso.lib import eventdata
from plaso.lib import timelib
from plaso.parsers import sqlite
from plaso.parsers.sqlite_plugins import interface
def DictToList(data_dict):
"""Take a dict object and return a list of strings back."""
ret_list = []
for key, value in data_dict.iteritems():
if key in ('body', 'datetime', 'type', 'room', 'rooms', 'id'):
continue
ret_list.append(u'{0:s} = {1!s}'.format(key, value))
return ret_list
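# Editorial example (hypothetical input): DictToList({u'body': u'hi',
# u'user': u'bob', u'sid': 123}) returns [u'user = bob', u'sid = 123'],
# with ordering following dict iteration order.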
def ExtractJQuery(jquery_raw):
"""Extract and return the data inside a JQuery as a dict object."""
data_part = u''
if not jquery_raw:
return {}
if '[' in jquery_raw:
_, _, first_part = jquery_raw.partition('[')
data_part, _, _ = first_part.partition(']')
elif jquery_raw.startswith('//'):
_, _, first_part = jquery_raw.partition('{')
data_part = u'{{{0:s}'.format(first_part)
elif '({' in jquery_raw:
_, _, first_part = jquery_raw.partition('(')
data_part, _, _ = first_part.rpartition(')')
if not data_part:
return {}
try:
data_dict = json.loads(data_part)
except ValueError:
return {}
return data_dict
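# Editorial illustration of the three JSONP shapes handled above:
#   u'callback([{"a": 1}])'  -> the text between '[' and ']'
#   u'//comment\n{"a": 1}'   -> everything after the first '{', re-wrapped
#   u'callback({"a": 1})'    -> the text between '(' and the last ')'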
def ParseChatData(data):
"""Parse a chat comment data dict and return a parsed one back.
Args:
data: A dict object that is parsed from the record.
Returns:
    A dict object with the parsed results.
"""
data_store = {}
if 'body' in data:
body = data.get('body', '').replace('\n', ' ')
if body.startswith('//') and '{' in body:
body_dict = ExtractJQuery(body)
title, _, _ = body.partition('{')
body = u'{0:s} <{1!s}>'.format(title[2:], DictToList(body_dict))
else:
body = 'No text.'
data_store['text'] = body
room = data.get('rooms', None)
if not room:
room = data.get('room', None)
if room:
data_store['room'] = room
data_store['id'] = data.get('id', None)
user = data.get('user', None)
if user:
try:
user_sid = int(user)
data_store['sid'] = user_sid
except (ValueError, TypeError):
data_store['user'] = user
return data_store
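# Editorial note: a numeric 'user' value is stored as an integer under 'sid',
# while a non-numeric value is kept as a plain 'user' name.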
class MacKeeperCacheEvent(event.EventObject):
"""Convenience class for a MacKeeper Cache event."""
DATA_TYPE = 'mackeeper:cache'
def __init__(self, timestamp, description, identifier, url, data_dict):
"""Initializes the event object.
Args:
timestamp: A timestamp as a number of milliseconds since Epoch
or as a UTC string.
description: The description of the cache entry.
identifier: The row identifier.
url: The MacKeeper URL value that is stored in every event.
data_dict: A dict object with the descriptive information.
"""
super(MacKeeperCacheEvent, self).__init__()
# Two different types of timestamps stored in log files.
    if isinstance(timestamp, (int, long)):
self.timestamp = timelib.Timestamp.FromJavaTime(timestamp)
else:
self.timestamp = timelib.Timestamp.FromTimeString(timestamp)
self.timestamp_desc = eventdata.EventTimestamp.ADDED_TIME
self.description = description
self.offset = identifier
self.text = data_dict.get('text', None)
self.user_sid = data_dict.get('sid', None)
self.user_name = data_dict.get('user', None)
self.event_type = data_dict.get('event_type', None)
self.room = data_dict.get('room', None)
self.record_id = data_dict.get('id', None)
self.url = url
class MacKeeperCachePlugin(interface.SQLitePlugin):
"""Plugin for the MacKeeper Cache database file."""
NAME = 'mackeeper_cache'
DESCRIPTION = u'Parser for MacKeeper Cache SQLite database files.'
# Define the needed queries.
QUERIES = [((
'SELECT d.entry_ID AS id, d.receiver_data AS data, r.request_key, '
'r.time_stamp AS time_string FROM cfurl_cache_receiver_data d, '
'cfurl_cache_response r WHERE r.entry_ID = '
'd.entry_ID'), 'ParseReceiverData')]
# The required tables.
REQUIRED_TABLES = frozenset([
'cfurl_cache_blob_data', 'cfurl_cache_receiver_data',
'cfurl_cache_response'])
def ParseReceiverData(self, parser_context, row, query=None, **unused_kwargs):
"""Parses a single row from the receiver and cache response table.
Args:
parser_context: A parser context object (instance of ParserContext).
row: The row resulting from the query.
query: Optional query string. The default is None.
"""
data = {}
key_url = row['request_key']
data_dict = {}
description = 'MacKeeper Entry'
# Check the URL, since that contains vital information about the type of
# event we are dealing with.
if key_url.endswith('plist'):
description = 'Configuration Definition'
data['text'] = 'Plist content added to cache.'
elif key_url.startswith('http://event.zeobit.com'):
description = 'MacKeeper Event'
try:
_, _, part = key_url.partition('?')
data['text'] = part.replace('&', ' ')
except UnicodeDecodeError:
data['text'] = 'N/A'
elif key_url.startswith('http://account.zeobit.com'):
description = 'Account Activity'
_, _, activity = key_url.partition('#')
if activity:
data['text'] = u'Action started: {0:s}'.format(activity)
else:
data['text'] = u'Unknown activity.'
elif key_url.startswith('http://support.') and 'chat' in key_url:
description = 'Chat '
try:
jquery = unicode(row['data'])
except UnicodeDecodeError:
jquery = ''
data_dict = ExtractJQuery(jquery)
data = ParseChatData(data_dict)
data['entry_type'] = data_dict.get('type', '')
if data['entry_type'] == 'comment':
description += 'Comment'
elif data['entry_type'] == 'outgoing':
description += 'Outgoing Message'
elif data['entry_type'] == 'incoming':
description += 'Incoming Message'
else:
# Empty or not known entry type, generic status message.
description += 'Entry'
data['text'] = u';'.join(DictToList(data_dict))
if not data['text']:
data['text'] = 'No additional data.'
event_object = MacKeeperCacheEvent(
row['time_string'], description, row['id'], key_url, data)
parser_context.ProduceEvent(
event_object, plugin_name=self.NAME, query=query)
sqlite.SQLiteParser.RegisterPlugin(MacKeeperCachePlugin)
|
|
# Copyright (c) 2014 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import TestCase
from stoplight import validation_function, Rule, validate
from stoplight.exceptions import ValidationFailed, ValidationProgrammingError
# TODO(tonytan4ever): We probably want to move this to a
# test helpers library
VALIDATED_STR = 'validated'
@validation_function
def is_upper(z):
"""Simple validation function for testing purposes
that ensures that input is all caps
"""
if z.upper() != z:
        raise ValidationFailed('{0} is not all uppercase'.format(z))
error_count = 0
def abort(code):
global error_count
error_count = error_count + 1
other_vals = dict()
get_other_val = other_vals.get
class DummyRequest(object):
def __init__(self):
self.headers = dict(header1='headervalue1')
class DummyResponse(object):
pass
@validation_function
def is_request(candidate):
if not isinstance(candidate, DummyRequest):
raise ValidationFailed('Input must be a request')
@validation_function
def is_response(candidate):
if not isinstance(candidate, DummyResponse):
raise ValidationFailed('Input must be a response')
RequestRule = Rule(is_request(), lambda: abort(404))
ResponseRule = Rule(is_response(), lambda: abort(404))
UppercaseRule = Rule(is_upper(), lambda: abort(404))
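# Editorial note: a Rule pairs an *instantiated* validation function with the
# action to run on failure; declaring rules once, as above, lets several
# endpoints share them instead of repeating Rule(...) inline.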
class DummyEndpoint(object):
# This should throw a ValidationProgrammingError
# when called because the user did not actually
# call validate_upper.
# Note: the lambda in this function can never actually be
# called, so we use no cover here
@validate(value=Rule(is_upper, lambda: abort(404))) # pragma: no cover
def get_value_programming_error(self, value):
# This function body should never be
# callable since the validation error
# should not allow it to be called
assert False # pragma: no cover
@validate(
value1=Rule(is_upper(), lambda: abort(404)),
value2=Rule(is_upper(), lambda: abort(404)),
value3=Rule(is_upper(), lambda: abort(404))
) # pragma: no cover
def get_value_happy_path(self, value1, value2, value3):
return value1 + value2 + value3
@validate(
value1=Rule(is_upper(), lambda: abort(404)),
value2=Rule(is_upper(empty_ok=True), lambda: abort(404),
get_other_val),
) # pragma: no cover
def get_value_with_getter(self, value1):
global other_vals
return value1 + other_vals.get('value2')
# Falcon-style endpoint
@validate(
request=Rule(is_request(), lambda: abort(404)),
response=Rule(is_response(), lambda: abort(404)),
value=Rule(is_upper(), lambda: abort(404))
)
def get_falcon_style(self, request, response, value):
return value
# Falcon-style w/ declared rules
@validate(request=RequestRule, response=ResponseRule,
value=UppercaseRule)
def get_falcon_with_declared_rules(self, request, response, value):
return value
class TestValidationFunction(TestCase):
def test_empty_ok(self):
is_upper(empty_ok=True)('')
with self.assertRaises(ValidationFailed):
is_upper()('')
is_upper(none_ok=True)(None)
with self.assertRaises(ValidationFailed):
is_upper()(None)
class TestValidationDecorator(TestCase):
def setUp(self):
self.ep = DummyEndpoint()
def test_programming_error(self):
with self.assertRaises(ValidationProgrammingError):
self.ep.get_value_programming_error('AT_ME')
def test_falcon_style(self):
global error_count
request = DummyRequest()
response = DummyResponse()
# Try to call with missing params. The validation
# function should never get called
oldcount = error_count
self.ep.get_falcon_style(response, 'HELLO')
self.assertEqual(oldcount + 1, error_count)
# Try to pass a string to a positional argument
# where a response is expected
oldcount = error_count
self.ep.get_falcon_style(request, "bogusinput", 'HELLO')
self.assertEqual(oldcount + 1, error_count)
# Pass in as kwvalues with good input but out of
# typical order (should succeed)
oldcount = error_count
self.ep.get_falcon_style(response=response, value='HELLO',
request=request)
self.assertEqual(oldcount, error_count)
# Pass in as kwvalues with good input but out of
# typical order with an invalid value (lower-case 'h')
oldcount = error_count
self.ep.get_falcon_style(response=response, value='hELLO',
request=request)
self.assertEqual(oldcount + 1, error_count)
# Pass in as kwvalues with good input but out of typical order
# and pass an invalid value. Note that here the response is
# assigned to request, etc.
oldcount = error_count
self.ep.get_falcon_style(response=request, value='HELLO',
request=response)
self.assertEqual(oldcount + 1, error_count)
# Happy path
oldcount = error_count
self.ep.get_falcon_style(request, response, 'HELLO')
self.assertEqual(oldcount, error_count)
def test_falcon_style_declared_rules(self):
# The following tests repeat the above
# tests, but this time they test using the
# endpoint with the rules being declared
# separately. See get_falcon_with_declared_rules above
global error_count
request = DummyRequest()
response = DummyResponse()
# Try to call with missing params. The validation
# function should never get called
oldcount = error_count
self.ep.get_falcon_with_declared_rules(response, 'HELLO')
self.assertEqual(oldcount + 1, error_count)
# Try to pass a string to a positional argument
# where a response is expected
oldcount = error_count
self.ep.get_falcon_with_declared_rules(request, "bogusinput", 'HELLO')
self.assertEqual(oldcount + 1, error_count)
# Pass in as kwvalues with good input but out of
# typical order (should succeed)
oldcount = error_count
self.ep.get_falcon_with_declared_rules(
response=response,
value='HELLO',
request=request)
self.assertEqual(oldcount, error_count)
# Pass in as kwvalues with good input but out of
# typical order with an invalid value (lower-case 'h')
oldcount = error_count
self.ep.get_falcon_with_declared_rules(
response=response,
value='hELLO',
request=request)
self.assertEqual(oldcount + 1, error_count)
# Pass in as kwvalues with good input but out of typical order
# and pass an invalid value. Note that here the response is
# assigned to request, etc.
oldcount = error_count
self.ep.get_falcon_with_declared_rules(response=request, value='HELLO',
request=response)
self.assertEqual(oldcount + 1, error_count)
# Happy path
oldcount = error_count
self.ep.get_falcon_with_declared_rules(request, response, 'HELLO')
self.assertEqual(oldcount, error_count)
def test_happy_path_and_validation_failure(self):
global error_count
# Should not throw
res = self.ep.get_value_happy_path('WHATEVER', 'HELLO', 'YES')
self.assertEqual('WHATEVERHELLOYES', res)
# Validation should have failed, and
# we should have seen a tick in the error count
oldcount = error_count
res = self.ep.get_value_happy_path('WHAtEVER', 'HELLO', 'YES')
self.assertEqual(oldcount + 1, error_count)
# Check passing a None value. This decorator does
# not permit none values.
oldcount = error_count
res = self.ep.get_value_happy_path(None, 'HELLO', 'YES')
self.assertEqual(oldcount + 1, error_count)
def test_getter(self):
global other_vals
other_vals['value2'] = 'HELLO'
# Now have our validation actually try to
# get those values
# This should succeed
res = self.ep.get_value_with_getter('TEST')
self.assertEqual('TESTHELLO', res)
# check empty_ok
other_vals['value2'] = ''
res = self.ep.get_value_with_getter('TEST')
self.assertEqual('TEST', res)
|
|
#!/usr/bin/env python
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Run tests in parallel."""
from __future__ import print_function
import argparse
import ast
import collections
import glob
import itertools
import json
import logging
import multiprocessing
import os
import os.path
import pipes
import platform
import random
import re
import socket
import subprocess
import sys
import tempfile
import traceback
import time
from six.moves import urllib
import uuid
import six
import python_utils.jobset as jobset
import python_utils.report_utils as report_utils
import python_utils.watch_dirs as watch_dirs
import python_utils.start_port_server as start_port_server
_ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
os.chdir(_ROOT)
_FORCE_ENVIRON_FOR_WRAPPERS = {
'GRPC_VERBOSITY': 'DEBUG',
}
_POLLING_STRATEGIES = {
'linux': ['epoll', 'poll', 'poll-cv']
}
def platform_string():
return jobset.platform_string()
_DEFAULT_TIMEOUT_SECONDS = 5 * 60
def run_shell_command(cmd, env=None, cwd=None):
try:
subprocess.check_output(cmd, shell=True, env=env, cwd=cwd)
except subprocess.CalledProcessError as e:
logging.exception("Error while running command '%s'. Exit status %d. Output:\n%s",
e.cmd, e.returncode, e.output)
raise
# SimpleConfig: just compile with CONFIG=config, and run the binary to test
class Config(object):
  def __init__(self, config, environ=None, timeout_multiplier=1,
               tool_prefix=None, iomgr_platform='native'):
if environ is None:
environ = {}
self.build_config = config
self.environ = environ
self.environ['CONFIG'] = config
    self.tool_prefix = tool_prefix or []
self.timeout_multiplier = timeout_multiplier
self.iomgr_platform = iomgr_platform
  def job_spec(self, cmdline, timeout_seconds=_DEFAULT_TIMEOUT_SECONDS,
               shortname=None, environ=None, cpu_cost=1.0, flaky=False):
"""Construct a jobset.JobSpec for a test under this config
Args:
cmdline: a list of strings specifying the command line the test
would like to run
"""
actual_environ = self.environ.copy()
    for k, v in (environ or {}).items():
actual_environ[k] = v
return jobset.JobSpec(cmdline=self.tool_prefix + cmdline,
shortname=shortname,
environ=actual_environ,
cpu_cost=cpu_cost,
timeout_seconds=(self.timeout_multiplier * timeout_seconds if timeout_seconds else None),
flake_retries=5 if flaky or args.allow_flakes else 0,
timeout_retries=3 if args.allow_flakes else 0)
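# Editorial example (hypothetical values): a sanitizer config might be built
# as Config('tsan', timeout_multiplier=2,
#           environ={'TSAN_OPTIONS': 'suppressions=tools/tsan_suppressions'}),
# and every job_spec it produces then inherits CONFIG=tsan plus that environ.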
def get_c_tests(travis, test_lang):
out = []
platforms_str = 'ci_platforms' if travis else 'platforms'
with open('tools/run_tests/generated/tests.json') as f:
js = json.load(f)
return [tgt
for tgt in js
if tgt['language'] == test_lang and
platform_string() in tgt[platforms_str] and
not (travis and tgt['flaky'])]
def _check_compiler(compiler, supported_compilers):
if compiler not in supported_compilers:
raise Exception('Compiler %s not supported (on this platform).' % compiler)
def _check_arch(arch, supported_archs):
if arch not in supported_archs:
raise Exception('Architecture %s not supported.' % arch)
def _is_use_docker_child():
"""Returns True if running running as a --use_docker child."""
return True if os.getenv('RUN_TESTS_COMMAND') else False
_PythonConfigVars = collections.namedtuple(
'_ConfigVars', ['shell', 'builder', 'builder_prefix_arguments',
'venv_relative_python', 'toolchain', 'runner'])
def _python_config_generator(name, major, minor, bits, config_vars):
return PythonConfig(
name,
config_vars.shell + config_vars.builder + config_vars.builder_prefix_arguments + [
_python_pattern_function(major=major, minor=minor, bits=bits)] + [
name] + config_vars.venv_relative_python + config_vars.toolchain,
config_vars.shell + config_vars.runner + [
os.path.join(name, config_vars.venv_relative_python[0])])
def _pypy_config_generator(name, major, config_vars):
return PythonConfig(
name,
config_vars.shell + config_vars.builder + config_vars.builder_prefix_arguments + [
_pypy_pattern_function(major=major)] + [
name] + config_vars.venv_relative_python + config_vars.toolchain,
config_vars.shell + config_vars.runner + [
os.path.join(name, config_vars.venv_relative_python[0])])
def _python_pattern_function(major, minor, bits):
# Bit-ness is handled by the test machine's environment
if os.name == "nt":
if bits == "64":
return '/c/Python{major}{minor}/python.exe'.format(
major=major, minor=minor, bits=bits)
else:
return '/c/Python{major}{minor}_{bits}bits/python.exe'.format(
major=major, minor=minor, bits=bits)
else:
return 'python{major}.{minor}'.format(major=major, minor=minor)
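# Editorial examples of the interpreter locations produced above (assuming
# the layout of the Windows test machines):
#   nt, 64-bit -> /c/Python27/python.exe
#   nt, 32-bit -> /c/Python27_32bits/python.exe
#   elsewhere  -> python2.7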
def _pypy_pattern_function(major):
if major == '2':
return 'pypy'
elif major == '3':
return 'pypy3'
else:
raise ValueError("Unknown PyPy major version")
class CLanguage(object):
def __init__(self, make_target, test_lang):
self.make_target = make_target
self.platform = platform_string()
self.test_lang = test_lang
def configure(self, config, args):
self.config = config
self.args = args
if self.args.compiler == 'cmake':
_check_arch(self.args.arch, ['default'])
self._use_cmake = True
self._docker_distro = 'jessie'
self._make_options = []
elif self.platform == 'windows':
self._use_cmake = False
self._make_options = [_windows_toolset_option(self.args.compiler),
_windows_arch_option(self.args.arch)]
else:
self._use_cmake = False
self._docker_distro, self._make_options = self._compiler_options(self.args.use_docker,
self.args.compiler)
if args.iomgr_platform == "uv":
cflags = '-DGRPC_UV '
try:
cflags += subprocess.check_output(['pkg-config', '--cflags', 'libuv']).strip() + ' '
except (subprocess.CalledProcessError, OSError):
pass
try:
ldflags = subprocess.check_output(['pkg-config', '--libs', 'libuv']).strip() + ' '
except (subprocess.CalledProcessError, OSError):
ldflags = '-luv '
self._make_options += ['EXTRA_CPPFLAGS={}'.format(cflags),
'EXTRA_LDLIBS={}'.format(ldflags)]
def test_specs(self):
out = []
binaries = get_c_tests(self.args.travis, self.test_lang)
for target in binaries:
if self._use_cmake and target.get('boringssl', False):
# cmake doesn't build boringssl tests
continue
polling_strategies = (_POLLING_STRATEGIES.get(self.platform, ['all'])
if target.get('uses_polling', True)
else ['all'])
if self.args.iomgr_platform == 'uv':
polling_strategies = ['all']
for polling_strategy in polling_strategies:
env={'GRPC_DEFAULT_SSL_ROOTS_FILE_PATH':
_ROOT + '/src/core/lib/tsi/test_creds/ca.pem',
'GRPC_POLL_STRATEGY': polling_strategy,
'GRPC_VERBOSITY': 'DEBUG'}
shortname_ext = '' if polling_strategy=='all' else ' GRPC_POLL_STRATEGY=%s' % polling_strategy
timeout_scaling = 1
if polling_strategy == 'poll-cv':
timeout_scaling *= 5
if polling_strategy in target.get('excluded_poll_engines', []):
continue
# Scale overall test timeout if running under various sanitizers.
config = self.args.config
if ('asan' in config
or config == 'msan'
or config == 'tsan'
or config == 'ubsan'
or config == 'helgrind'
or config == 'memcheck'):
timeout_scaling *= 20
if self.config.build_config in target['exclude_configs']:
continue
if self.args.iomgr_platform in target.get('exclude_iomgrs', []):
continue
if self.platform == 'windows':
if self._use_cmake:
binary = 'cmake/build/%s/%s.exe' % (_MSBUILD_CONFIG[self.config.build_config], target['name'])
else:
binary = 'vsprojects/%s%s/%s.exe' % (
'x64/' if self.args.arch == 'x64' else '',
_MSBUILD_CONFIG[self.config.build_config],
target['name'])
else:
if self._use_cmake:
binary = 'cmake/build/%s' % target['name']
else:
binary = 'bins/%s/%s' % (self.config.build_config, target['name'])
cpu_cost = target['cpu_cost']
if cpu_cost == 'capacity':
cpu_cost = multiprocessing.cpu_count()
if os.path.isfile(binary):
if 'gtest' in target and target['gtest']:
# here we parse the output of --gtest_list_tests to build up a
# complete list of the tests contained in a binary
# for each test, we then add a job to run, filtering for just that
# test
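          # Expected --gtest_list_tests output shape (editorial assumption):
          #   SuiteName.
          #     TestOne
          #     TestTwo  # trailing '#' comments are stripped below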
with open(os.devnull, 'w') as fnull:
tests = subprocess.check_output([binary, '--gtest_list_tests'],
stderr=fnull)
base = None
for line in tests.split('\n'):
i = line.find('#')
if i >= 0: line = line[:i]
if not line: continue
if line[0] != ' ':
base = line.strip()
else:
assert base is not None
assert line[1] == ' '
test = base + line.strip()
cmdline = [binary, '--gtest_filter=%s' % test] + target['args']
out.append(self.config.job_spec(cmdline,
shortname='%s %s' % (' '.join(cmdline), shortname_ext),
cpu_cost=cpu_cost,
timeout_seconds=_DEFAULT_TIMEOUT_SECONDS * timeout_scaling,
environ=env))
else:
cmdline = [binary] + target['args']
out.append(self.config.job_spec(cmdline,
shortname=' '.join(
pipes.quote(arg)
for arg in cmdline) +
shortname_ext,
cpu_cost=cpu_cost,
flaky=target.get('flaky', False),
timeout_seconds=target.get('timeout_seconds', _DEFAULT_TIMEOUT_SECONDS) * timeout_scaling,
environ=env))
elif self.args.regex == '.*' or self.platform == 'windows':
print('\nWARNING: binary not found, skipping', binary)
return sorted(out)
def make_targets(self):
if self.platform == 'windows':
# don't build tools on windows just yet
return ['buildtests_%s' % self.make_target]
return ['buildtests_%s' % self.make_target, 'tools_%s' % self.make_target]
def make_options(self):
    return self._make_options
def pre_build_steps(self):
if self._use_cmake:
if self.platform == 'windows':
return [['tools\\run_tests\\helper_scripts\\pre_build_cmake.bat']]
else:
return [['tools/run_tests/helper_scripts/pre_build_cmake.sh']]
else:
if self.platform == 'windows':
return [['tools\\run_tests\\helper_scripts\\pre_build_c.bat']]
else:
return []
def build_steps(self):
return []
def post_tests_steps(self):
if self.platform == 'windows':
return []
else:
return [['tools/run_tests/helper_scripts/post_tests_c.sh']]
def makefile_name(self):
if self._use_cmake:
return 'cmake/build/Makefile'
else:
return 'Makefile'
def _clang_make_options(self, version_suffix=''):
return ['CC=clang%s' % version_suffix,
'CXX=clang++%s' % version_suffix,
'LD=clang%s' % version_suffix,
'LDXX=clang++%s' % version_suffix]
def _gcc_make_options(self, version_suffix):
return ['CC=gcc%s' % version_suffix,
'CXX=g++%s' % version_suffix,
'LD=gcc%s' % version_suffix,
'LDXX=g++%s' % version_suffix]
def _compiler_options(self, use_docker, compiler):
"""Returns docker distro and make options to use for given compiler."""
if not use_docker and not _is_use_docker_child():
_check_compiler(compiler, ['default'])
if compiler == 'gcc4.9' or compiler == 'default':
return ('jessie', [])
elif compiler == 'gcc4.4':
return ('wheezy', self._gcc_make_options(version_suffix='-4.4'))
elif compiler == 'gcc4.6':
return ('wheezy', self._gcc_make_options(version_suffix='-4.6'))
elif compiler == 'gcc4.8':
return ('jessie', self._gcc_make_options(version_suffix='-4.8'))
elif compiler == 'gcc5.3':
return ('ubuntu1604', [])
elif compiler == 'clang3.4':
# on ubuntu1404, clang-3.4 alias doesn't exist, just use 'clang'
return ('ubuntu1404', self._clang_make_options())
elif compiler == 'clang3.5':
return ('jessie', self._clang_make_options(version_suffix='-3.5'))
elif compiler == 'clang3.6':
return ('ubuntu1604', self._clang_make_options(version_suffix='-3.6'))
elif compiler == 'clang3.7':
return ('ubuntu1604', self._clang_make_options(version_suffix='-3.7'))
else:
raise Exception('Compiler %s not supported.' % compiler)
def dockerfile_dir(self):
return 'tools/dockerfile/test/cxx_%s_%s' % (self._docker_distro,
_docker_arch_suffix(self.args.arch))
def __str__(self):
return self.make_target
class NodeLanguage(object):
def __init__(self):
self.platform = platform_string()
def configure(self, config, args):
self.config = config
self.args = args
# Note: electron ABI only depends on major and minor version, so that's all
# we should specify in the compiler argument
_check_compiler(self.args.compiler, ['default', 'node0.12',
'node4', 'node5', 'node6',
'node7', 'electron1.3'])
if args.iomgr_platform == "uv":
self.use_uv = True
else:
self.use_uv = False
if self.args.compiler == 'default':
self.runtime = 'node'
self.node_version = '7'
else:
if self.args.compiler.startswith('electron'):
self.runtime = 'electron'
self.node_version = self.args.compiler[8:]
else:
self.runtime = 'node'
# Take off the word "node"
self.node_version = self.args.compiler[4:]
def test_specs(self):
if self.platform == 'windows':
return [self.config.job_spec(['tools\\run_tests\\helper_scripts\\run_node.bat'])]
else:
run_script = 'run_node'
if self.runtime == 'electron':
run_script += '_electron'
return [self.config.job_spec(['tools/run_tests/helper_scripts/{}.sh'.format(run_script),
self.node_version],
None,
environ=_FORCE_ENVIRON_FOR_WRAPPERS)]
def pre_build_steps(self):
if self.platform == 'windows':
return [['tools\\run_tests\\helper_scripts\\pre_build_node.bat']]
else:
build_script = 'pre_build_node'
if self.runtime == 'electron':
build_script += '_electron'
return [['tools/run_tests/helper_scripts/{}.sh'.format(build_script),
self.node_version]]
def make_targets(self):
return []
def make_options(self):
return []
def build_steps(self):
if self.platform == 'windows':
      if self.config.build_config == 'dbg':
config_flag = '--debug'
else:
config_flag = '--release'
return [['tools\\run_tests\\helper_scripts\\build_node.bat',
'--grpc_uv={}'.format('true' if self.use_uv else 'false'),
config_flag]]
else:
build_script = 'build_node'
if self.runtime == 'electron':
build_script += '_electron'
# building for electron requires a patch version
self.node_version += '.0'
return [['tools/run_tests/helper_scripts/{}.sh'.format(build_script),
self.node_version,
'--grpc_uv={}'.format('true' if self.use_uv else 'false')]]
def post_tests_steps(self):
return []
def makefile_name(self):
return 'Makefile'
def dockerfile_dir(self):
return 'tools/dockerfile/test/node_jessie_%s' % _docker_arch_suffix(self.args.arch)
def __str__(self):
return 'node'
class PhpLanguage(object):
def configure(self, config, args):
self.config = config
self.args = args
_check_compiler(self.args.compiler, ['default'])
def test_specs(self):
return [self.config.job_spec(['src/php/bin/run_tests.sh'],
environ=_FORCE_ENVIRON_FOR_WRAPPERS)]
def pre_build_steps(self):
return []
def make_targets(self):
return ['static_c', 'shared_c']
def make_options(self):
return []
def build_steps(self):
return [['tools/run_tests/helper_scripts/build_php.sh']]
def post_tests_steps(self):
return [['tools/run_tests/helper_scripts/post_tests_php.sh']]
def makefile_name(self):
return 'Makefile'
def dockerfile_dir(self):
return 'tools/dockerfile/test/php_jessie_%s' % _docker_arch_suffix(self.args.arch)
def __str__(self):
return 'php'
class Php7Language(object):
def configure(self, config, args):
self.config = config
self.args = args
_check_compiler(self.args.compiler, ['default'])
def test_specs(self):
return [self.config.job_spec(['src/php/bin/run_tests.sh'],
environ=_FORCE_ENVIRON_FOR_WRAPPERS)]
def pre_build_steps(self):
return []
def make_targets(self):
return ['static_c', 'shared_c']
def make_options(self):
return []
def build_steps(self):
return [['tools/run_tests/helper_scripts/build_php.sh']]
def post_tests_steps(self):
return [['tools/run_tests/helper_scripts/post_tests_php.sh']]
def makefile_name(self):
return 'Makefile'
def dockerfile_dir(self):
return 'tools/dockerfile/test/php7_jessie_%s' % _docker_arch_suffix(self.args.arch)
def __str__(self):
return 'php7'
class PythonConfig(collections.namedtuple('PythonConfig', [
'name', 'build', 'run'])):
"""Tuple of commands (named s.t. 'what it says on the tin' applies)"""
class PythonLanguage(object):
def configure(self, config, args):
self.config = config
self.args = args
self.pythons = self._get_pythons(self.args)
def test_specs(self):
# load list of known test suites
with open('src/python/grpcio_tests/tests/tests.json') as tests_json_file:
tests_json = json.load(tests_json_file)
environment = dict(_FORCE_ENVIRON_FOR_WRAPPERS)
return [self.config.job_spec(
config.run,
timeout_seconds=5*60,
environ=dict(list(environment.items()) +
[('GRPC_PYTHON_TESTRUNNER_FILTER', str(suite_name))]),
shortname='%s.test.%s' % (config.name, suite_name),)
for suite_name in tests_json
for config in self.pythons]
def pre_build_steps(self):
return []
def make_targets(self):
return []
def make_options(self):
return []
def build_steps(self):
return [config.build for config in self.pythons]
def post_tests_steps(self):
return []
def makefile_name(self):
return 'Makefile'
def dockerfile_dir(self):
return 'tools/dockerfile/test/python_%s_%s' % (self.python_manager_name(), _docker_arch_suffix(self.args.arch))
def python_manager_name(self):
return 'pyenv' if self.args.compiler in ['python3.5', 'python3.6'] else 'jessie'
def _get_pythons(self, args):
if args.arch == 'x86':
bits = '32'
else:
bits = '64'
if os.name == 'nt':
shell = ['bash']
builder = [os.path.abspath('tools/run_tests/helper_scripts/build_python_msys2.sh')]
builder_prefix_arguments = ['MINGW{}'.format(bits)]
venv_relative_python = ['Scripts/python.exe']
toolchain = ['mingw32']
else:
shell = []
builder = [os.path.abspath('tools/run_tests/helper_scripts/build_python.sh')]
builder_prefix_arguments = []
venv_relative_python = ['bin/python']
toolchain = ['unix']
runner = [os.path.abspath('tools/run_tests/helper_scripts/run_python.sh')]
config_vars = _PythonConfigVars(shell, builder, builder_prefix_arguments,
venv_relative_python, toolchain, runner)
python27_config = _python_config_generator(name='py27', major='2',
minor='7', bits=bits,
config_vars=config_vars)
python34_config = _python_config_generator(name='py34', major='3',
minor='4', bits=bits,
config_vars=config_vars)
python35_config = _python_config_generator(name='py35', major='3',
minor='5', bits=bits,
config_vars=config_vars)
python36_config = _python_config_generator(name='py36', major='3',
minor='6', bits=bits,
config_vars=config_vars)
pypy27_config = _pypy_config_generator(name='pypy', major='2',
config_vars=config_vars)
pypy32_config = _pypy_config_generator(name='pypy3', major='3',
config_vars=config_vars)
if args.compiler == 'default':
if os.name == 'nt':
return (python27_config,)
else:
return (python27_config, python34_config,)
elif args.compiler == 'python2.7':
return (python27_config,)
elif args.compiler == 'python3.4':
return (python34_config,)
elif args.compiler == 'python3.5':
return (python35_config,)
elif args.compiler == 'python3.6':
return (python36_config,)
elif args.compiler == 'pypy':
return (pypy27_config,)
elif args.compiler == 'pypy3':
return (pypy32_config,)
else:
raise Exception('Compiler %s not supported.' % args.compiler)
def __str__(self):
return 'python'
class RubyLanguage(object):
def configure(self, config, args):
self.config = config
self.args = args
_check_compiler(self.args.compiler, ['default'])
def test_specs(self):
return [self.config.job_spec(['tools/run_tests/helper_scripts/run_ruby.sh'],
timeout_seconds=10*60,
environ=_FORCE_ENVIRON_FOR_WRAPPERS)]
def pre_build_steps(self):
return [['tools/run_tests/helper_scripts/pre_build_ruby.sh']]
def make_targets(self):
return []
def make_options(self):
return []
def build_steps(self):
return [['tools/run_tests/helper_scripts/build_ruby.sh']]
def post_tests_steps(self):
return [['tools/run_tests/helper_scripts/post_tests_ruby.sh']]
def makefile_name(self):
return 'Makefile'
def dockerfile_dir(self):
return 'tools/dockerfile/test/ruby_jessie_%s' % _docker_arch_suffix(self.args.arch)
def __str__(self):
return 'ruby'
class CSharpLanguage(object):
def __init__(self):
self.platform = platform_string()
def configure(self, config, args):
self.config = config
self.args = args
if self.platform == 'windows':
_check_compiler(self.args.compiler, ['coreclr', 'default'])
_check_arch(self.args.arch, ['default'])
self._cmake_arch_option = 'x64' if self.args.compiler == 'coreclr' else 'Win32'
self._make_options = []
else:
_check_compiler(self.args.compiler, ['default', 'coreclr'])
if self.platform == 'linux' and self.args.compiler == 'coreclr':
self._docker_distro = 'coreclr'
else:
self._docker_distro = 'jessie'
if self.platform == 'mac':
# TODO(jtattermusch): EMBED_ZLIB=true currently breaks the mac build
self._make_options = ['EMBED_OPENSSL=true']
if self.args.compiler != 'coreclr':
# On Mac, official distribution of mono is 32bit.
self._make_options += ['CFLAGS=-m32', 'LDFLAGS=-m32']
else:
self._make_options = ['EMBED_OPENSSL=true', 'EMBED_ZLIB=true']
def test_specs(self):
with open('src/csharp/tests.json') as f:
tests_by_assembly = json.load(f)
msbuild_config = _MSBUILD_CONFIG[self.config.build_config]
nunit_args = ['--labels=All']
assembly_subdir = 'bin/%s' % msbuild_config
assembly_extension = '.exe'
if self.args.compiler == 'coreclr':
assembly_subdir += '/netcoreapp1.0'
runtime_cmd = ['dotnet', 'exec']
assembly_extension = '.dll'
else:
nunit_args += ['--noresult', '--workers=1']
if self.platform == 'windows':
runtime_cmd = []
else:
runtime_cmd = ['mono']
specs = []
for assembly in six.iterkeys(tests_by_assembly):
assembly_file = 'src/csharp/%s/%s/%s%s' % (assembly,
assembly_subdir,
assembly,
assembly_extension)
if self.config.build_config != 'gcov' or self.platform != 'windows':
# normally, run each test as a separate process
for test in tests_by_assembly[assembly]:
cmdline = runtime_cmd + [assembly_file, '--test=%s' % test] + nunit_args
specs.append(self.config.job_spec(cmdline,
shortname='csharp.%s' % test,
environ=_FORCE_ENVIRON_FOR_WRAPPERS))
else:
# For C# test coverage, run all tests from the same assembly at once
# using OpenCover.Console (only works on Windows).
cmdline = ['src\\csharp\\packages\\OpenCover.4.6.519\\tools\\OpenCover.Console.exe',
'-target:%s' % assembly_file,
'-targetdir:src\\csharp',
'-targetargs:%s' % ' '.join(nunit_args),
'-filter:+[Grpc.Core]*',
'-register:user',
'-output:src\\csharp\\coverage_csharp_%s.xml' % assembly]
# set really high cpu_cost to make sure instances of OpenCover.Console run exclusively
# to prevent problems with registering the profiler.
run_exclusive = 1000000
specs.append(self.config.job_spec(cmdline,
shortname='csharp.coverage.%s' % assembly,
cpu_cost=run_exclusive,
environ=_FORCE_ENVIRON_FOR_WRAPPERS))
return specs
def pre_build_steps(self):
if self.platform == 'windows':
return [['tools\\run_tests\\helper_scripts\\pre_build_csharp.bat', self._cmake_arch_option]]
else:
return [['tools/run_tests/helper_scripts/pre_build_csharp.sh']]
def make_targets(self):
return ['grpc_csharp_ext']
def make_options(self):
    return self._make_options
def build_steps(self):
if self.args.compiler == 'coreclr':
if self.platform == 'windows':
return [['tools\\run_tests\\helper_scripts\\build_csharp_coreclr.bat']]
else:
return [['tools/run_tests/helper_scripts/build_csharp_coreclr.sh']]
else:
if self.platform == 'windows':
return [['vsprojects\\build_vs2015.bat',
'src/csharp/Grpc.sln',
'/p:Configuration=%s' % _MSBUILD_CONFIG[self.config.build_config]]]
else:
return [['tools/run_tests/helper_scripts/build_csharp.sh']]
def post_tests_steps(self):
if self.platform == 'windows':
return [['tools\\run_tests\\helper_scripts\\post_tests_csharp.bat']]
else:
return [['tools/run_tests/helper_scripts/post_tests_csharp.sh']]
def makefile_name(self):
if self.platform == 'windows':
return 'cmake/build/%s/Makefile' % self._cmake_arch_option
else:
return 'Makefile'
def dockerfile_dir(self):
return 'tools/dockerfile/test/csharp_%s_%s' % (self._docker_distro,
_docker_arch_suffix(self.args.arch))
def __str__(self):
return 'csharp'
class ObjCLanguage(object):
def configure(self, config, args):
self.config = config
self.args = args
_check_compiler(self.args.compiler, ['default'])
def test_specs(self):
return [
self.config.job_spec(['src/objective-c/tests/run_tests.sh'],
timeout_seconds=60*60,
shortname='objc-tests',
environ=_FORCE_ENVIRON_FOR_WRAPPERS),
self.config.job_spec(['src/objective-c/tests/build_example_test.sh'],
timeout_seconds=30*60,
shortname='objc-examples-build',
environ=_FORCE_ENVIRON_FOR_WRAPPERS),
]
def pre_build_steps(self):
return []
def make_targets(self):
return ['interop_server']
def make_options(self):
return []
def build_steps(self):
return [['src/objective-c/tests/build_tests.sh']]
def post_tests_steps(self):
return []
def makefile_name(self):
return 'Makefile'
def dockerfile_dir(self):
return None
def __str__(self):
return 'objc'
class Sanity(object):
def configure(self, config, args):
self.config = config
self.args = args
_check_compiler(self.args.compiler, ['default'])
def test_specs(self):
import yaml
with open('tools/run_tests/sanity/sanity_tests.yaml', 'r') as f:
      environ = {'TEST': 'true'}
if _is_use_docker_child():
environ['CLANG_FORMAT_SKIP_DOCKER'] = 'true'
return [self.config.job_spec(cmd['script'].split(),
timeout_seconds=30*60,
environ=environ,
cpu_cost=cmd.get('cpu_cost', 1))
for cmd in yaml.load(f)]
def pre_build_steps(self):
return []
def make_targets(self):
return ['run_dep_checks']
def make_options(self):
return []
def build_steps(self):
return []
def post_tests_steps(self):
return []
def makefile_name(self):
return 'Makefile'
def dockerfile_dir(self):
return 'tools/dockerfile/test/sanity'
def __str__(self):
return 'sanity'
class NodeExpressLanguage(object):
  """Dummy Node express test target to enable running express performance
  benchmarks."""
def __init__(self):
self.platform = platform_string()
def configure(self, config, args):
self.config = config
self.args = args
_check_compiler(self.args.compiler, ['default', 'node0.12',
'node4', 'node5', 'node6'])
if self.args.compiler == 'default':
self.node_version = '4'
else:
# Take off the word "node"
self.node_version = self.args.compiler[4:]
def test_specs(self):
return []
def pre_build_steps(self):
if self.platform == 'windows':
return [['tools\\run_tests\\helper_scripts\\pre_build_node.bat']]
else:
return [['tools/run_tests/helper_scripts/pre_build_node.sh', self.node_version]]
def make_targets(self):
return []
def make_options(self):
return []
def build_steps(self):
return []
def post_tests_steps(self):
return []
def makefile_name(self):
return 'Makefile'
def dockerfile_dir(self):
return 'tools/dockerfile/test/node_jessie_%s' % _docker_arch_suffix(self.args.arch)
def __str__(self):
return 'node_express'
# different configurations we can run under
with open('tools/run_tests/generated/configs.json') as f:
_CONFIGS = dict((cfg['config'], Config(**cfg)) for cfg in ast.literal_eval(f.read()))
_LANGUAGES = {
'c++': CLanguage('cxx', 'c++'),
'c': CLanguage('c', 'c'),
'node': NodeLanguage(),
'node_express': NodeExpressLanguage(),
'php': PhpLanguage(),
'php7': Php7Language(),
'python': PythonLanguage(),
'ruby': RubyLanguage(),
'csharp': CSharpLanguage(),
'objc' : ObjCLanguage(),
'sanity': Sanity()
}
_MSBUILD_CONFIG = {
'dbg': 'Debug',
'opt': 'Release',
'gcov': 'Debug',
}
def _windows_arch_option(arch):
"""Returns msbuild cmdline option for selected architecture."""
if arch == 'default' or arch == 'x86':
return '/p:Platform=Win32'
elif arch == 'x64':
return '/p:Platform=x64'
else:
print('Architecture %s not supported.' % arch)
sys.exit(1)
def _check_arch_option(arch):
"""Checks that architecture option is valid."""
if platform_string() == 'windows':
_windows_arch_option(arch)
elif platform_string() == 'linux':
# On linux, we need to be running under docker with the right architecture.
runtime_arch = platform.architecture()[0]
if arch == 'default':
return
elif runtime_arch == '64bit' and arch == 'x64':
return
elif runtime_arch == '32bit' and arch == 'x86':
return
else:
print('Architecture %s does not match current runtime architecture.' % arch)
sys.exit(1)
else:
    if arch != 'default':
      print('Architecture %s not supported on current platform.' % arch)
      sys.exit(1)
def _windows_build_bat(compiler):
"""Returns name of build.bat for selected compiler."""
# For CoreCLR, fall back to the default compiler for C core
if compiler == 'default' or compiler == 'vs2013':
return 'vsprojects\\build_vs2013.bat'
elif compiler == 'vs2015':
return 'vsprojects\\build_vs2015.bat'
else:
print('Compiler %s not supported.' % compiler)
sys.exit(1)
def _windows_toolset_option(compiler):
"""Returns msbuild PlatformToolset for selected compiler."""
# For CoreCLR, fall back to the default compiler for C core
if compiler == 'default' or compiler == 'vs2013' or compiler == 'coreclr':
return '/p:PlatformToolset=v120'
elif compiler == 'vs2015':
return '/p:PlatformToolset=v140'
else:
print('Compiler %s not supported.' % compiler)
sys.exit(1)
def _docker_arch_suffix(arch):
"""Returns suffix to dockerfile dir to use."""
if arch == 'default' or arch == 'x64':
return 'x64'
elif arch == 'x86':
return 'x86'
else:
print('Architecture %s not supported with current settings.' % arch)
sys.exit(1)
def runs_per_test_type(arg_str):
  """Auxiliary function to parse the "runs_per_test" flag.
Returns:
A positive integer or 0, the latter indicating an infinite number of
runs.
Raises:
argparse.ArgumentTypeError: Upon invalid input.
"""
if arg_str == 'inf':
return 0
try:
n = int(arg_str)
if n <= 0: raise ValueError
return n
except:
msg = '\'{}\' is not a positive integer or \'inf\''.format(arg_str)
raise argparse.ArgumentTypeError(msg)
def percent_type(arg_str):
pct = float(arg_str)
if pct > 100 or pct < 0:
raise argparse.ArgumentTypeError(
"'%f' is not a valid percentage in the [0, 100] range" % pct)
return pct
# This is math.isclose in python >= 3.5
def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
return abs(a-b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
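# Example: `not isclose(args.sample_percent, 100.0)` is used below to detect
# whether a non-default --sample_percent was requested.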
# parse command line
argp = argparse.ArgumentParser(description='Run grpc tests.')
argp.add_argument('-c', '--config',
choices=sorted(_CONFIGS.keys()),
default='opt')
argp.add_argument('-n', '--runs_per_test', default=1, type=runs_per_test_type,
help='A positive integer or "inf". If "inf", all tests will run in an '
'infinite loop. Especially useful in combination with "-f"')
argp.add_argument('-r', '--regex', default='.*', type=str)
argp.add_argument('--regex_exclude', default='', type=str)
argp.add_argument('-j', '--jobs', default=multiprocessing.cpu_count(), type=int)
argp.add_argument('-s', '--slowdown', default=1.0, type=float)
argp.add_argument('-p', '--sample_percent', default=100.0, type=percent_type,
help='Run a random sample with that percentage of tests')
argp.add_argument('-f', '--forever',
default=False,
action='store_const',
const=True)
argp.add_argument('-t', '--travis',
default=False,
action='store_const',
const=True)
argp.add_argument('--newline_on_success',
default=False,
action='store_const',
const=True)
argp.add_argument('-l', '--language',
choices=['all'] + sorted(_LANGUAGES.keys()),
nargs='+',
default=['all'])
argp.add_argument('-S', '--stop_on_failure',
default=False,
action='store_const',
const=True)
argp.add_argument('--use_docker',
default=False,
action='store_const',
const=True,
help='Run all the tests under docker. That provides ' +
'additional isolation and prevents the need to install ' +
'language specific prerequisites. Only available on Linux.')
argp.add_argument('--allow_flakes',
default=False,
action='store_const',
const=True,
help='Allow flaky tests to show as passing (re-runs failed tests up to five times)')
argp.add_argument('--arch',
choices=['default', 'x86', 'x64'],
default='default',
help='Selects architecture to target. For some platforms "default" is the only supported choice.')
argp.add_argument('--compiler',
choices=['default',
'gcc4.4', 'gcc4.6', 'gcc4.8', 'gcc4.9', 'gcc5.3',
'clang3.4', 'clang3.5', 'clang3.6', 'clang3.7',
'vs2013', 'vs2015',
'python2.7', 'python3.4', 'python3.5', 'python3.6', 'pypy', 'pypy3',
'node0.12', 'node4', 'node5', 'node6', 'node7',
'electron1.3',
'coreclr',
'cmake'],
default='default',
help='Selects compiler to use. Allowed values depend on the platform and language.')
argp.add_argument('--iomgr_platform',
choices=['native', 'uv'],
default='native',
help='Selects iomgr platform to build on')
argp.add_argument('--build_only',
default=False,
action='store_const',
const=True,
                  help="Perform all the build steps but don't run any tests.")
argp.add_argument('--measure_cpu_costs', default=False, action='store_const', const=True,
help='Measure the cpu costs of tests')
argp.add_argument('--update_submodules', default=[], nargs='*',
help='Update some submodules before building. If any are updated, also run generate_projects. ' +
'Submodules are specified as SUBMODULE_NAME:BRANCH; if BRANCH is omitted, master is assumed.')
argp.add_argument('-a', '--antagonists', default=0, type=int)
argp.add_argument('-x', '--xml_report', default=None, type=str,
help='Generates a JUnit-compatible XML report')
argp.add_argument('--report_suite_name', default='tests', type=str,
help='Test suite name to use in generated JUnit XML report')
argp.add_argument('--quiet_success',
default=False,
action='store_const',
const=True,
                  help="Don't print anything when a test passes. Passing tests will also not be reported in the XML report. " +
                       'Useful when running many iterations of each test (argument -n).')
argp.add_argument('--force_default_poller', default=False, action='store_const', const=True,
                  help="Don't try to iterate over many polling strategies when they exist")
args = argp.parse_args()
if args.force_default_poller:
_POLLING_STRATEGIES = {}
jobset.measure_cpu_costs = args.measure_cpu_costs
# update submodules if necessary
need_to_regenerate_projects = False
for spec in args.update_submodules:
spec = spec.split(':', 1)
if len(spec) == 1:
submodule = spec[0]
branch = 'master'
elif len(spec) == 2:
submodule = spec[0]
branch = spec[1]
cwd = 'third_party/%s' % submodule
def git(cmd, cwd=cwd):
print('in %s: git %s' % (cwd, cmd))
run_shell_command('git %s' % cmd, cwd=cwd)
git('fetch')
git('checkout %s' % branch)
git('pull origin %s' % branch)
if os.path.exists('src/%s/gen_build_yaml.py' % submodule):
need_to_regenerate_projects = True
if need_to_regenerate_projects:
if jobset.platform_string() == 'linux':
run_shell_command('tools/buildgen/generate_projects.sh')
else:
print('WARNING: may need to regenerate projects, but since we are not on')
print(' Linux this step is being skipped. Compilation MAY fail.')
# grab config
run_config = _CONFIGS[args.config]
build_config = run_config.build_config
if args.travis:
_FORCE_ENVIRON_FOR_WRAPPERS = {'GRPC_TRACE': 'api'}
if 'all' in args.language:
lang_list = _LANGUAGES.keys()
else:
lang_list = args.language
# We don't support code coverage on some languages
if 'gcov' in args.config:
for bad in ['objc', 'sanity']:
if bad in lang_list:
lang_list.remove(bad)
languages = set(_LANGUAGES[l] for l in lang_list)
for l in languages:
l.configure(run_config, args)
language_make_options=[]
if any(language.make_options() for language in languages):
  if 'gcov' not in args.config and len(languages) != 1:
print('languages with custom make options cannot be built simultaneously with other languages')
sys.exit(1)
else:
language_make_options = next(iter(languages)).make_options()
if args.use_docker:
if not args.travis:
print('Seen --use_docker flag, will run tests under docker.')
print('')
print('IMPORTANT: The changes you are testing need to be locally committed')
print('because only the committed changes in the current branch will be')
print('copied to the docker environment.')
time.sleep(5)
dockerfile_dirs = set([l.dockerfile_dir() for l in languages])
if len(dockerfile_dirs) > 1:
if 'gcov' in args.config:
dockerfile_dir = 'tools/dockerfile/test/multilang_jessie_x64'
      print('Using multilang_jessie_x64 docker image for code coverage for '
            'all languages.')
else:
      print('Languages to be tested require running under different docker '
            'images.')
sys.exit(1)
else:
dockerfile_dir = next(iter(dockerfile_dirs))
  child_argv = [arg for arg in sys.argv if arg != '--use_docker']
run_tests_cmd = 'python tools/run_tests/run_tests.py %s' % ' '.join(child_argv[1:])
env = os.environ.copy()
env['RUN_TESTS_COMMAND'] = run_tests_cmd
env['DOCKERFILE_DIR'] = dockerfile_dir
env['DOCKER_RUN_SCRIPT'] = 'tools/run_tests/dockerize/docker_run_tests.sh'
if args.xml_report:
env['XML_REPORT'] = args.xml_report
if not args.travis:
env['TTY_FLAG'] = '-t' # enables Ctrl-C when not on Jenkins.
subprocess.check_call('tools/run_tests/dockerize/build_docker_and_run_tests.sh',
shell=True,
env=env)
sys.exit(0)
_check_arch_option(args.arch)
def make_jobspec(cfg, targets, makefile='Makefile'):
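  """Construct build JobSpecs for the given targets and config.

  On windows this dispatches to `cmake --build` (for makefiles under
  cmake/build/) or to the msbuild wrapper .bat scripts; elsewhere it
  invokes plain make.
  """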
if platform_string() == 'windows':
if makefile.startswith('cmake/build/'):
return [jobset.JobSpec(['cmake', '--build', '.',
'--target', '%s' % target,
'--config', _MSBUILD_CONFIG[cfg]],
cwd=os.path.dirname(makefile),
timeout_seconds=None) for target in targets]
extra_args = []
    # Use parallel compilation: empirically /m:2 gives the best
    # performance/price ratio and should prevent overloading the
    # windows workers.
extra_args.extend(['/m:2'])
# disable PDB generation: it's broken, and we don't need it during CI
extra_args.extend(['/p:Jenkins=true'])
return [
jobset.JobSpec([_windows_build_bat(args.compiler),
'vsprojects\\%s.sln' % target,
'/p:Configuration=%s' % _MSBUILD_CONFIG[cfg]] +
extra_args +
language_make_options,
shell=True, timeout_seconds=None)
for target in targets]
else:
if targets and makefile.startswith('cmake/build/'):
# With cmake, we've passed all the build configuration in the pre-build step already
return [jobset.JobSpec([os.getenv('MAKE', 'make'),
'-j', '%d' % args.jobs] +
targets,
cwd='cmake/build',
timeout_seconds=None)]
if targets:
return [jobset.JobSpec([os.getenv('MAKE', 'make'),
'-f', makefile,
'-j', '%d' % args.jobs,
'EXTRA_DEFINES=GRPC_TEST_SLOWDOWN_MACHINE_FACTOR=%f' % args.slowdown,
'CONFIG=%s' % cfg] +
language_make_options +
([] if not args.travis else ['JENKINS_BUILD=1']) +
targets,
timeout_seconds=None)]
else:
return []
make_targets = {}
for l in languages:
makefile = l.makefile_name()
make_targets[makefile] = make_targets.get(makefile, set()).union(
set(l.make_targets()))
def build_step_environ(cfg):
environ = {'CONFIG': cfg}
msbuild_cfg = _MSBUILD_CONFIG.get(cfg)
if msbuild_cfg:
environ['MSBUILD_CONFIG'] = msbuild_cfg
return environ
build_steps = list(set(
jobset.JobSpec(cmdline, environ=build_step_environ(build_config), flake_retries=5)
for l in languages
for cmdline in l.pre_build_steps()))
if make_targets:
make_commands = itertools.chain.from_iterable(make_jobspec(build_config, list(targets), makefile) for (makefile, targets) in make_targets.items())
build_steps.extend(set(make_commands))
build_steps.extend(set(
jobset.JobSpec(cmdline, environ=build_step_environ(build_config), timeout_seconds=None)
for l in languages
for cmdline in l.build_steps()))
post_tests_steps = list(set(
jobset.JobSpec(cmdline, environ=build_step_environ(build_config))
for l in languages
for cmdline in l.post_tests_steps()))
runs_per_test = args.runs_per_test
forever = args.forever
def _shut_down_legacy_server(legacy_server_port):
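  """Ask a previously started legacy port server to shut itself down.

  Probes the server's /version_number endpoint and, if it responds,
  requests termination through its /quitquitquit endpoint.
  """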
try:
version = int(urllib.request.urlopen(
'http://localhost:%d/version_number' % legacy_server_port,
timeout=10).read())
except:
pass
else:
urllib.request.urlopen(
'http://localhost:%d/quitquitquit' % legacy_server_port).read()
def _calculate_num_runs_failures(list_of_results):
  """Calculate the number of runs and failures for a particular test.
Args:
list_of_results: (List) of JobResult object.
Returns:
A tuple of total number of runs and failures.
"""
num_runs = len(list_of_results) # By default, there is 1 run per JobResult.
num_failures = 0
for jobresult in list_of_results:
if jobresult.retries > 0:
num_runs += jobresult.retries
if jobresult.num_failures > 0:
num_failures += jobresult.num_failures
return num_runs, num_failures
# _build_and_run results
class BuildAndRunError(object):
BUILD = object()
TEST = object()
POST_TEST = object()
# returns a list of things that failed (or an empty list on success)
def _build_and_run(
check_cancelled, newline_on_success, xml_report=None, build_only=False):
"""Do one pass of building & running tests."""
# build latest sequentially
num_failures, resultset = jobset.run(
build_steps, maxjobs=1, stop_on_failure=True,
newline_on_success=newline_on_success, travis=args.travis)
if num_failures:
return [BuildAndRunError.BUILD]
if build_only:
if xml_report:
report_utils.render_junit_xml_report(resultset, xml_report,
suite_name=args.report_suite_name)
return []
# start antagonists
antagonists = [subprocess.Popen(['tools/run_tests/python_utils/antagonist.py'])
for _ in range(0, args.antagonists)]
start_port_server.start_port_server()
resultset = None
num_test_failures = 0
try:
infinite_runs = runs_per_test == 0
one_run = set(
spec
for language in languages
for spec in language.test_specs()
if (re.search(args.regex, spec.shortname) and
(args.regex_exclude == '' or
not re.search(args.regex_exclude, spec.shortname))))
    # When running on travis, we want our test runs to be as similar as
    # possible for reproducibility purposes.
if args.travis:
massaged_one_run = sorted(one_run, key=lambda x: x.shortname)
else:
# whereas otherwise, we want to shuffle things up to give all tests a
# chance to run.
massaged_one_run = list(one_run) # random.sample needs an indexable seq.
num_jobs = len(massaged_one_run)
# for a random sample, get as many as indicated by the 'sample_percent'
# argument. By default this arg is 100, resulting in a shuffle of all
# jobs.
sample_size = int(num_jobs * args.sample_percent/100.0)
massaged_one_run = random.sample(massaged_one_run, sample_size)
if not isclose(args.sample_percent, 100.0):
assert args.runs_per_test == 1, "Can't do sampling (-p) over multiple runs (-n)."
print("Running %d tests out of %d (~%d%%)" %
(sample_size, num_jobs, args.sample_percent))
if infinite_runs:
assert len(massaged_one_run) > 0, 'Must have at least one test for a -n inf run'
runs_sequence = (itertools.repeat(massaged_one_run) if infinite_runs
else itertools.repeat(massaged_one_run, runs_per_test))
all_runs = itertools.chain.from_iterable(runs_sequence)
if args.quiet_success:
jobset.message('START', 'Running tests quietly, only failing tests will be reported', do_newline=True)
num_test_failures, resultset = jobset.run(
all_runs, check_cancelled, newline_on_success=newline_on_success,
travis=args.travis, maxjobs=args.jobs,
stop_on_failure=args.stop_on_failure,
quiet_success=args.quiet_success)
if resultset:
for k, v in sorted(resultset.items()):
num_runs, num_failures = _calculate_num_runs_failures(v)
if num_failures > 0:
if num_failures == num_runs: # what about infinite_runs???
jobset.message('FAILED', k, do_newline=True)
else:
jobset.message(
'FLAKE', '%s [%d/%d runs flaked]' % (k, num_failures, num_runs),
do_newline=True)
finally:
for antagonist in antagonists:
antagonist.kill()
if xml_report and resultset:
report_utils.render_junit_xml_report(resultset, xml_report,
suite_name=args.report_suite_name)
number_failures, _ = jobset.run(
post_tests_steps, maxjobs=1, stop_on_failure=True,
newline_on_success=newline_on_success, travis=args.travis)
out = []
if number_failures:
out.append(BuildAndRunError.POST_TEST)
if num_test_failures:
out.append(BuildAndRunError.TEST)
return out
if forever:
success = True
while True:
dw = watch_dirs.DirWatcher(['src', 'include', 'test', 'examples'])
initial_time = dw.most_recent_change()
have_files_changed = lambda: dw.most_recent_change() != initial_time
    previous_success = success
    errors = _build_and_run(check_cancelled=have_files_changed,
                            newline_on_success=False,
                            build_only=args.build_only)
    success = not errors
    if not previous_success and success:
jobset.message('SUCCESS',
'All tests are now passing properly',
do_newline=True)
jobset.message('IDLE', 'No change detected')
while not have_files_changed():
time.sleep(1)
else:
errors = _build_and_run(check_cancelled=lambda: False,
newline_on_success=args.newline_on_success,
xml_report=args.xml_report,
build_only=args.build_only)
if not errors:
jobset.message('SUCCESS', 'All tests passed', do_newline=True)
else:
jobset.message('FAILED', 'Some tests failed', do_newline=True)
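  # The exit code is a bitmask so callers can tell which phase failed:
  # 1 = build failure, 2 = test failure, 4 = post-test step failure.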
exit_code = 0
if BuildAndRunError.BUILD in errors:
exit_code |= 1
if BuildAndRunError.TEST in errors:
exit_code |= 2
if BuildAndRunError.POST_TEST in errors:
exit_code |= 4
sys.exit(exit_code)
|
|
# Copyright 2015 Dell Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Volume driver for Dell Storage Center."""
from oslo_log import log as logging
from oslo_utils import excutils
from cinder import exception
from cinder.i18n import _, _LE, _LW
from cinder import interface
from cinder.volume import driver
from cinder.volume.drivers.dell import dell_storagecenter_common
from cinder.zonemanager import utils as fczm_utils
LOG = logging.getLogger(__name__)
@interface.volumedriver
class DellStorageCenterFCDriver(dell_storagecenter_common.DellCommonDriver,
driver.FibreChannelDriver):
"""Implements commands for Dell Storage Center FC management.
To enable the driver add the following line to the cinder configuration:
volume_driver=cinder.volume.drivers.dell.dell_storagecenter_fc.\
DellStorageCenterFCDriver
Version history:
.. code-block:: none
1.0.0 - Initial driver
1.1.0 - Added extra spec support for Storage Profile selection
1.2.0 - Added consistency group support.
2.0.0 - Switched to inheriting functional objects rather than volume
driver.
2.1.0 - Added support for ManageableVD.
2.2.0 - Driver retype support for switching volume's Storage Profile
2.3.0 - Added Legacy Port Mode Support
2.3.1 - Updated error handling.
2.4.0 - Added Replication V2 support.
2.4.1 - Updated Replication support to V2.1.
2.5.0 - ManageableSnapshotsVD implemented.
3.0.0 - ProviderID utilized.
3.1.0 - Failback supported.
3.2.0 - Live Volume support.
"""
VERSION = '3.2.0'
CI_WIKI_NAME = "Dell_Storage_CI"
def __init__(self, *args, **kwargs):
super(DellStorageCenterFCDriver, self).__init__(*args, **kwargs)
self.backend_name =\
self.configuration.safe_get('volume_backend_name') or 'Dell-FC'
self.storage_protocol = 'FC'
@fczm_utils.AddFCZone
def initialize_connection(self, volume, connector):
"""Initializes the connection and returns connection info.
Assign any created volume to a compute node/host so that it can be
used from that host.
The driver returns a driver_volume_type of 'fibre_channel'.
The target_wwn can be a single entry or a list of wwns that
correspond to the list of remote wwn(s) that will export the volume.
"""
        # We use the volume id as the volume name since it is a
        # known unique name.
volume_name = volume.get('id')
provider_id = volume.get('provider_id')
islivevol = self._is_live_vol(volume)
LOG.debug('Initialize connection: %s', volume_name)
with self._client.open_connection() as api:
try:
wwpns = connector.get('wwpns')
# Find our server.
scserver = self._find_server(api, wwpns)
# No? Create it.
if scserver is None:
scserver = api.create_server(
wwpns, self.configuration.dell_server_os)
# Find the volume on the storage center.
scvolume = api.find_volume(volume_name, provider_id, islivevol)
if scserver is not None and scvolume is not None:
mapping = api.map_volume(scvolume, scserver)
if mapping is not None:
# Since we just mapped our volume we had best update
# our sc volume object.
scvolume = api.get_volume(scvolume['instanceId'])
lun, targets, init_targ_map = api.find_wwns(scvolume,
scserver)
# Do we have extra live volume work?
if islivevol:
# Get our volume and our swap state.
sclivevolume, swapped = api.get_live_volume(
provider_id)
# Do not map to a failed over volume.
if sclivevolume and not swapped:
# Now map our secondary.
lvlun, lvtargets, lvinit_targ_map = (
self.initialize_secondary(api,
sclivevolume,
wwpns))
                                # Mapped the secondary; add its info to our lists.
targets += lvtargets
init_targ_map.update(lvinit_targ_map)
# Roll up our return data.
if lun is not None and len(targets) > 0:
data = {'driver_volume_type': 'fibre_channel',
'data': {'target_lun': lun,
'target_discovered': True,
'target_wwn': targets,
'initiator_target_map':
init_targ_map,
'discard': True}}
LOG.debug('Return FC data: %s', data)
return data
LOG.error(_LE('Lun mapping returned null!'))
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to initialize connection.'))
            # We get here because our mapping is None, so blow up.
raise exception.VolumeBackendAPIException(_('Unable to map volume.'))
def _find_server(self, api, wwns, ssn=-1):
for wwn in wwns:
scserver = api.find_server(wwn, ssn)
if scserver is not None:
return scserver
return None
def initialize_secondary(self, api, sclivevolume, wwns):
"""Initialize the secondary connection of a live volume pair.
:param api: Dell SC api object.
:param sclivevolume: Dell SC live volume object.
:param wwns: Cinder list of wwns from the connector.
:return: lun, targets and initiator target map.
"""
# Find our server.
secondary = self._find_server(
api, wwns, sclivevolume['secondaryScSerialNumber'])
# No? Create it.
if secondary is None:
secondary = api.create_server(
wwns, self.configuration.dell_server_os,
sclivevolume['secondaryScSerialNumber'])
if secondary:
if api.map_secondary_volume(sclivevolume, secondary):
# Get mappings.
secondaryvol = api.get_volume(
sclivevolume['secondaryVolume']['instanceId'])
if secondaryvol:
return api.find_wwns(secondaryvol, secondary)
LOG.warning(_LW('Unable to map live volume secondary volume'
' %(vol)s to secondary server wwns: %(wwns)r'),
{'vol': sclivevolume['secondaryVolume']['instanceName'],
'wwns': wwns})
return None, [], {}
@fczm_utils.RemoveFCZone
def terminate_connection(self, volume, connector, force=False, **kwargs):
# Get our volume name
volume_name = volume.get('id')
provider_id = volume.get('provider_id')
islivevol = self._is_live_vol(volume)
LOG.debug('Terminate connection: %s', volume_name)
with self._client.open_connection() as api:
try:
wwpns = connector.get('wwpns')
scserver = self._find_server(api, wwpns)
# Find the volume on the storage center.
scvolume = api.find_volume(volume_name, provider_id, islivevol)
# Get our target map so we can return it to free up a zone.
lun, targets, init_targ_map = api.find_wwns(scvolume, scserver)
# Do we have extra live volume work?
if islivevol:
# Get our volume and our swap state.
sclivevolume, swapped = api.get_live_volume(
provider_id)
# Do not map to a failed over volume.
if sclivevolume and not swapped:
lvlun, lvtargets, lvinit_targ_map = (
self.terminate_secondary(api, sclivevolume, wwpns))
# Add to our return.
if lvlun:
targets += lvtargets
init_targ_map.update(lvinit_targ_map)
                # If we have a server and a volume, let's unmap them.
if (scserver is not None and
scvolume is not None and
api.unmap_volume(scvolume, scserver) is True):
LOG.debug('Connection terminated')
else:
raise exception.VolumeBackendAPIException(
_('Terminate connection failed'))
# basic return info...
info = {'driver_volume_type': 'fibre_channel',
'data': {}}
# if not then we return the target map so that
# the zone can be freed up.
if api.get_volume_count(scserver) == 0:
info['data'] = {'target_wwn': targets,
'initiator_target_map': init_targ_map}
return info
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to terminate connection'))
raise exception.VolumeBackendAPIException(
_('Terminate connection unable to connect to backend.'))
def terminate_secondary(self, api, sclivevolume, wwns):
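        """Terminate the secondary connection of a live volume pair.

        :param api: Dell SC api object.
        :param sclivevolume: Dell SC live volume object.
        :param wwns: Cinder list of wwns from the connector.
        :return: lun, targets and initiator target map for the secondary,
                 or (None, [], {}) if the server or volume was not found.
        """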
# Find our server.
secondary = self._find_server(
api, wwns, sclivevolume['secondaryScSerialNumber'])
secondaryvol = api.get_volume(
sclivevolume['secondaryVolume']['instanceId'])
if secondary and secondaryvol:
# Get our map.
lun, targets, init_targ_map = api.find_wwns(secondaryvol,
secondary)
            # If we have a server and a volume, let's unmap them.
ret = api.unmap_volume(secondaryvol, secondary)
LOG.debug('terminate_secondary: secondary volume %(name)s unmap '
'to secondary server %(server)s result: %(result)r',
{'name': secondaryvol['name'],
'server': secondary['name'],
'result': ret})
            # Return the mapping info for the secondary.
return lun, targets, init_targ_map
return None, [], {}
|
|
# (C) Copyright 2010 Bewype <http://www.bewype.org>
# sqlite3 import
from sqlite3 import Cursor as sqli3_cursor
from MySQLdb.cursors import Cursor as mysql_cursor
class ExecuteError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class InvalidClauseType(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class WhereClause(object):
def __init__(self, type_, criteria_list):
self.type_ = type_
self.criteria_list = criteria_list
    def serialize(self):
        if self.type_ in ['and', 'or']:
            return (' %s ' % self.type_).join(
                ['(%s)' % _c.serialize() if isinstance(_c, WhereClause)
                 else _c for _c in self.criteria_list])
        else:
            raise InvalidClauseType('Invalid type %s with criteria: %s'
                                    % (self.type_, self.criteria_list))
class and_(WhereClause):
def __init__(self, criteria_list):
WhereClause.__init__(self, 'and', criteria_list)
class or_(WhereClause):
def __init__(self, criteria_list):
WhereClause.__init__(self, 'or', criteria_list)
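# A minimal sketch of how these clauses compose (assuming raw string
# criteria such as "name = 'snake'" are passed through verbatim):
#
#     _where = or_(["category = 'reptile'",
#                   and_(["category = 'fish'", "name = 'tuna'"])])
#     _where.serialize()
#     # -> "category = 'reptile' or (category = 'fish' and name = 'tuna')"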
def _select_query(db, table_name, column_list=None, where=None, order_list=None,
order='asc', into=None):
# prepare query
_q = 'select %s' % (', '.join(column_list)
if isinstance(column_list, list) else '*')
_q = '%s from %s' % (_q, table_name)
    # set where clause
    if where is not None:
        _q = '%s where %s' % (_q, where.serialize())
    # set order by clause
    if order_list is not None:
        _q = '%s order by %s %s' % (_q, ', '.join(order_list), order)
    # add into params
    if into is None:
        _q = '%s;' % _q
    else:
        _q = '%s into %s;' % (_q, into)
# trigger query
_cursor = db.cursor()
# execute query
_r = _cursor.execute(_q)
# return db and cursor for additional work
return _cursor, _r
def count(db, table_name, where=None):
# get result and db for closing
_cursor, _count = _select_query(db, table_name, where=where)
# simple count factory
if isinstance(_cursor, sqli3_cursor):
_count = len(_cursor.fetchall())
elif isinstance(_cursor, mysql_cursor):
_count = _cursor.rowcount
else:
_count = None
# close the connection
_cursor.close()
# return count
return _count
def create_table(db, table_name, column_defs):
    """Create a table in the given db according to the passed table name and
    column definitions, ex.::
# params
_column_defs = [
('name', 'char(40)'),
('category', 'char(40)'),
...
]
# create_table
        create_table(db, 'animal', _column_defs)
"""
# prepare query
_q = 'create table %s' % table_name
_q = '%s (%s);' % (_q, ', '.join([' '.join(_d) for _d in column_defs]))
# return count
return execute(db, _q)
def drop_table(db, table_name):
# prepare query
_q = 'drop table %s;' % table_name
# return count
return execute(db, _q)
def delete(db, table_name, where=None):
# prepare query
_q = 'delete'
_q = '%s from %s' % (_q, table_name)
# criteria or not
if where is None:
_q = '%s;' % _q
else:
_q = '%s where %s;' % (_q, where.serialize())
# return count
return execute(db, _q)
def execute(db, query):
# get cursor
_cursor = db.cursor()
_in_error = False
# do it
try:
# execute
_result = _cursor.execute(query)
# commit your changes in the database
db.commit()
    except Exception as e:
# ensure _result var
_in_error = True
_result = e
# rollback in case there is any error
db.rollback()
# close the connection
_cursor.close()
# return result or raise an explicit error
if _in_error:
raise ExecuteError(_result)
else:
return _result
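# A minimal usage sketch with an in-memory sqlite3 database (the table and
# columns are illustrative only):
#
#     import sqlite3
#     _db = sqlite3.connect(':memory:')
#     execute(_db, 'create table animal (name char(40), category char(40));')
#     count(_db, 'animal')  # -> 0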
def insert(db, table_name, column_list, values):
    """Insert values into the given db and table, assuming the column list
    corresponds with the passed list of rows to add, ex.::
# params
_columns = ['name', 'category']
_values = [('snake', 'reptile'), ('frog', 'amphibian'), ...]
# insert
        insert(db, 'animal', _columns, _values)
Corresponding SQL query::
INSERT INTO animal (name, category)
VALUES
('snake', 'reptile'),
('frog', 'amphibian'),
('tuna', 'fish'), ...;
"""
raise ExecuteError('insert should use specific connector')
def _result(row, column_list=None):
# list all
if column_list is None:
return row
elif len(column_list) == 1:
return row[0]
else:
        return dict(zip(column_list, row))
def select(db, table_name, column_list=None, where=None, order_list=None,
order='asc'):
# get result and db for closing
_cursor, _r = _select_query(db, table_name, column_list=column_list,
where=where, order_list=order_list, order=order)
# sqlite cursor management
if isinstance(_r, sqli3_cursor):
_cursor = _r
else:
pass
# list all
    _all = [_result(_r, column_list=column_list) for _r in _cursor.fetchall()]
# close the connection
_cursor.close()
# return result list
return _all
def eager_select(db, table_name, column_list=None, where=None, order_list=None,
                 order='asc'):
    """Lazily select rows, yielding ``(cursor, row)`` pairs.

    If you stop consuming the eager select early, please do not forget to
    CLOSE the connection using the returned CURSOR.
    """
# get result and db for closing
_cursor, _r = _select_query(db, table_name, column_list=column_list,
where=where, order_list=order_list, order=order)
# sqlite cursor management
if isinstance(_r, sqli3_cursor):
_cursor = _r
else:
pass
# first row
_n = _cursor.fetchone()
if _n is None:
# close the connection
_cursor.close()
yield None, None
else:
        while _n:
_cur = _n
_n = _cursor.fetchone()
if _n is None:
# close the connection
_cursor.close()
# yield it
yield _cursor, _result(_cur, column_list=column_list)
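# A minimal usage sketch (column names are illustrative); if you stop
# consuming the generator early, close the yielded cursor yourself:
#
#     for _cursor, _row in eager_select(_db, 'animal',
#                                       column_list=['name', 'category']):
#         if _row is None:
#             break  # empty table; the cursor is already closed
#         print(_row['name'], _row['category'])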
def update(db, table_name, column_list, values, uri=None, where=None):
# prepare query
_q = 'update %s' % table_name
# prepare set
_set = ['%s = \'%s\'' % (_c, _v) for _c, _v in zip(column_list, values)]
# update query
_q = '%s set %s' % (_q, ', '.join(_set))
# criteria or not
if where is None:
_q = '%s;' % _q
else:
_q = '%s where %s;' % (_q, where.serialize())
# return count
return execute(db, _q)
|
|
# Copyright 2011 OpenStack Foundation
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import functools
from oslo_log import log as logging
from oslo_utils import importutils
import six
import webob.dec
import webob.exc
from nova.api.openstack import wsgi
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
from nova.i18n import _LW
LOG = logging.getLogger(__name__)
class ExtensionDescriptor(object):
"""Base class that defines the contract for extensions.
Note that you don't have to derive from this class to have a valid
extension; it is purely a convenience.
"""
# The name of the extension, e.g., 'Fox In Socks'
name = None
# The alias for the extension, e.g., 'FOXNSOX'
alias = None
# Description comes from the docstring for the class
# The timestamp when the extension was last updated, e.g.,
# '2011-01-22T19:25:27Z'
updated = None
def __init__(self, ext_mgr):
"""Register extension with the extension manager."""
ext_mgr.register(self)
self.ext_mgr = ext_mgr
def get_resources(self):
"""List of extensions.ResourceExtension extension objects.
Resources define new nouns, and are accessible through URLs.
"""
resources = []
return resources
def get_controller_extensions(self):
"""List of extensions.ControllerExtension extension objects.
Controller extensions are used to extend existing controllers.
"""
controller_exts = []
return controller_exts
def __repr__(self):
return "<Extension: name=%s, alias=%s, updated=%s>" % (
self.name, self.alias, self.updated)
def is_valid(self):
"""Validate required fields for extensions.
Raises an attribute error if the attr is not defined
"""
for attr in ('name', 'alias', 'updated', 'namespace'):
if getattr(self, attr) is None:
raise AttributeError("%s is None, needs to be defined" % attr)
return True
class ExtensionsController(wsgi.Resource):
def __init__(self, extension_manager):
self.extension_manager = extension_manager
super(ExtensionsController, self).__init__(None)
def _translate(self, ext):
ext_data = {}
ext_data['name'] = ext.name
ext_data['alias'] = ext.alias
ext_data['description'] = ext.__doc__
ext_data['namespace'] = ext.namespace
ext_data['updated'] = ext.updated
ext_data['links'] = [] # TODO(dprince): implement extension links
return ext_data
def index(self, req):
extensions = []
for ext in self.extension_manager.sorted_extensions():
extensions.append(self._translate(ext))
return dict(extensions=extensions)
def show(self, req, id):
try:
# NOTE(dprince): the extensions alias is used as the 'id' for show
ext = self.extension_manager.extensions[id]
except KeyError:
raise webob.exc.HTTPNotFound()
return dict(extension=self._translate(ext))
def delete(self, req, id):
raise webob.exc.HTTPNotFound()
def create(self, req, body):
raise webob.exc.HTTPNotFound()
class ExtensionManager(object):
"""Load extensions from the configured extension path.
    See nova/tests/api/openstack/compute/extensions/foxinsocks.py for an
    example extension implementation.
"""
def sorted_extensions(self):
if self.sorted_ext_list is None:
self.sorted_ext_list = sorted(self.extensions.items())
for _alias, ext in self.sorted_ext_list:
yield ext
def is_loaded(self, alias):
return alias in self.extensions
def register(self, ext):
# Do nothing if the extension doesn't check out
if not self._check_extension(ext):
return
alias = ext.alias
if alias in self.extensions:
raise exception.NovaException("Found duplicate extension: %s"
% alias)
self.extensions[alias] = ext
self.sorted_ext_list = None
def get_resources(self):
"""Returns a list of ResourceExtension objects."""
resources = []
resources.append(ResourceExtension('extensions',
ExtensionsController(self)))
for ext in self.sorted_extensions():
try:
resources.extend(ext.get_resources())
except AttributeError:
                # NOTE(dprince): Extensions aren't required to have resource
# extensions
pass
return resources
def get_controller_extensions(self):
"""Returns a list of ControllerExtension objects."""
controller_exts = []
for ext in self.sorted_extensions():
try:
get_ext_method = ext.get_controller_extensions
except AttributeError:
# NOTE(Vek): Extensions aren't required to have
# controller extensions
continue
controller_exts.extend(get_ext_method())
return controller_exts
def _check_extension(self, extension):
"""Checks for required methods in extension objects."""
try:
extension.is_valid()
except AttributeError:
LOG.exception(_LE("Exception loading extension"))
return False
return True
def load_extension(self, ext_factory):
"""Execute an extension factory.
Loads an extension. The 'ext_factory' is the name of a
callable that will be imported and called with one
argument--the extension manager. The factory callable is
expected to call the register() method at least once.
"""
LOG.debug("Loading extension %s", ext_factory)
if isinstance(ext_factory, six.string_types):
# Load the factory
factory = importutils.import_class(ext_factory)
else:
factory = ext_factory
# Call it
LOG.debug("Calling extension factory %s", ext_factory)
factory(self)
def _load_extensions(self):
"""Load extensions specified on the command line."""
extensions = list(self.cls_list)
for ext_factory in extensions:
try:
self.load_extension(ext_factory)
except Exception as exc:
LOG.warning(_LW('Failed to load extension %(ext_factory)s: '
'%(exc)s'),
{'ext_factory': ext_factory, 'exc': exc})
class ControllerExtension(object):
"""Extend core controllers of nova OpenStack API.
Provide a way to extend existing nova OpenStack API core
controllers.
"""
def __init__(self, extension, collection, controller):
self.extension = extension
self.collection = collection
self.controller = controller
class ResourceExtension(object):
"""Add top level resources to the OpenStack API in nova."""
def __init__(self, collection, controller=None, parent=None,
collection_actions=None, member_actions=None,
custom_routes_fn=None, inherits=None, member_name=None):
if not collection_actions:
collection_actions = {}
if not member_actions:
member_actions = {}
self.collection = collection
self.controller = controller
self.parent = parent
self.collection_actions = collection_actions
self.member_actions = member_actions
self.custom_routes_fn = custom_routes_fn
self.inherits = inherits
self.member_name = member_name
@six.add_metaclass(abc.ABCMeta)
class V21APIExtensionBase(object):
"""Abstract base class for all v2.1 API extensions.
All v2.1 API extensions must derive from this class and implement
the abstract methods get_resources and get_controller_extensions
even if they just return an empty list. The extensions must also
define the abstract properties.
"""
def __init__(self, extension_info):
self.extension_info = extension_info
@abc.abstractmethod
def get_resources(self):
"""Return a list of resources extensions.
The extensions should return a list of ResourceExtension
objects. This list may be empty.
"""
pass
@abc.abstractmethod
def get_controller_extensions(self):
"""Return a list of controller extensions.
The extensions should return a list of ControllerExtension
objects. This list may be empty.
"""
pass
@abc.abstractproperty
def name(self):
"""Name of the extension."""
pass
@abc.abstractproperty
def alias(self):
"""Alias for the extension."""
pass
@abc.abstractproperty
def version(self):
"""Version of the extension."""
pass
def __repr__(self):
return "<Extension: name=%s, alias=%s, version=%s>" % (
self.name, self.alias, self.version)
def is_valid(self):
"""Validate required fields for extensions.
Raises an attribute error if the attr is not defined
"""
for attr in ('name', 'alias', 'version'):
if getattr(self, attr) is None:
raise AttributeError("%s is None, needs to be defined" % attr)
return True
def expected_errors(errors):
"""Decorator for v2.1 API methods which specifies expected exceptions.
Specify which exceptions may occur when an API method is called. If an
unexpected exception occurs then return a 500 instead and ask the user
of the API to file a bug report.
"""
def decorator(f):
@functools.wraps(f)
def wrapped(*args, **kwargs):
try:
return f(*args, **kwargs)
except Exception as exc:
if isinstance(exc, webob.exc.WSGIHTTPException):
if isinstance(errors, int):
t_errors = (errors,)
else:
t_errors = errors
if exc.code in t_errors:
raise
elif isinstance(exc, exception.Forbidden):
# Note(cyeoh): Special case to handle
# Forbidden exceptions so every
# extension method does not need to wrap authorize
# calls. ResourceExceptionHandler silently
# converts NotAuthorized to HTTPForbidden
raise
elif isinstance(exc, exception.ValidationError):
# Note(oomichi): Handle a validation error, which
# happens due to invalid API parameters, as an
# expected error.
raise
elif isinstance(exc, exception.Unauthorized):
                    # Handle an Unauthorized exception; it will be
                    # automatically converted to an HTTP 401. Clients
                    # like python-novaclient handle this error by
                    # generating a new token and retrying the request.
raise
LOG.exception(_LE("Unexpected exception in API method"))
msg = _('Unexpected API Error. Please report this at '
'http://bugs.launchpad.net/nova/ and attach the Nova '
'API log if possible.\n%s') % type(exc)
raise webob.exc.HTTPInternalServerError(explanation=msg)
return wrapped
return decorator
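# A minimal usage sketch (the controller and status code are illustrative):
#
#     class ServersController(wsgi.Controller):
#         @expected_errors(404)
#         def show(self, req, id):
#             ...  # an expected webob.exc.HTTPNotFound propagates as-is;
#                  # any unexpected exception becomes a 500 asking for a
#                  # bug report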
|
|
"""Test module for enumerated types under PyTables."""
import itertools
import operator
import tables as tb
from tables.tests import common
class CreateColTestCase(common.PyTablesTestCase):
"""Test creating enumerated column descriptions."""
def _createCol(self, enum, dflt, base='uint32', shape=()):
"""Create and check an enumerated column description."""
enumcol = tb.EnumCol(enum, dflt, base=base, shape=shape)
sameEnum = tb.Enum(enum)
self.assertEqual(enumcol.type, 'enum')
self.assertEqual(enumcol.dtype.base.name, enumcol.base.type)
# To avoid 'LongInt' vs 'Int' issues
# self.assertEqual(enumcol.dflt, sameEnum[dflt])
self.assertEqual(int(enumcol.dflt), int(sameEnum[dflt]))
self.assertEqual(enumcol.dtype.shape, shape)
self.assertEqual(enumcol.enum, sameEnum)
def test00a_validFromEnum(self):
"""Describing an enumerated column from an enumeration."""
colors = tb.Enum(['red', 'green', 'blue'])
self._createCol(colors, 'red')
def test00b_validFromDict(self):
"""Describing an enumerated column from a dictionary."""
colors = {'red': 4, 'green': 2, 'blue': 1}
self._createCol(colors, 'red')
def test00c_validFromList(self):
"""Describing an enumerated column from a list."""
colors = ['red', 'green', 'blue']
self._createCol(colors, 'red')
def test00d_invalidFromType(self):
"""Describing an enumerated column from an invalid object."""
colors = 123
self.assertRaises(TypeError, self._createCol, colors, 'red')
def test01_invalidDflt(self):
"""Describing an enumerated column with an invalid default object."""
colors = {'red': 4, 'green': 2, 'blue': 1}
self.assertRaises(KeyError, self._createCol, colors, 'black')
def test02a_validDtypeBroader(self):
"""Describing an enumerated column with a broader type."""
colors = {'red': 4, 'green': 2, 'blue': 1}
self._createCol(colors, 'red', 'int64')
def test02b_invalidDtypeTooNarrow(self):
"""Describing an enumerated column with a too narrow type."""
colors = ['e%d' % i for i in range(300)]
self.assertRaises(TypeError, self._createCol, colors, 'e0', 'uint8')
def test03a_validShapeMD(self):
"""Describing an enumerated column with multidimensional shape."""
colors = ['red', 'green', 'blue']
self._createCol(colors, 'red', shape=(2,))
def test04a_validReprEnum(self):
"""Checking the string representation of an enumeration."""
colors = tb.Enum(['red', 'green', 'blue'])
enumcol = tb.EnumCol(colors, 'red', base='uint32', shape=())
# needed due to "Hash randomization" (default on python 3.3)
template = (
"EnumCol(enum=Enum({%s}), dflt='red', base=UInt32Atom(shape=(), "
"dflt=0), shape=(), pos=None)"
)
        permutations = [
template % ', '.join(items) for items in itertools.permutations(
("'blue': 2", "'green': 1", "'red': 0"))
]
        self.assertIn(repr(enumcol), permutations)
def test99a_nonIntEnum(self):
"""Describing an enumerated column of floats (not implemented)."""
colors = {'red': 1.0}
self.assertRaises(NotImplementedError, self._createCol, colors, 'red',
base=tb.FloatAtom())
def test99b_nonIntDtype(self):
"""Describing an enumerated column encoded as floats.
(not implemented).
"""
colors = ['red', 'green', 'blue']
self.assertRaises(
NotImplementedError, self._createCol, colors, 'red', 'float64')
def test99b_nonScalarEnum(self):
"""Describing an enumerated column of non-scalars (not implemented)."""
colors = {'red': (1, 2, 3)}
self.assertRaises(NotImplementedError, self._createCol, colors, 'red',
base=tb.IntAtom(shape=3))
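# A quick sketch of the tb.Enum behavior exercised above (values are
# auto-assigned in order when created from a list):
#
#     colors = tb.Enum(['red', 'green', 'blue'])
#     colors.red       # -> 0 (concrete value via attribute access)
#     colors['green']  # -> 1 (concrete value via item access)
#     colors(2)        # -> 'blue' (name from concrete value)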
class CreateAtomTestCase(common.PyTablesTestCase):
"""Test creating enumerated atoms."""
def _createAtom(self, enum, dflt, base='uint32', shape=()):
"""Create and check an enumerated atom."""
enumatom = tb.EnumAtom(enum, dflt, base=base, shape=shape)
sameEnum = tb.Enum(enum)
self.assertEqual(enumatom.type, 'enum')
self.assertEqual(enumatom.dtype.base.name, enumatom.base.type)
self.assertEqual(enumatom.shape, shape)
self.assertEqual(enumatom.enum, sameEnum)
def test00a_validFromEnum(self):
"""Describing an enumerated atom from an enumeration."""
colors = tb.Enum(['red', 'green', 'blue'])
self._createAtom(colors, 'red')
def test00b_validFromDict(self):
"""Describing an enumerated atom from a dictionary."""
colors = {'red': 4, 'green': 2, 'blue': 1}
self._createAtom(colors, 'red')
def test00c_validFromList(self):
"""Describing an enumerated atom from a list."""
colors = ['red', 'green', 'blue']
self._createAtom(colors, 'red')
def test00d_invalidFromType(self):
"""Describing an enumerated atom from an invalid object."""
colors = 123
self.assertRaises(TypeError, self._createAtom, colors, 'red')
def test02a_validDtypeBroader(self):
"""Describing an enumerated atom with a broader type."""
colors = {'red': 4, 'green': 2, 'blue': 1}
self._createAtom(colors, 'red', base='int64')
def test02b_invalidDtypeTooNarrow(self):
"""Describing an enumerated atom with a too narrow type."""
colors = ['e%d' % i for i in range(300)]
self.assertRaises(TypeError, self._createAtom, colors, 'red', 'uint8')
def test03a_validShapeMD(self):
"""Describing an enumerated atom with multidimensional shape."""
colors = ['red', 'green', 'blue']
self._createAtom(colors, 'red', shape=(2,))
def test99a_nonIntEnum(self):
"""Describing an enumerated atom of floats (not implemented)."""
colors = {'red': 1.0}
self.assertRaises(NotImplementedError, self._createAtom, colors, 'red',
base=tb.FloatAtom())
def test99b_nonIntDtype(self):
"""Describing an enumerated atom encoded as a float.
(not implemented).
"""
colors = ['red', 'green', 'blue']
self.assertRaises(
NotImplementedError, self._createAtom, colors, 'red', 'float64')
def test99b_nonScalarEnum(self):
"""Describing an enumerated atom of non-scalars (not implemented)."""
colors = {'red': (1, 2, 3)}
self.assertRaises(NotImplementedError, self._createAtom, colors, 'red',
base=tb.IntAtom(shape=3))
class EnumTableTestCase(common.TempFileMixin, common.PyTablesTestCase):
"""Test tables with enumerated columns."""
enum = tb.Enum({'red': 4, 'green': 2, 'blue': 1, 'black': 0})
defaultName = 'black'
valueInEnum = enum.red
valueOutOfEnum = 1234
enumType = 'uint16'
def _description(self, shape=()):
class TestDescription(tb.IsDescription):
rid = tb.IntCol(pos=0)
rcolor = tb.EnumCol(
self.enum, self.defaultName,
base=self.enumType, shape=shape, pos=1)
return TestDescription
def test00a_reopen(self):
"""Reopening a file with tables using enumerated data."""
self.h5file.create_table(
'/', 'test', self._description(), title=self._getMethodName())
self._reopen()
self.assertEqual(
self.h5file.root.test.get_enum('rcolor'), self.enum,
"Enumerated type was not restored correctly from disk.")
def test00b_reopenMD(self):
"""Reopening a file with tables using enumerated multi-dimensional
data."""
self.h5file.create_table(
'/', 'test', self._description((2,)), title=self._getMethodName())
self._reopen()
self.assertEqual(
self.h5file.root.test.get_enum('rcolor'), self.enum,
"Enumerated type was not restored correctly from disk.")
def test01_rowAppend(self):
"""Appending enumerated values using ``row.append()``."""
tbl = self.h5file.create_table(
'/', 'test', self._description(), title=self._getMethodName())
appended = [
(10, self.valueInEnum),
(20, self.valueOutOfEnum)]
row = tbl.row
row['rid'] = appended[0][0]
row['rcolor'] = appended[0][1]
row.append()
row['rid'] = appended[1][0]
self.assertRaises(
ValueError, operator.setitem, row, 'rcolor', appended[1][1])
tbl.flush()
tbl.flavor = 'python'
read = tbl.read()
common.verbosePrint(
"* appended value: %s\n"
"* read value: %s\n"
% (appended[:-1], read))
self.assertEqual(
appended[:-1], read, "Written and read values differ.")
def test02_append(self):
"""Appending enumerated values using ``table.append()``."""
tbl = self.h5file.create_table(
'/', 'test', self._description(), title=self._getMethodName())
appended = [
(10, self.valueInEnum),
(20, self.valueOutOfEnum)]
tbl.append(appended)
tbl.flush()
tbl.flavor = 'python'
read = tbl.read()
common.verbosePrint(
"* appended value: %s\n"
"* read value: %s\n"
% (appended, read))
self.assertEqual(appended, read, "Written and read values differ.")
def test03_setitem(self):
"""Changing enumerated values using ``table.__setitem__()``."""
tbl = self.h5file.create_table(
'/', 'test', self._description(), title=self._getMethodName())
appended = [
(10, self.valueInEnum),
(20, self.valueInEnum)]
tbl.append(appended)
written = [
(10, self.valueInEnum),
(20, self.valueOutOfEnum)]
tbl[:] = written
tbl.flavor = 'python'
read = tbl.read()
common.verbosePrint(
"* written value: %s\n"
"* read value: %s\n"
% (written, read))
self.assertEqual(written, read, "Written and read values differ.")
def test04_multidim(self):
"""Appending multi-dimensional enumerated data."""
tbl = self.h5file.create_table(
'/', 'test', self._description((2,)), title=self._getMethodName())
appended = [
(10, (self.valueInEnum, self.valueOutOfEnum)),
(20, (self.valueInEnum, self.valueOutOfEnum))]
row = tbl.row
row['rid'] = appended[0][0]
self.assertRaises(
ValueError, operator.setitem, row, 'rcolor', appended[0][1])
tbl.append(appended)
tbl.flush()
tbl.flavor = 'python'
read = tbl.read()
for x_appended, x_read in zip(appended, read):
self.assertEqual(x_appended[0], x_read[0],
"Written and read values differ.")
self.assertEqual(x_appended[1][0], x_read[1][0],
"Written and read values differ.")
self.assertEqual(x_appended[1][1], x_read[1][1],
"Written and read values differ.")
def test05_where(self):
"""Searching enumerated data."""
tbl = self.h5file.create_table(
'/', 'test', self._description(), title=self._getMethodName())
appended = [
(10, self.valueInEnum),
(20, self.valueInEnum),
(30, self.valueOutOfEnum)]
tbl.append(appended)
tbl.flush()
searched = [
(row['rid'], row['rcolor'])
for row in tbl.where('rcolor == v', {'v': self.valueInEnum})]
common.verbosePrint(
"* ``valueInEnum``: %s\n"
"* ``rcolor`` column: ``%s``\n"
"* ``searched``: %s\n"
"* Should look like: %s\n"
% (self.valueInEnum, tbl.cols.rcolor, searched, appended[:-1]))
self.assertEqual(
searched, appended[:-1], "Search returned incorrect results.")
class EnumEArrayTestCase(common.TempFileMixin, common.PyTablesTestCase):
"""Test extendable arrays of enumerated values."""
enum = tb.Enum({'red': 4, 'green': 2, 'blue': 1, 'black': 0})
valueInEnum = enum.red
valueOutOfEnum = 1234
enumType = 'uint16'
def _atom(self, shape=()):
return tb.EnumAtom(self.enum, 'red', base=self.enumType, shape=shape)
def test00a_reopen(self):
"""Reopening a file with extendable arrays using enumerated data."""
self.h5file.create_earray(
'/', 'test', self._atom(), shape=(0,),
title=self._getMethodName())
self.h5file.root.test.flavor = 'python'
self._reopen()
self.assertEqual(
self.h5file.root.test.get_enum(), self.enum,
"Enumerated type was not restored correctly from disk.")
def test00b_reopenMD(self):
"""Reopening a file with extendable arrays using enumerated
multi-dimensional data."""
self.h5file.create_earray(
'/', 'test', self._atom(), shape=(0, 2),
title=self._getMethodName())
self.h5file.root.test.flavor = 'python'
self._reopen()
self.assertEqual(
self.h5file.root.test.get_enum(), self.enum,
"Enumerated type was not restored correctly from disk.")
def test_enum_default_persistence_red(self):
dflt = 'red'
atom = tb.EnumAtom(self.enum, dflt, base=self.enumType, shape=())
self.h5file.create_earray('/', 'test', atom, shape=(0,),
title=self._getMethodName())
self._reopen()
self.assertEqual(
self.h5file.root.test.get_enum(), self.enum,
"Enumerated type was not restored correctly from disk.")
self.assertEqual(
self.h5file.root.test.atom.dflt, self.enum[dflt],
"The default value of enumerated type was not restored correctly "
"from disk.")
def test_enum_default_persistence_green(self):
dflt = 'green'
atom = tb.EnumAtom(self.enum, dflt, base=self.enumType, shape=())
self.h5file.create_earray('/', 'test', atom, shape=(0,),
title=self._getMethodName())
self._reopen()
self.assertEqual(
self.h5file.root.test.get_enum(), self.enum,
"Enumerated type was not restored correctly from disk.")
self.assertEqual(
self.h5file.root.test.atom.dflt, self.enum[dflt],
"The default value of enumerated type was not restored correctly "
"from disk.")
def test_enum_default_persistence_blue(self):
dflt = 'blue'
atom = tb.EnumAtom(self.enum, dflt, base=self.enumType, shape=())
self.h5file.create_earray('/', 'test', atom, shape=(0,),
title=self._getMethodName())
self._reopen()
self.assertEqual(
self.h5file.root.test.get_enum(), self.enum,
"Enumerated type was not restored correctly from disk.")
self.assertEqual(
self.h5file.root.test.atom.dflt, self.enum[dflt],
"The default value of enumerated type was not restored correctly "
"from disk.")
def test_enum_default_persistence_black(self):
dflt = 'black'
atom = tb.EnumAtom(self.enum, dflt, base=self.enumType, shape=())
self.h5file.create_earray('/', 'test', atom, shape=(0,),
title=self._getMethodName())
self._reopen()
self.assertEqual(
self.h5file.root.test.get_enum(), self.enum,
"Enumerated type was not restored correctly from disk.")
self.assertEqual(
self.h5file.root.test.atom.dflt, self.enum[dflt],
"The default value of enumerated type was not restored correctly "
"from disk.")
def test01_append(self):
"""Appending scalar elements of enumerated values."""
earr = self.h5file.create_earray(
'/', 'test', self._atom(), shape=(0,),
title=self._getMethodName())
earr.flavor = 'python'
appended = [self.valueInEnum, self.valueOutOfEnum]
earr.append(appended)
earr.flush()
read = earr.read()
self.assertEqual(appended, read, "Written and read values differ.")
def test02_appendMD(self):
"""Appending multi-dimensional elements of enumerated values."""
earr = self.h5file.create_earray(
'/', 'test', self._atom(), shape=(0, 2),
title=self._getMethodName())
earr.flavor = 'python'
appended = [
[self.valueInEnum, self.valueOutOfEnum],
[self.valueInEnum, self.valueOutOfEnum]]
earr.append(appended)
earr.flush()
read = earr.read()
self.assertEqual(appended, read, "Written and read values differ.")
def test03_setitem(self):
"""Changing enumerated values using ``earray.__setitem__()``."""
earr = self.h5file.create_earray(
'/', 'test', self._atom(), shape=(0,),
title=self._getMethodName())
earr.flavor = 'python'
appended = (self.valueInEnum, self.valueInEnum)
earr.append(appended)
written = [self.valueInEnum, self.valueOutOfEnum]
earr[:] = written
read = earr.read()
self.assertEqual(written, read, "Written and read values differ.")
class EnumVLArrayTestCase(common.TempFileMixin, common.PyTablesTestCase):
"""Test variable-length arrays of enumerated values."""
enum = tb.Enum({'red': 4, 'green': 2, 'blue': 1, 'black': 0})
valueInEnum = enum.red
valueOutOfEnum = 1234
enumType = 'uint16'
def _atom(self, shape=()):
return tb.EnumAtom(self.enum, 'red', base=self.enumType, shape=shape)
def test00a_reopen(self):
"""Reopening a file with variable-length arrays using
enumerated data."""
self.h5file.create_vlarray(
'/', 'test', self._atom(),
title=self._getMethodName())
self.h5file.root.test.flavor = 'python'
self._reopen()
self.assertEqual(
self.h5file.root.test.get_enum(), self.enum,
"Enumerated type was not restored correctly from disk.")
def test00b_reopenMD(self):
"""Reopening a file with variable-length arrays using enumerated
multi-dimensional data."""
self.h5file.create_vlarray(
'/', 'test', self._atom((2,)),
title=self._getMethodName())
self.h5file.root.test.flavor = 'python'
self._reopen()
self.assertEqual(
self.h5file.root.test.get_enum(), self.enum,
"Enumerated type was not restored correctly from disk.")
def test01_append(self):
"""Appending scalar elements of enumerated values."""
vlarr = self.h5file.create_vlarray(
'/', 'test', self._atom(),
title=self._getMethodName())
vlarr.flavor = 'python'
appended = [
[self.valueInEnum, ],
[self.valueInEnum, self.valueOutOfEnum]]
vlarr.append(appended[0])
vlarr.append(appended[1])
vlarr.flush()
read = vlarr.read()
common.verbosePrint(
"* appended value: %s\n"
"* read value: %s\n"
% (appended, read))
self.assertEqual(appended, read, "Written and read values differ.")
def test02_appendMD(self):
"""Appending multi-dimensional elements of enumerated values."""
vlarr = self.h5file.create_vlarray(
'/', 'test', self._atom((2,)),
title=self._getMethodName())
vlarr.flavor = 'python'
appended = [
[[self.valueInEnum, self.valueInEnum], ],
[[self.valueInEnum, self.valueOutOfEnum],
[self.valueInEnum, self.valueInEnum]]]
vlarr.append(appended[0])
vlarr.append(appended[1])
vlarr.flush()
read = vlarr.read()
common.verbosePrint(
"* appended value: %s\n"
"* read value: %s\n"
% (appended, read))
self.assertEqual(appended, read, "Written and read values differ.")
def test03_setitem(self):
"""Changing enumerated values using ``vlarray.__setitem__()``."""
vlarr = self.h5file.create_vlarray(
'/', 'test', self._atom(),
title=self._getMethodName())
vlarr.flavor = 'python'
appended = (self.valueInEnum, self.valueInEnum)
vlarr.append(appended)
written = [self.valueInEnum, self.valueOutOfEnum]
vlarr[0] = written
read = vlarr.read()
common.verbosePrint(
"* written value: %s\n"
"* read value: %s\n"
% (written, read))
self.assertEqual(written, read[0], "Written and read values differ.")
def suite():
"""Return a test suite consisting of all the test cases in the module."""
# These two are for including Enum's doctests here.
import doctest
theSuite = common.unittest.TestSuite()
niter = 1
# theSuite.addTest(unittest.makeSuite(EnumTableTestCase))
for i in range(niter):
theSuite.addTest(doctest.DocTestSuite(tb.misc.enum))
theSuite.addTest(common.unittest.makeSuite(CreateColTestCase))
theSuite.addTest(common.unittest.makeSuite(CreateAtomTestCase))
theSuite.addTest(common.unittest.makeSuite(EnumTableTestCase))
theSuite.addTest(common.unittest.makeSuite(EnumEArrayTestCase))
theSuite.addTest(common.unittest.makeSuite(EnumVLArrayTestCase))
return theSuite
if __name__ == '__main__':
import sys
common.parse_argv(sys.argv)
common.print_versions()
common.unittest.main(defaultTest='suite')
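# A minimal, hypothetical sketch (not part of the test suite) of the enum
# round-trip these tests exercise: an Enum maps names to concrete values, an
# EnumAtom stores those values on disk, and get_enum() recovers the mapping
# when the file is reopened. The file name below is illustrative only.
def _enum_roundtrip_sketch():
    colors = tb.Enum({'red': 4, 'green': 2, 'blue': 1, 'black': 0})
    assert colors.red == 4 and colors['red'] == 4
    assert colors(4) == 'red'  # reverse lookup: concrete value -> name
    with tb.open_file('enum-sketch.h5', 'w') as h5:
        atom = tb.EnumAtom(colors, 'red', base='uint16', shape=())
        arr = h5.create_earray('/', 'colors', atom, shape=(0,))
        arr.append([colors.red, colors.blue])
    with tb.open_file('enum-sketch.h5', 'r') as h5:
        assert h5.root.colors.get_enum() == colors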
|
|
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cPickle as pickle
import os
import signal
import sys
import time
from swift import gettext_ as _
from random import random
from eventlet import spawn, patcher, Timeout
from swift.common.bufferedhttp import http_connect
from swift.common.exceptions import ConnectionTimeout
from swift.common.ring import Ring
from swift.common.utils import get_logger, renamer, write_pickle, \
dump_recon_cache, config_true_value, ismount
from swift.common.daemon import Daemon
from swift.common.storage_policy import split_policy_string, PolicyError
from swift.obj.diskfile import get_tmp_dir, ASYNCDIR_BASE
from swift.common.http import is_success, HTTP_NOT_FOUND, \
HTTP_INTERNAL_SERVER_ERROR
class ObjectUpdater(Daemon):
"""Update object information in container listings."""
def __init__(self, conf, logger=None):
self.conf = conf
self.logger = logger or get_logger(conf, log_route='object-updater')
self.devices = conf.get('devices', '/srv/node')
self.mount_check = config_true_value(conf.get('mount_check', 'true'))
self.swift_dir = conf.get('swift_dir', '/etc/swift')
self.interval = int(conf.get('interval', 300))
self.container_ring = None
self.concurrency = int(conf.get('concurrency', 1))
self.slowdown = float(conf.get('slowdown', 0.01))
self.node_timeout = int(conf.get('node_timeout', 10))
self.conn_timeout = float(conf.get('conn_timeout', 0.5))
self.successes = 0
self.failures = 0
self.recon_cache_path = conf.get('recon_cache_path',
'/var/cache/swift')
self.rcache = os.path.join(self.recon_cache_path, 'object.recon')
def _listdir(self, path):
try:
return os.listdir(path)
except OSError as e:
self.logger.error(_('ERROR: Unable to access %(path)s: '
'%(error)s') %
{'path': path, 'error': e})
return []
def get_container_ring(self):
"""Get the container ring. Load it, if it hasn't been yet."""
if not self.container_ring:
self.container_ring = Ring(self.swift_dir, ring_name='container')
return self.container_ring
def run_forever(self, *args, **kwargs):
"""Run the updater continuously."""
time.sleep(random() * self.interval)
while True:
self.logger.info(_('Begin object update sweep'))
begin = time.time()
pids = []
# read from container ring to ensure it's fresh
self.get_container_ring().get_nodes('')
for device in self._listdir(self.devices):
if self.mount_check and \
not ismount(os.path.join(self.devices, device)):
self.logger.increment('errors')
self.logger.warn(
_('Skipping %s as it is not mounted'), device)
continue
while len(pids) >= self.concurrency:
pids.remove(os.wait()[0])
pid = os.fork()
if pid:
pids.append(pid)
else:
signal.signal(signal.SIGTERM, signal.SIG_DFL)
patcher.monkey_patch(all=False, socket=True)
self.successes = 0
self.failures = 0
forkbegin = time.time()
self.object_sweep(os.path.join(self.devices, device))
elapsed = time.time() - forkbegin
self.logger.info(
_('Object update sweep of %(device)s'
' completed: %(elapsed).02fs, %(success)s successes'
', %(fail)s failures'),
{'device': device, 'elapsed': elapsed,
'success': self.successes, 'fail': self.failures})
sys.exit()
while pids:
pids.remove(os.wait()[0])
elapsed = time.time() - begin
self.logger.info(_('Object update sweep completed: %.02fs'),
elapsed)
dump_recon_cache({'object_updater_sweep': elapsed},
self.rcache, self.logger)
if elapsed < self.interval:
time.sleep(self.interval - elapsed)
def run_once(self, *args, **kwargs):
"""Run the updater once."""
self.logger.info(_('Begin object update single threaded sweep'))
begin = time.time()
self.successes = 0
self.failures = 0
for device in self._listdir(self.devices):
if self.mount_check and \
not ismount(os.path.join(self.devices, device)):
self.logger.increment('errors')
self.logger.warn(
_('Skipping %s as it is not mounted'), device)
continue
self.object_sweep(os.path.join(self.devices, device))
elapsed = time.time() - begin
self.logger.info(
_('Object update single threaded sweep completed: '
'%(elapsed).02fs, %(success)s successes, %(fail)s failures'),
{'elapsed': elapsed, 'success': self.successes,
'fail': self.failures})
dump_recon_cache({'object_updater_sweep': elapsed},
self.rcache, self.logger)
def object_sweep(self, device):
"""
If there are async pendings on the device, walk each one and update.
:param device: path to device
"""
start_time = time.time()
# loop through async pending dirs for all policies
for asyncdir in self._listdir(device):
# we only care about directories
async_pending = os.path.join(device, asyncdir)
if not os.path.isdir(async_pending):
continue
if not asyncdir.startswith(ASYNCDIR_BASE):
# skip stuff like "accounts", "containers", etc.
continue
try:
base, policy = split_policy_string(asyncdir)
except PolicyError as e:
self.logger.warn(_('Directory %r does not map '
'to a valid policy (%s)') % (asyncdir, e))
continue
for prefix in self._listdir(async_pending):
prefix_path = os.path.join(async_pending, prefix)
if not os.path.isdir(prefix_path):
continue
last_obj_hash = None
for update in sorted(self._listdir(prefix_path), reverse=True):
update_path = os.path.join(prefix_path, update)
if not os.path.isfile(update_path):
continue
try:
obj_hash, timestamp = update.split('-')
except ValueError:
self.logger.increment('errors')
self.logger.error(
_('ERROR async pending file with unexpected '
'name %s')
% (update_path))
continue
if obj_hash == last_obj_hash:
self.logger.increment("unlinks")
os.unlink(update_path)
else:
self.process_object_update(update_path, device,
policy)
last_obj_hash = obj_hash
time.sleep(self.slowdown)
try:
os.rmdir(prefix_path)
except OSError:
pass
self.logger.timing_since('timing', start_time)
def process_object_update(self, update_path, device, policy):
"""
Process the object information to be updated and update.
:param update_path: path to pickled object update file
:param device: path to device
:param policy: storage policy of object update
"""
try:
update = pickle.load(open(update_path, 'rb'))
except Exception:
self.logger.exception(
_('ERROR Pickle problem, quarantining %s'), update_path)
self.logger.increment('quarantines')
target_path = os.path.join(device, 'quarantined', 'objects',
os.path.basename(update_path))
renamer(update_path, target_path, fsync=False)
return
successes = update.get('successes', [])
part, nodes = self.get_container_ring().get_nodes(
update['account'], update['container'])
obj = '/%s/%s/%s' % \
(update['account'], update['container'], update['obj'])
headers_out = update['headers'].copy()
headers_out['user-agent'] = 'object-updater %s' % os.getpid()
headers_out.setdefault('X-Backend-Storage-Policy-Index',
str(int(policy)))
events = [spawn(self.object_update,
node, part, update['op'], obj, headers_out)
for node in nodes if node['id'] not in successes]
success = True
new_successes = False
for event in events:
event_success, node_id = event.wait()
if event_success is True:
successes.append(node_id)
new_successes = True
else:
success = False
if success:
self.successes += 1
self.logger.increment('successes')
self.logger.debug('Update sent for %(obj)s %(path)s',
{'obj': obj, 'path': update_path})
self.logger.increment("unlinks")
os.unlink(update_path)
else:
self.failures += 1
self.logger.increment('failures')
self.logger.debug('Update failed for %(obj)s %(path)s',
{'obj': obj, 'path': update_path})
if new_successes:
update['successes'] = successes
write_pickle(update, update_path, os.path.join(
device, get_tmp_dir(policy)))
def object_update(self, node, part, op, obj, headers_out):
"""
Perform the object update to the container
:param node: node dictionary from the container ring
:param part: partition that holds the container
:param op: operation performed (ex: 'POST' or 'DELETE')
:param obj: object name being updated
:param headers_out: headers to send with the update
"""
try:
with ConnectionTimeout(self.conn_timeout):
conn = http_connect(node['ip'], node['port'], node['device'],
part, op, obj, headers_out)
with Timeout(self.node_timeout):
resp = conn.getresponse()
resp.read()
success = (is_success(resp.status) or
resp.status == HTTP_NOT_FOUND)
return (success, node['id'])
except (Exception, Timeout):
self.logger.exception(_('ERROR with remote server '
'%(ip)s:%(port)s/%(device)s'), node)
return HTTP_INTERNAL_SERVER_ERROR, node['id']
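# A standalone sketch (illustration only, not part of the updater) of the
# async-pending naming convention object_sweep() relies on: each pending file
# is named "<object hash>-<timestamp>", so a reverse sort lets the sweep keep
# only the newest update per object hash and unlink the stale ones.
def _newest_updates_sketch(filenames):
    newest = {}
    for name in sorted(filenames, reverse=True):
        obj_hash, timestamp = name.split('-')
        newest.setdefault(obj_hash, timestamp)  # first seen is the newest
    return newest
# e.g. _newest_updates_sketch(['abc-1', 'abc-2', 'def-1'])
# returns {'abc': '2', 'def': '1'}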
|
|
from __future__ import unicode_literals
from reviewboard.webapi.tests.mixins import test_template
from reviewboard.webapi.tests.mixins_extra_data import (ExtraDataItemMixin,
ExtraDataListMixin)
class ReviewListMixin(ExtraDataListMixin):
@test_template
def test_post_with_text_type_markdown(self):
"""Testing the POST <URL> API with text_type=markdown"""
self._test_post_with_text_types(
text_type_field='text_type',
text_type_value='markdown',
expected_body_top_text_type='markdown',
expected_body_bottom_text_type='markdown')
@test_template
def test_post_with_text_type_plain(self):
"""Testing the POST <URL> API with text_type=plain"""
self._test_post_with_text_types(
text_type_field='text_type',
text_type_value='plain',
expected_body_top_text_type='plain',
expected_body_bottom_text_type='plain')
@test_template
def test_post_with_body_top_text_type_markdown(self):
"""Testing the POST <URL> API with body_top_text_type=markdown"""
self._test_post_with_text_types(
text_type_field='body_top_text_type',
text_type_value='markdown',
expected_body_top_text_type='markdown',
expected_body_bottom_text_type='plain')
@test_template
def test_post_with_body_top_text_type_plain(self):
"""Testing the POST <URL> API with body_top_text_type=plain"""
self._test_post_with_text_types(
text_type_field='body_top_text_type',
text_type_value='plain',
expected_body_top_text_type='plain',
expected_body_bottom_text_type='plain')
@test_template
def test_post_with_body_bottom_text_type_markdown(self):
"""Testing the POST <URL> API with body_bottom_text_type=markdown"""
self._test_post_with_text_types(
text_type_field='body_bottom_text_type',
text_type_value='markdown',
expected_body_top_text_type='plain',
expected_body_bottom_text_type='markdown')
@test_template
def test_post_with_body_bottom_text_type_plain(self):
"""Testing the POST <URL> API with body_bottom_text_type=plain"""
self._test_post_with_text_types(
text_type_field='body_bottom_text_type',
text_type_value='plain',
expected_body_top_text_type='plain',
expected_body_bottom_text_type='plain')
def _test_post_with_text_types(self, text_type_field, text_type_value,
expected_body_top_text_type,
expected_body_bottom_text_type):
body_top = '`This` is **body_top**'
body_bottom = '`This` is **body_bottom**'
url, mimetype, data, objs = \
self.setup_basic_post_test(self.user, False, None, True)
rsp = self.api_post(
url,
{
'body_top': body_top,
'body_bottom': body_bottom,
text_type_field: text_type_value,
},
expected_mimetype=mimetype)
self.assertEqual(rsp['stat'], 'ok')
review_rsp = rsp[self.resource.item_result_key]
self.assertEqual(review_rsp['body_top'], body_top)
self.assertEqual(review_rsp['body_bottom'], body_bottom)
self.assertEqual(review_rsp['body_top_text_type'],
expected_body_top_text_type)
self.assertEqual(review_rsp['body_bottom_text_type'],
expected_body_bottom_text_type)
self.compare_item(review_rsp,
self.resource.model.objects.get(pk=review_rsp['id']))
class ReviewItemMixin(ExtraDataItemMixin):
@test_template
def test_get_with_markdown_and_force_text_type_markdown(self):
"""Testing the GET <URL> API with text_type=markdown and
?force-text-type=markdown
"""
self._test_get_with_force_text_type(
text=r'\# `This` is a **test**',
rich_text=True,
force_text_type='markdown',
expected_text=r'\# `This` is a **test**')
@test_template
def test_get_with_markdown_and_force_text_type_plain(self):
"""Testing the GET <URL> API with text_type=markdown and
?force-text-type=plain
"""
self._test_get_with_force_text_type(
text=r'\# `This` is a **test**',
rich_text=True,
force_text_type='plain',
expected_text='# `This` is a **test**')
@test_template
def test_get_with_markdown_and_force_text_type_html(self):
"""Testing the GET <URL> API with text_type=markdown and
?force-text-type=html
"""
self._test_get_with_force_text_type(
text=r'\# `This` is a **test**',
rich_text=True,
force_text_type='html',
expected_text='<p># <code>This</code> is a '
'<strong>test</strong></p>')
@test_template
def test_get_with_plain_and_force_text_type_markdown(self):
"""Testing the GET <URL> API with text_type=plain and
?force-text-type=markdown
"""
self._test_get_with_force_text_type(
text='#<`This` is a **test**>',
rich_text=False,
force_text_type='markdown',
expected_text=r'\#<\`This\` is a \*\*test\*\*>')
@test_template
def test_get_with_plain_and_force_text_type_plain(self):
"""Testing the GET <URL> API with text_type=plain and
?force-text-type=plain
"""
self._test_get_with_force_text_type(
text='#<`This` is a **test**>',
rich_text=False,
force_text_type='plain',
expected_text='#<`This` is a **test**>')
@test_template
def test_get_with_plain_and_force_text_type_html(self):
"""Testing the GET <URL> API with text_type=plain and
?force-text-type=html
"""
self._test_get_with_force_text_type(
text='#<`This` is a **test**>',
rich_text=False,
force_text_type='html',
expected_text='#<`This` is a **test**>')
@test_template
def test_put_with_text_type_markdown(self):
"""Testing the POST <URL> API with text_type=markdown"""
self._test_put_with_text_types(
text_type_field='text_type',
text_type_value='markdown',
expected_body_top_text_type='markdown',
expected_body_bottom_text_type='markdown')
@test_template
def test_put_with_text_type_plain(self):
"""Testing the POST <URL> API with text_type=plain"""
self._test_put_with_text_types(
text_type_field='text_type',
text_type_value='plain',
expected_body_top_text_type='plain',
expected_body_bottom_text_type='plain')
@test_template
def test_put_with_body_top_text_type_markdown(self):
"""Testing the POST <URL> API with body_top_text_type=markdown"""
self._test_put_with_text_types(
text_type_field='body_top_text_type',
text_type_value='markdown',
expected_body_top_text_type='markdown',
expected_body_bottom_text_type='plain')
@test_template
def test_put_with_body_top_text_type_plain(self):
"""Testing the POST <URL> API with body_top_text_type=plain"""
self._test_put_with_text_types(
text_type_field='body_top_text_type',
text_type_value='plain',
expected_body_top_text_type='plain',
expected_body_bottom_text_type='plain')
@test_template
def test_put_with_body_bottom_text_type_markdown(self):
"""Testing the POST <URL> API with body_bottom_text_type=markdown"""
self._test_put_with_text_types(
text_type_field='body_bottom_text_type',
text_type_value='markdown',
expected_body_top_text_type='plain',
expected_body_bottom_text_type='markdown')
@test_template
def test_put_with_body_bottom_text_type_plain(self):
"""Testing the POST <URL> API with body_bottom_text_type=plain"""
self._test_put_with_text_types(
text_type_field='body_bottom_text_type',
text_type_value='plain',
expected_body_top_text_type='plain',
expected_body_bottom_text_type='plain')
def _test_get_with_force_text_type(self, text, rich_text,
force_text_type, expected_text):
url, mimetype, review = \
self.setup_basic_get_test(self.user, False, None)
review.body_top = text
review.body_bottom = text
review.body_top_rich_text = rich_text
review.body_bottom_rich_text = rich_text
review.save()
rsp = self.api_get(url + '?force-text-type=%s' % force_text_type,
expected_mimetype=mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertIn(self.resource.item_result_key, rsp)
review_rsp = rsp[self.resource.item_result_key]
self.assertEqual(review_rsp['body_top_text_type'], force_text_type)
self.assertEqual(review_rsp['body_bottom_text_type'], force_text_type)
self.assertEqual(review_rsp['body_top'], expected_text)
self.assertEqual(review_rsp['body_bottom'], expected_text)
self.assertNotIn('raw_text_fields', review_rsp)
rsp = self.api_get('%s?force-text-type=%s&include-text-types=raw'
% (url, force_text_type),
expected_mimetype=mimetype)
self.assertEqual(rsp['stat'], 'ok')
review_rsp = rsp[self.resource.item_result_key]
self.assertIn('raw_text_fields', review_rsp)
raw_text_fields = review_rsp['raw_text_fields']
self.assertEqual(raw_text_fields['body_top'], text)
self.assertEqual(raw_text_fields['body_bottom'], text)
def _test_put_with_text_types(self, text_type_field, text_type_value,
expected_body_top_text_type,
expected_body_bottom_text_type):
body_top = '`This` is **body_top**'
body_bottom = '`This` is **body_bottom**'
url, mimetype, data, review, objs = \
self.setup_basic_put_test(self.user, False, None, True)
data.update({
'body_top': body_top,
'body_bottom': body_bottom,
text_type_field: text_type_value,
})
rsp = self.api_put(url, data, expected_mimetype=mimetype)
self.assertEqual(rsp['stat'], 'ok')
review_rsp = rsp[self.resource.item_result_key]
self.assertEqual(review_rsp['body_top'], body_top)
self.assertEqual(review_rsp['body_bottom'], body_bottom)
self.assertEqual(review_rsp['body_top_text_type'],
expected_body_top_text_type)
self.assertEqual(review_rsp['body_bottom_text_type'],
expected_body_bottom_text_type)
self.compare_item(review_rsp,
self.resource.model.objects.get(pk=review_rsp['id']))
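# A rough sketch of the plain-text -> Markdown escaping behavior the
# force-text-type tests above expect. This is an illustration only, not
# Review Board's actual Markdown implementation.
def _escape_markdown_sketch(text):
    # Escape the characters Markdown would otherwise interpret, so that
    # '#<`This` is a **test**>' becomes '\#<\`This\` is a \*\*test\*\*>'.
    for ch in '#`*':
        text = text.replace(ch, '\\' + ch)
    return text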
|
|
import jwt, base64, json
from collections import namedtuple
from werkzeug.exceptions import InternalServerError
from flask_restful import reqparse, request, Resource, abort
from functools import wraps
from time import sleep
from jenova.models import UserSchema, Scope
from jenova.components import Security, InvalidCredentials, CallLogger, JwtInconsistentDataError
from jenova.components.tasks import (
update_cos_into_domain_zimbra_task, create_domain_zimbra_task,
delete_domain_zimbra_task, create_delegated_zimbra_admin_task
)
logger = CallLogger.logger()
TASK_TYPES = ['createzimbradomains']
QUERY_FILTER_IDS = ['id', 'client_id', 'authentication_id', 'domain_id', 'service_id']
RESERVED_NAMES = ['inova', 'jenova', 'inovatec', 'jnv', 'all']
DEFAULT_SCOPES = [
'dns',
'domain',
'service',
'store',
'users',
'zimbra',
'client',
'permissions'
]
PERMS = ['write', 'read', 'edit', 'delete']
def abort_if_obj_doesnt_exist(filter_by, target, model_object):
if filter_by in QUERY_FILTER_IDS:
try:
target = int(target)
    except ValueError:
      raise
query = { filter_by : target }
result = model_object.query.filter_by(**query).first()
if not result:
abort(404, message='Could not find object: %s' % target)
return result
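# A hypothetical usage sketch of the helper above (the Domain model name is
# assumed for illustration): look up an object by the 'name' filter, aborting
# the request with a 404 when nothing matches.
#
#   domain = abort_if_obj_doesnt_exist('name', 'example.com', Domain)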
def exception_handler(f):
@wraps(f)
def decorated(*args, **kwargs):
resource = f.__self__
# Check for violation!
    # TODO: This method queries for resources in most of the resource classes;
    # it may be possible to pass the model object through this decorated method instead.
resource.is_forbidden(**kwargs)
# Check permissions only if isn't an admin
if not resource.is_an_admin:
if f.__name__ == 'get':
resource.can_read(resource.scope)
elif f.__name__ == 'put':
resource.can_edit(resource.scope)
elif f.__name__ == 'post':
resource.can_write(resource.scope)
elif f.__name__ == 'delete':
resource.can_delete(resource.scope)
if request.data and type(request.json) != dict:
abort(415, message = 'Expecting JSON')
return f(*args, **kwargs)
return decorated
class BaseResource(Resource):
method_decorators = [exception_handler]
def __init__(self, filters, default_filter='name', **kwargs):
self.logger = logger
parser = reqparse.RequestParser()
parser.add_argument('filter_by', type=str, location='args')
self.filter_by = parser.parse_args().get('filter_by') or default_filter
if self.filter_by not in filters:
err_message = 'Wrong query filter specified %s. Accept only: %s' % (self.filter_by, ', '.join(filters))
abort(400, message=err_message)
self.parser = reqparse.RequestParser()
self.jwt_payload = self.check_auth()
def check_auth(self):
auth = request.headers.get('Authorization', None)
message = ''
if not auth:
abort(401, message = 'Authorization header is expected')
parts = auth.split()
if parts[0].lower() != 'bearer':
message = 'Authorization header must start with Bearer'
elif len(parts) == 1:
message = 'Token not found'
elif len(parts) > 2:
      message = 'Authorization header must be of the form: Bearer <token>'
if message:
abort(401, message = message)
token = parts[1]
try:
payload = jwt.decode(
token,
Security.get_jwt_skey(),
algorithms = ['HS256']
)
except jwt.ExpiredSignature:
message = 'token is expired'
except jwt.InvalidAudienceError:
message = 'incorrect audience'
except jwt.DecodeError:
message = 'token signature is invalid'
if message:
abort(401, message = message)
self.logger.debug('Access granted for %s!' % payload['user']['login'])
return payload
@property
def request_user_login(self):
return self.jwt_payload['user']['login']
@property
def is_admin(self):
return self.jwt_payload['user']['admin']
@property
def is_global_admin(self):
return self.jwt_payload['user']['global_admin']
@property
def is_an_admin(self):
return self.is_admin or self.is_global_admin
@property
def request_user_client_id(self):
return self.jwt_payload['user']['client_id']
@property
def request_user_reseller_id(self):
# It's a reseller admin user
if self.jwt_payload['user']['reseller']:
return self.jwt_payload['user']['reseller']['id']
return self.jwt_payload['user']['client']['reseller']['id']
@property
def request_user_id(self):
return self.jwt_payload['user']['id']
### PERMISSIONS METHODS ###
"""
  # These methods can be overridden in each Resource class, giving more
  # flexibility to implement your own behavior. Edit them in this class only
  # if you need to change the whole logic; otherwise, override them in the
  # resource classes that inherit from this class. To disable a check
  # entirely, override the method with a pass statement:
  # def can_read(self, scope_name):
  #   pass
"""
def can_read(self, scope_name):
""" Check if it has permission to read. Has to be evaluated on every GET HTTP methods.
"""
has_read_perm = False
for perm in self.jwt_payload['user']['permissions']:
if perm['scope']['name'] == scope_name:
has_read_perm = True
if not perm.get('read'):
has_read_perm = False
break
if not has_read_perm:
abort(403, message = 'Permission denied! Does not have proper permission.')
def can_write(self, scope_name):
""" Check if it has permission to write. Has to be evaluated on every POST HTTP methods.
"""
has_write_perm = False
for perm in self.jwt_payload['user']['permissions']:
if perm['scope']['name'] == scope_name:
has_write_perm = True
if not perm.get('write'):
has_write_perm = False
break
if not has_write_perm:
abort(403, message = 'Permission denied! Does not have proper permission.')
def can_edit(self, scope_name):
""" Check if it has permission to edit. Has to be evaluated on every PUT/PATCH HTTP methods.
"""
has_edit_perm = False
for perm in self.jwt_payload['user']['permissions']:
if perm['scope']['name'] == scope_name:
has_edit_perm = True
if not perm.get('edit'):
has_edit_perm = False
break
if not has_edit_perm:
abort(403, message = 'Permission denied! Does not have proper permission.')
def can_delete(self, scope_name):
""" Check if it has permission to delete. Has to be evaluated on every DELETE HTTP methods.
"""
has_del_perm = False
for perm in self.jwt_payload['user']['permissions']:
if perm['scope']['name'] == scope_name:
has_del_perm = True
if not perm.get('delete'):
has_del_perm = False
break
if not has_del_perm:
abort(403, message = 'Permission denied! Does not have proper permission.')
def is_forbidden(self, **kwargs):
""" Check if the resource is allowed by a global admin user. It must be overrided if the user
is not a global admin, the contraints must be evaluated accordingly,
must ensure if the request user is the owner of the requested resource.
:param kwargs: The resource attributes for validating the contraints
"""
if not self.is_global_admin: abort(403, message = 'Permission denied! Does not have enough permissions.')
class TaskResource(BaseResource):
def __init__(self):
filters = ['id', 'name']
super(TaskResource, self).__init__(filters)
def is_forbidden(self, **kwargs): pass
  def can_read(self, scope_name=None): pass
def get(self, task_type, task_id):
if task_type == 'createzimbradomains':
task = create_domain_zimbra_task.AsyncResult(task_id)
elif task_type == 'createdelegatedzimbra':
task = create_delegated_zimbra_admin_task.AsyncResult(task_id)
else:
abort(400, message = 'Wrong task_type specified')
try:
task_state = task.state
task_executed = task.ready()
except Exception:
task_state = 'ERROR'
task_executed = True
return {
'response' : {
'task_state' : task_state,
'task_executed' : task_executed
}
}
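# A minimal sketch (illustration only) of the Bearer-token flow that
# BaseResource.check_auth() expects: the client sends
# "Authorization: Bearer <token>", where the token is an HS256-signed JWT
# carrying the user payload. The secret and payload below are hypothetical.
def _bearer_token_sketch():
  secret = 's3cr3t'
  token = jwt.encode({'user' : {'login' : 'alice'}}, secret, algorithm='HS256')
  header = 'Bearer %s' % token
  scheme, _, raw_token = header.partition(' ')
  assert scheme.lower() == 'bearer'
  return jwt.decode(raw_token, secret, algorithms=['HS256'])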
|
|
# Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of an image service that uses Glance as the backend."""
from __future__ import absolute_import
import copy
import itertools
import json
import random
import sys
import time
import glanceclient
import glanceclient.exc
from oslo.config import cfg
import six
import six.moves.urllib.parse as urlparse
from nova import exception
import nova.image.download as image_xfers
from nova.openstack.common.gettextutils import _
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova import utils
glance_opts = [
cfg.StrOpt('glance_host',
default='$my_ip',
help='Default glance hostname or IP address'),
cfg.IntOpt('glance_port',
default=9292,
help='Default glance port'),
cfg.StrOpt('glance_protocol',
default='http',
help='Default protocol to use when connecting to glance. '
'Set to https for SSL.'),
cfg.ListOpt('glance_api_servers',
default=['$glance_host:$glance_port'],
help='A list of the glance api servers available to nova. '
'Prefix with https:// for ssl-based glance api servers. '
'([hostname|ip]:port)'),
cfg.BoolOpt('glance_api_insecure',
default=False,
help='Allow to perform insecure SSL (https) requests to '
'glance'),
cfg.IntOpt('glance_num_retries',
default=0,
help='Number of retries when downloading an image from glance'),
cfg.ListOpt('allowed_direct_url_schemes',
default=[],
                help='A list of url schemes that can be downloaded directly '
'via the direct_url. Currently supported schemes: '
'[file].'),
]
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.register_opts(glance_opts)
CONF.import_opt('auth_strategy', 'nova.api.auth')
CONF.import_opt('my_ip', 'nova.netconf')
def generate_glance_url():
"""Generate the URL to glance."""
glance_host = CONF.glance_host
if utils.is_valid_ipv6(glance_host):
glance_host = '[%s]' % glance_host
return "%s://%s:%d" % (CONF.glance_protocol, glance_host,
CONF.glance_port)
def generate_image_url(image_ref):
"""Generate an image URL from an image_ref."""
return "%s/images/%s" % (generate_glance_url(), image_ref)
def _parse_image_ref(image_href):
"""Parse an image href into composite parts.
:param image_href: href of an image
:returns: a tuple of the form (image_id, host, port)
:raises ValueError
"""
o = urlparse.urlparse(image_href)
port = o.port or 80
host = o.netloc.rsplit(':', 1)[0]
image_id = o.path.split('/')[-1]
use_ssl = (o.scheme == 'https')
return (image_id, host, port, use_ssl)
def generate_identity_headers(context, status='Confirmed'):
return {
'X-Auth-Token': getattr(context, 'auth_token', None),
'X-User-Id': getattr(context, 'user', None),
'X-Tenant-Id': getattr(context, 'tenant', None),
'X-Roles': ','.join(context.roles),
'X-Identity-Status': status,
'X-Service-Catalog': json.dumps(context.service_catalog),
}
def _create_glance_client(context, host, port, use_ssl, version=1):
"""Instantiate a new glanceclient.Client object."""
params = {}
if use_ssl:
scheme = 'https'
# https specific params
params['insecure'] = CONF.glance_api_insecure
params['ssl_compression'] = False
else:
scheme = 'http'
if CONF.auth_strategy == 'keystone':
# NOTE(isethi): Glanceclient <= 0.9.0.49 accepts only
# keyword 'token', but later versions accept both the
# header 'X-Auth-Token' and 'token'
params['token'] = context.auth_token
params['identity_headers'] = generate_identity_headers(context)
if utils.is_valid_ipv6(host):
#if so, it is ipv6 address, need to wrap it with '[]'
host = '[%s]' % host
endpoint = '%s://%s:%s' % (scheme, host, port)
return glanceclient.Client(str(version), endpoint, **params)
def get_api_servers():
"""Shuffle a list of CONF.glance_api_servers and return an iterator
that will cycle through the list, looping around to the beginning
if necessary.
"""
api_servers = []
for api_server in CONF.glance_api_servers:
if '//' not in api_server:
api_server = 'http://' + api_server
o = urlparse.urlparse(api_server)
port = o.port or 80
host = o.netloc.rsplit(':', 1)[0]
if host[0] == '[' and host[-1] == ']':
host = host[1:-1]
use_ssl = (o.scheme == 'https')
api_servers.append((host, port, use_ssl))
random.shuffle(api_servers)
return itertools.cycle(api_servers)
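# A minimal standalone sketch of the pattern above: shuffle once, then cycle
# forever, so consecutive lookups spread load across the servers and wrap
# around the list. The server tuples are hypothetical.
def _api_server_cycle_sketch():
    servers = [('glance-a', 9292, False), ('glance-b', 9292, False)]
    random.shuffle(servers)
    it = itertools.cycle(servers)
    return [next(it) for _ in range(3)]  # the third pick wraps to the first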
class GlanceClientWrapper(object):
"""Glance client wrapper class that implements retries."""
def __init__(self, context=None, host=None, port=None, use_ssl=False,
version=1):
if host is not None:
self.client = self._create_static_client(context,
host, port,
use_ssl, version)
else:
self.client = None
self.api_servers = None
def _create_static_client(self, context, host, port, use_ssl, version):
"""Create a client that we'll use for every call."""
self.host = host
self.port = port
self.use_ssl = use_ssl
self.version = version
return _create_glance_client(context,
self.host, self.port,
self.use_ssl, self.version)
def _create_onetime_client(self, context, version):
"""Create a client that will be used for one call."""
if self.api_servers is None:
self.api_servers = get_api_servers()
self.host, self.port, self.use_ssl = self.api_servers.next()
return _create_glance_client(context,
self.host, self.port,
self.use_ssl, version)
def call(self, context, version, method, *args, **kwargs):
"""Call a glance client method. If we get a connection error,
retry the request according to CONF.glance_num_retries.
"""
retry_excs = (glanceclient.exc.ServiceUnavailable,
glanceclient.exc.InvalidEndpoint,
glanceclient.exc.CommunicationError)
num_attempts = 1 + CONF.glance_num_retries
for attempt in xrange(1, num_attempts + 1):
client = self.client or self._create_onetime_client(context,
version)
try:
return getattr(client.images, method)(*args, **kwargs)
except retry_excs as e:
host = self.host
port = self.port
extra = "retrying"
error_msg = (_("Error contacting glance server "
"'%(host)s:%(port)s' for '%(method)s', "
"%(extra)s.") %
{'host': host, 'port': port,
'method': method, 'extra': extra})
if attempt == num_attempts:
extra = 'done trying'
LOG.exception(error_msg)
raise exception.GlanceConnectionFailed(
host=host, port=port, reason=str(e))
LOG.exception(error_msg)
time.sleep(1)
class GlanceImageService(object):
"""Provides storage and retrieval of disk image objects within Glance."""
def __init__(self, client=None):
self._client = client or GlanceClientWrapper()
#NOTE(jbresnah) build the table of download handlers at the beginning
# so that operators can catch errors at load time rather than whenever
# a user attempts to use a module. Note this cannot be done in glance
# space when this python module is loaded because the download module
# may require configuration options to be parsed.
self._download_handlers = {}
download_modules = image_xfers.load_transfer_modules()
for scheme, mod in download_modules.iteritems():
if scheme not in CONF.allowed_direct_url_schemes:
continue
try:
self._download_handlers[scheme] = mod.get_download_handler()
except Exception as ex:
fmt = _('When loading the module %(module_str)s the '
'following error occurred: %(ex)s')
LOG.error(fmt % {'module_str': str(mod), 'ex': ex})
def detail(self, context, **kwargs):
"""Calls out to Glance for a list of detailed image information."""
params = _extract_query_params(kwargs)
try:
images = self._client.call(context, 1, 'list', **params)
except Exception:
_reraise_translated_exception()
_images = []
for image in images:
if _is_image_available(context, image):
_images.append(_translate_from_glance(image))
return _images
def show(self, context, image_id):
"""Returns a dict with image data for the given opaque image id."""
try:
image = self._client.call(context, 1, 'get', image_id)
except Exception:
_reraise_translated_image_exception(image_id)
if not _is_image_available(context, image):
raise exception.ImageNotFound(image_id=image_id)
base_image_meta = _translate_from_glance(image)
return base_image_meta
def _get_transfer_module(self, scheme):
try:
return self._download_handlers[scheme]
except KeyError:
return None
except Exception as ex:
LOG.error(_("Failed to instantiate the download handler "
"for %(scheme)s") % {'scheme': scheme})
return
def download(self, context, image_id, data=None, dst_path=None):
"""Calls out to Glance for data and writes data."""
if CONF.allowed_direct_url_schemes and dst_path is not None:
locations = _get_locations(self._client, context, image_id)
for entry in locations:
loc_url = entry['url']
loc_meta = entry['metadata']
o = urlparse.urlparse(loc_url)
xfer_mod = self._get_transfer_module(o.scheme)
if xfer_mod:
try:
xfer_mod.download(context, o, dst_path, loc_meta)
msg = _("Successfully transferred "
"using %s") % o.scheme
LOG.info(msg)
return
except Exception as ex:
LOG.exception(ex)
try:
image_chunks = self._client.call(context, 1, 'data', image_id)
except Exception:
_reraise_translated_image_exception(image_id)
close_file = False
if data is None and dst_path:
data = open(dst_path, 'wb')
close_file = True
if data is None:
return image_chunks
else:
try:
for chunk in image_chunks:
data.write(chunk)
finally:
if close_file:
data.close()
def create(self, context, image_meta, data=None):
"""Store the image data and return the new image object."""
sent_service_image_meta = _translate_to_glance(image_meta)
if data:
sent_service_image_meta['data'] = data
try:
recv_service_image_meta = self._client.call(
context, 1, 'create', **sent_service_image_meta)
except glanceclient.exc.HTTPException:
_reraise_translated_exception()
return _translate_from_glance(recv_service_image_meta)
def update(self, context, image_id, image_meta, data=None,
purge_props=True):
"""Modify the given image with the new data."""
image_meta = _translate_to_glance(image_meta)
image_meta['purge_props'] = purge_props
#NOTE(bcwaldon): id is not an editable field, but it is likely to be
# passed in by calling code. Let's be nice and ignore it.
image_meta.pop('id', None)
if data:
image_meta['data'] = data
try:
image_meta = self._client.call(context, 1, 'update',
image_id, **image_meta)
except Exception:
_reraise_translated_image_exception(image_id)
else:
return _translate_from_glance(image_meta)
def delete(self, context, image_id):
"""Delete the given image.
:raises: ImageNotFound if the image does not exist.
:raises: NotAuthorized if the user is not an owner.
:raises: ImageNotAuthorized if the user is not authorized.
"""
try:
self._client.call(context, 1, 'delete', image_id)
except glanceclient.exc.NotFound:
raise exception.ImageNotFound(image_id=image_id)
except glanceclient.exc.HTTPForbidden:
raise exception.ImageNotAuthorized(image_id=image_id)
return True
def _get_locations(client, context, image_id):
"""Returns the direct url representing the backend storage location,
or None if this attribute is not shown by Glance.
"""
try:
image_meta = client.call(context, 2, 'get', image_id)
except Exception:
_reraise_translated_image_exception(image_id)
if not _is_image_available(context, image_meta):
raise exception.ImageNotFound(image_id=image_id)
locations = getattr(image_meta, 'locations', [])
du = getattr(image_meta, 'direct_url', None)
if du:
locations.append({'url': du, 'metadata': {}})
return locations
def _extract_query_params(params):
_params = {}
accepted_params = ('filters', 'marker', 'limit',
'page_size', 'sort_key', 'sort_dir')
for param in accepted_params:
if params.get(param):
_params[param] = params.get(param)
# ensure filters is a dict
_params.setdefault('filters', {})
# NOTE(vish): don't filter out private images
_params['filters'].setdefault('is_public', 'none')
return _params
def _is_image_available(context, image):
"""Check image availability.
This check is needed in case Nova and Glance are deployed
without authentication turned on.
"""
# The presence of an auth token implies this is an authenticated
# request and we need not handle the noauth use-case.
if hasattr(context, 'auth_token') and context.auth_token:
return True
def _is_image_public(image):
# NOTE(jaypipes) V2 Glance API replaced the is_public attribute
# with a visibility attribute. We do this here to prevent the
# glanceclient for a V2 image model from throwing an
# exception from warlock when trying to access an is_public
# attribute.
if hasattr(image, 'visibility'):
return str(image.visibility).lower() == 'public'
else:
return image.is_public
if context.is_admin or _is_image_public(image):
return True
properties = image.properties
if context.project_id and ('owner_id' in properties):
return str(properties['owner_id']) == str(context.project_id)
if context.project_id and ('project_id' in properties):
return str(properties['project_id']) == str(context.project_id)
try:
user_id = properties['user_id']
except KeyError:
return False
return str(user_id) == str(context.user_id)
def _translate_to_glance(image_meta):
image_meta = _convert_to_string(image_meta)
image_meta = _remove_read_only(image_meta)
return image_meta
def _translate_from_glance(image):
image_meta = _extract_attributes(image)
image_meta = _convert_timestamps_to_datetimes(image_meta)
image_meta = _convert_from_string(image_meta)
return image_meta
def _convert_timestamps_to_datetimes(image_meta):
"""Returns image with timestamp fields converted to datetime objects."""
for attr in ['created_at', 'updated_at', 'deleted_at']:
if image_meta.get(attr):
image_meta[attr] = timeutils.parse_isotime(image_meta[attr])
return image_meta
# NOTE(bcwaldon): used to store non-string data in glance metadata
def _json_loads(properties, attr):
prop = properties[attr]
if isinstance(prop, six.string_types):
properties[attr] = jsonutils.loads(prop)
def _json_dumps(properties, attr):
prop = properties[attr]
if not isinstance(prop, six.string_types):
properties[attr] = jsonutils.dumps(prop)
_CONVERT_PROPS = ('block_device_mapping', 'mappings')
def _convert(method, metadata):
metadata = copy.deepcopy(metadata)
properties = metadata.get('properties')
if properties:
for attr in _CONVERT_PROPS:
if attr in properties:
method(properties, attr)
return metadata
def _convert_from_string(metadata):
return _convert(_json_loads, metadata)
def _convert_to_string(metadata):
return _convert(_json_dumps, metadata)
def _extract_attributes(image):
#NOTE(hdd): If a key is not found, base.Resource.__getattr__() may perform
# a get(), resulting in a useless request back to glance. This list is
    # therefore sorted, with dependent attributes at the end:
# 'deleted_at' depends on 'deleted'
# 'checksum' depends on 'status' == 'active'
IMAGE_ATTRIBUTES = ['size', 'disk_format', 'owner',
'container_format', 'status', 'id',
'name', 'created_at', 'updated_at',
'deleted', 'deleted_at', 'checksum',
'min_disk', 'min_ram', 'is_public']
output = {}
for attr in IMAGE_ATTRIBUTES:
if attr == 'deleted_at' and not output['deleted']:
output[attr] = None
elif attr == 'checksum' and output['status'] != 'active':
output[attr] = None
else:
output[attr] = getattr(image, attr)
output['properties'] = getattr(image, 'properties', {})
return output
def _remove_read_only(image_meta):
IMAGE_ATTRIBUTES = ['status', 'updated_at', 'created_at', 'deleted_at']
output = copy.deepcopy(image_meta)
for attr in IMAGE_ATTRIBUTES:
if attr in output:
del output[attr]
return output
def _reraise_translated_image_exception(image_id):
"""Transform the exception for the image but keep its traceback intact."""
exc_type, exc_value, exc_trace = sys.exc_info()
new_exc = _translate_image_exception(image_id, exc_value)
raise new_exc, None, exc_trace
def _reraise_translated_exception():
"""Transform the exception but keep its traceback intact."""
exc_type, exc_value, exc_trace = sys.exc_info()
new_exc = _translate_plain_exception(exc_value)
raise new_exc, None, exc_trace
def _translate_image_exception(image_id, exc_value):
if isinstance(exc_value, (glanceclient.exc.Forbidden,
glanceclient.exc.Unauthorized)):
return exception.ImageNotAuthorized(image_id=image_id)
if isinstance(exc_value, glanceclient.exc.NotFound):
return exception.ImageNotFound(image_id=image_id)
if isinstance(exc_value, glanceclient.exc.BadRequest):
return exception.Invalid(unicode(exc_value))
return exc_value
def _translate_plain_exception(exc_value):
if isinstance(exc_value, (glanceclient.exc.Forbidden,
glanceclient.exc.Unauthorized)):
return exception.Forbidden(unicode(exc_value))
if isinstance(exc_value, glanceclient.exc.NotFound):
return exception.NotFound(unicode(exc_value))
if isinstance(exc_value, glanceclient.exc.BadRequest):
return exception.Invalid(unicode(exc_value))
return exc_value
def get_remote_image_service(context, image_href):
"""Create an image_service and parse the id from the given image_href.
The image_href param can be an href of the form
'http://example.com:9292/v1/images/b8b2c6f7-7345-4e2f-afa2-eedaba9cbbe3',
or just an id such as 'b8b2c6f7-7345-4e2f-afa2-eedaba9cbbe3'. If the
image_href is a standalone id, then the default image service is returned.
:param image_href: href that describes the location of an image
:returns: a tuple of the form (image_service, image_id)
"""
    #NOTE(bcwaldon): If image_href doesn't look like a URI, assume it's a
# standalone image ID
if '/' not in str(image_href):
image_service = get_default_image_service()
return image_service, image_href
try:
(image_id, glance_host, glance_port, use_ssl) = \
_parse_image_ref(image_href)
glance_client = GlanceClientWrapper(context=context,
host=glance_host, port=glance_port, use_ssl=use_ssl)
except ValueError:
raise exception.InvalidImageRef(image_href=image_href)
image_service = GlanceImageService(client=glance_client)
return image_service, image_id
def get_default_image_service():
return GlanceImageService()
class UpdateGlanceImage(object):
def __init__(self, context, image_id, metadata, stream):
self.context = context
self.image_id = image_id
self.metadata = metadata
self.image_stream = stream
def start(self):
image_service, image_id = (
get_remote_image_service(self.context, self.image_id))
image_service.update(self.context, image_id, self.metadata,
self.image_stream, purge_props=False)
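# A minimal usage sketch (illustration only; the href and id are hypothetical):
# a full Glance href yields a client pinned to that host, while a bare image id
# falls back to the default image service built from CONF.
#
#   service, image_id = get_remote_image_service(
#       context, 'http://glance.example.com:9292/v1/images/some-image-id')
#   service, image_id = get_remote_image_service(context, 'some-image-id')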
|
|
import argparse
import json
from pathlib import Path
from typing import Iterator, List, Dict
from shutil import copyfile
import pytest
import torch
from flaky import flaky
from allennlp.commands.evaluate import evaluate_from_args, Evaluate
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data.data_loaders import TensorDict
from allennlp.models import Model
class DummyDataLoader:
def __init__(self, outputs: List[TensorDict]) -> None:
super().__init__()
self._outputs = outputs
def __iter__(self) -> Iterator[TensorDict]:
yield from self._outputs
def __len__(self):
return len(self._outputs)
def set_target_device(self, _):
pass
class DummyModel(Model):
def __init__(self) -> None:
super().__init__(None) # type: ignore
def forward(self, **kwargs) -> Dict[str, torch.Tensor]: # type: ignore
return kwargs
class TestEvaluate(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
self.parser = argparse.ArgumentParser(description="Testing")
subparsers = self.parser.add_subparsers(title="Commands", metavar="")
Evaluate().add_subparser(subparsers)
@flaky
def test_evaluate_from_args(self):
kebab_args = [
"evaluate",
str(
self.FIXTURES_ROOT / "simple_tagger_with_span_f1" / "serialization" / "model.tar.gz"
),
str(self.FIXTURES_ROOT / "data" / "conll2003.txt"),
"--cuda-device",
"-1",
]
args = self.parser.parse_args(kebab_args)
metrics = evaluate_from_args(args)
assert metrics.keys() == {
"accuracy",
"accuracy3",
"precision-overall",
"recall-overall",
"f1-measure-overall",
"loss",
}
def test_output_file_evaluate_from_args(self):
output_file = str(self.TEST_DIR / "metrics.json")
predictions_output_file = str(self.TEST_DIR / "predictions.jsonl")
kebab_args = [
"evaluate",
str(
self.FIXTURES_ROOT / "simple_tagger_with_span_f1" / "serialization" / "model.tar.gz"
),
str(self.FIXTURES_ROOT / "data" / "conll2003.txt"),
"--cuda-device",
"-1",
"--output-file",
output_file,
"--predictions-output-file",
predictions_output_file,
]
args = self.parser.parse_args(kebab_args)
computed_metrics = evaluate_from_args(args)
with open(output_file, "r") as file:
saved_metrics = json.load(file)
assert computed_metrics == saved_metrics
with open(predictions_output_file, "r") as file:
for line in file:
prediction = json.loads(line.strip())
assert "tags" in prediction
def test_multiple_output_files_evaluate_from_args(self):
data_file = Path(self.FIXTURES_ROOT / "data" / "conll2003.txt")
paths = []
out_paths = []
pred_paths = []
for i in range(3):
tmp_path = self.TEST_DIR.joinpath(f"TEST{i}.txt")
# Need to create paths to check when they do not exist
out_paths.append(tmp_path.parent.joinpath(f"OUTPUTS{i}.json"))
pred_paths.append(tmp_path.parent.joinpath(f"PREDS{i}.txt"))
copyfile(data_file, tmp_path)
paths.append(tmp_path)
kebab_args = [
"evaluate",
str(
self.FIXTURES_ROOT / "simple_tagger_with_span_f1" / "serialization" / "model.tar.gz"
),
",".join(map(str, paths)),
"--cuda-device",
"-1",
"--output-file",
",".join(map(str, out_paths)),
"--predictions-output-file",
",".join(map(str, pred_paths)),
]
args = self.parser.parse_args(kebab_args)
computed_metrics = evaluate_from_args(args)
computed_by_file = {}
for k, v in computed_metrics.items():
fn, *metric_name = k.split("_")
if fn not in computed_by_file:
computed_by_file[fn] = {}
computed_by_file[fn]["_".join(metric_name)] = v
assert len(computed_by_file) == len(paths)
expected_input_data = data_file.read_text("utf-8")
for i, p in enumerate(paths):
# Make sure it was not modified
assert p.read_text("utf-8") == expected_input_data
assert p.stem in computed_by_file, f"paths[{i}]={p.stem}"
assert out_paths[i].exists(), f"paths[{i}]={p.stem}"
saved_metrics = json.loads(out_paths[i].read_text("utf-8"))
assert saved_metrics == computed_by_file[p.stem], f"paths[{i}]={p.stem}"
assert pred_paths[i].exists(), f"paths[{i}]={p.stem}"
def test_evaluate_works_with_vocab_expansion(self):
archive_path = str(
self.FIXTURES_ROOT / "basic_classifier" / "serialization" / "model.tar.gz"
)
        # snli2 has an extra token ("seahorse") in it.
evaluate_data_path = str(
self.FIXTURES_ROOT / "data" / "text_classification_json" / "imdb_corpus2.jsonl"
)
embeddings_filename = str(
self.FIXTURES_ROOT / "data" / "unawarded_embeddings.gz"
) # has only unawarded vector
embedding_sources_mapping = json.dumps(
{"_text_field_embedder.token_embedder_tokens": embeddings_filename}
)
kebab_args = ["evaluate", archive_path, evaluate_data_path, "--cuda-device", "-1"]
# TODO(mattg): the unawarded_embeddings.gz file above doesn't exist, but this test still
# passes. This suggests that vocab extension in evaluate isn't currently doing anything,
# and so it is broken.
# Evaluate 1 with no vocab expansion,
# Evaluate 2 with vocab expansion with no pretrained embedding file.
# Evaluate 3 with vocab expansion with given pretrained embedding file.
metrics_1 = evaluate_from_args(self.parser.parse_args(kebab_args))
metrics_2 = evaluate_from_args(self.parser.parse_args(kebab_args + ["--extend-vocab"]))
metrics_3 = evaluate_from_args(
self.parser.parse_args(
kebab_args + ["--embedding-sources-mapping", embedding_sources_mapping]
)
)
assert metrics_1 != metrics_2
assert metrics_2 != metrics_3
@pytest.mark.parametrize("auto_names", ["NONE", "METRICS", "PREDS", "ALL"])
def test_auto_names_creates_files(self, auto_names):
data_file = Path(self.FIXTURES_ROOT / "data" / "conll2003.txt")
paths = []
out_paths = []
pred_paths = []
for i in range(5):
tmp_path = self.TEST_DIR.joinpath(f"TEST{i}.txt")
# Build the expected output/prediction paths now (no files created yet)
# so we can check for their existence later
out_paths.append(tmp_path.parent.joinpath(f"OUTPUTS{i}.json"))
pred_paths.append(tmp_path.parent.joinpath(f"PREDS{i}.txt"))
copyfile(data_file, tmp_path)
paths.append(tmp_path)
kebab_args = [
"evaluate",
str(
self.FIXTURES_ROOT / "simple_tagger_with_span_f1" / "serialization" / "model.tar.gz"
),
",".join(map(str, paths)),
"--cuda-device",
"-1",
"--output-file",
",".join(map(str, out_paths)),
"--predictions-output-file",
",".join(map(str, pred_paths)),
"--auto-names",
auto_names,
]
args = self.parser.parse_args(kebab_args)
_ = evaluate_from_args(args)
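# Per the assertions below, METRICS/ALL auto-name the metrics file as
# <stem>.outputs next to each input (overriding --output-file), and
# PREDS/ALL likewise auto-name predictions as <stem>.preds.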
expected_input_data = data_file.read_text("utf-8")
for i, p in enumerate(paths):
# Make sure it was not modified
assert p.read_text("utf-8") == expected_input_data
if auto_names == "METRICS" or auto_names == "ALL":
assert not out_paths[i].exists()
assert p.parent.joinpath(f"{p.stem}.outputs").exists()
else:
assert out_paths[i].exists()
assert not p.parent.joinpath(f"{p.stem}.outputs").exists()
if auto_names == "PREDS" or auto_names == "ALL":
assert not pred_paths[i].exists()
assert p.parent.joinpath(f"{p.stem}.preds").exists()
else:
assert pred_paths[i].exists()
assert not p.parent.joinpath(f"{p.stem}.preds").exists()
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Written by Ulrich Germann on the basis of contrib/server/client.py.
# This script simulates post-editing of MT output and incrementally
# updates the dynamic phrase tables in the moses server.
import xmlrpclib, datetime, argparse, sys, os, time
import moses
from subprocess import *
mserver = moses.MosesServer()
# We must perform some custom argument processing, as moses parameter
# specifications do not comply with the conventions of standard argument
# parsing packages; arguments bracketed by the isolated markers "--[" and
# "--]" are kept for this script, everything else goes to the moses server.
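# Example with hypothetical file names: -f moses.ini passes through to the
# moses server, while the bracketed arguments are kept for this script:
#   script.py -f moses.ini --[ --port 7447 --ref ref.txt --]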
def split_args(all_args):
"""
Split argument list all_args into arguments specific to this script and
arguments relating to the moses server. An isolated double dash acts as
the separator between the two types of arguments.
"""
my_args = []
mo_args = []
arglist = mo_args
i = 0
# IMPORTANT: the code below must be coordinated with
# - the evolution of moses command line arguments
# - mert-moses.pl
while i < len(all_args):
# print i,"MY_ARGS", my_args
# print i,"MO_ARGS", mo_args
if all_args[i] == "--[":
arglist = my_args
elif all_args[i] == "--]":
arglist = mo_args
elif all_args[i] == "-i" or all_args[i] == "-input-file":
my_args.extend(["-i",all_args[i+1]])
i += 1
elif all_args[i] == "-inputtype":
if all_args[i+1] != "0":
# not yet supported! Therefore:
errmsg = "FATAL ERROR: %s "%sys.argv[0]
errmsg += "only supports plain text input at this point."
raise Exception(errmsg)
# my_args.extend(["--input-type",all_args[i+1]])
i += 1
elif all_args[i] == "-lattice-samples":
# my_args.extend(["--lattice-sample",all_args[i+2]])
# my_args.extend(["--lattice-sample-file",all_args[i+1]])
# mo_args[i:i+3] = []
# i += 2
# This is not yet supported! Therefore:
errmsg = "FATAL ERROR: %s "%sys.argv[0]
errmsg += "does not yet support lattice sampling."
raise Exception(errmsg)
elif all_args[i] == "-n-best-list":
my_args.extend(["--nbest",all_args[i+2]])
my_args.extend(["--nbest-file",all_args[i+1]])
i += 2
elif all_args[i] == "-n-best-distinct":
my_args.extend(["-u"])
else:
arglist.append(all_args[i])
pass
i += 1
pass
return my_args,mo_args
def interpret_args(my_args):
"""
Parse script-specific argument list.
"""
aparser = argparse.ArgumentParser()
aparser.add_argument("-s","--server-cmd",default="mosesserver",
dest="servercmd", help="path to moses server command")
aparser.add_argument("--url",help="URL of external moses server.")
aparser.add_argument("-p","--port", type=int, default=7447,
help="port number to be used for server")
# input / output
aparser.add_argument("-i","--input",help="source file",default="-")
aparser.add_argument("-r","--ref",help="reference translation",default=None)
aparser.add_argument("-a","--aln",help="alignment",default=None)
aparser.add_argument("-o","--output",default="-",help="output file")
aparser.add_argument("-d","--debug",action="store_true",help="debug mode")
# moses reporting options
aparser.add_argument("-A","--with-alignment", dest="A",
help="include alignment in output", action="store_true")
aparser.add_argument("-G","--with-graph",type=bool, default=False, dest="G",
help="include search graph info in output")
aparser.add_argument("-T","--with-transopt",type=bool, default=False, dest = "T",
help="include translation options info in output")
aparser.add_argument("-F","--report-all-factors", action="store_true",dest="F",
help="report all factors")
aparser.add_argument("-n","--nbest",type=int,dest="nbest",default=0,
help="size of nbest list")
aparser.add_argument("-N","--nbest-file",dest="nbestFile",default=0,
help="output file for nbest list")
aparser.add_argument("-u","--nbest-distinct",type=bool,dest="U",default=False,
help="report all factors")
return aparser.parse_args(my_args)
def translate(proxy, args, line):
if type(line) is unicode:
param = { 'text' : line.strip().encode('utf8') }
elif type(line) is str:
param = { 'text' : line.strip() }
else:
raise Exception("Can't handle input")
if args.A: param['align'] = True
if args.T: param['topt'] = True
if args.F: param['report-all-factors'] = True
if args.nbest:
param['nbest'] = int(args.nbest)
param['add-score-breakdown'] = True
pass
if args.U:
param['nbest-distinct'] = True
pass
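# Retry for up to 20 attempts: XMLRPC faults abort immediately, I/O errors
# sleep and retry, and any other failure first checks whether the backend
# server process is still alive before backing off and retrying.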
attempts = 0
while attempts < 20:
t1 = time.time()
try:
return proxy.translate(param)
# except xmlrpclib.Fault as e:
# except xmlrpclib.ProtocolError as e:
# except xmlrpclib.ResponseError as e:
except xmlrpclib.Error as e:
time.sleep(2) # give all the stderr stuff a chance to be flushed
print >>sys.stderr," XMLRPC error:",e
print >>sys.stderr, "Input was"
print >>sys.stderr, param
sys.exit(1)
except IOError as e:
print >>sys.stderr,"I/O error({0}): {1}".format(e.errno, e.strerror)
time.sleep(5)
except:
serverstatus = mserver.process.poll()
if serverstatus is None:
print >>sys.stderr, "Connection failed after %f seconds"%(time.time()-t1)
attempts += 1
if attempts > 10:
time.sleep(10)
else:
time.sleep(5)
pass
else:
print >>sys.stderr, "Oopsidaisy, server exited with code %d (signal %d)"\
%(serverstatus/256,serverstatus%256)
pass
pass
pass
raise Exception("Exception: could not reach translation server.")
def read_data(fname):
"""
Read and return data (source, target or alignment) from file fname.
"""
if fname[-3:] == ".gz":
foo = Popen(["zcat",fname],stdout=PIPE)\
.communicate()[0]\
.strip().split('\n')
else:
foo = [x.strip() for x in open(fname).readlines()]
pass
return foo
def repack_result(idx,result):
global args
if args.nbest:
for h in result['nbest']:
fields = [idx,h['hyp'],h['fvals'],h['totalScore']]
for i in xrange(len(fields)):
if type(fields[i]) is unicode:
fields[i] = fields[i].encode('utf-8')
pass
pass
# print fields
print >>NBestFile,"%d ||| %s ||| %s ||| %f"%tuple(fields)
pass
pass
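# With alignment info, interleave the target tokens with source-span
# markers of the form |src-start src-end|.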
if 'align' in result:
t = result['text'].split()
span = ''
i = 0
k = 0
for a in result['align']:
k = a['tgt-start']
if k: print " ".join(t[i:k]).encode('utf8'),span,
i = k
span = "|%d %d|"%(a['src-start'],a['src-end'])
pass
print " ".join(t[k:]).encode('utf8'),span
pass
else:
print result['text'].encode('utf8')
pass
return
if __name__ == "__main__":
my_args, mo_args = split_args(sys.argv[1:])
# print "MY ARGS", my_args
# print "MO_ARGS", mo_args
global args
args = interpret_args(my_args)
if "-show-weights" in mo_args:
# this is for use during tuning, where moses is called to get a list of
# feature names
devnull = open(os.devnull,"w")
mo = Popen(mserver.cmd + mo_args,stdout=PIPE,stderr=devnull)
print mo.communicate()[0].strip()
sys.exit(0)
pass
if args.nbest:
if args.nbestFile:
NBestFile = open(args.nbestFile,"w")
else:
NBestFile = sys.stdout
pass
pass
ref = None
aln = None
if args.ref: ref = read_data(args.ref)
if args.aln: aln = read_data(args.aln)
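# When simulating post-editing (both reference and alignment given), make
# sure the server runs with --serial; presumably this keeps requests in
# input order so that phrase-table updates line up with the input.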
if ref and aln:
try:
mo_args.index("--serial")
except:
mo_args.append("--serial")
pass
pass
if args.url:
mserver.connect(args.url)
else:
mserver.start(args=mo_args, port=args.port, debug=args.debug)
pass
if (args.input == "-"):
line = sys.stdin.readline()
idx = 0
while line:
result = translate(mserver.proxy,args,line)
repack_result(idx,result)
line = sys.stdin.readline()
idx += 1
pass
pass
else:
src = read_data(args.input)
for i in xrange(len(src)):
result = translate(mserver.proxy,args,src[i])
repack_result(i,result)
if args.debug:
print >>sys.stderr, result['text'].encode('utf-8')
pass
if ref and aln:
result = mserver.proxy.updater({'source' : src[i],
'target' : ref[i],
'alignment' : aln[i]})
pass
pass
pass
pass
|
|
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.utils import timezone
from .models import (
Comment,
Rating,
)
from administration.models import (
Application,
Flag,
)
from submissions.models import Submission
from usermgmt.models import (
Notification,
Profile,
)
class BaseSocialViewTestCase(TestCase):
@classmethod
def setUpTestData(cls):
cls.foo = User.objects.create_user('foo', 'foo@example.com',
'a good password')
cls.foo.profile = Profile(profile_raw='Wow!',
display_name='Mx Foo Bar')
cls.foo.profile.save()
cls.bar = User.objects.create_user('bar', 'bar@example.com',
'another good password')
cls.bar.profile = Profile(profile_raw='Whoa', display_name='Bad Wolf')
cls.bar.profile.save()
class TestWatchUserView(BaseSocialViewTestCase):
def test_user_watched(self):
self.client.login(username='foo', password='a good password')
response = self.client.post(reverse('social:watch_user',
args=('bar',)),
follow=True)
self.assertContains(response, "You are now watching bar!")
self.assertIn(self.bar, self.foo.profile.watched_users.all())
notification = Notification.objects.get(pk=1)
self.assertEqual(notification.notification_type, Notification.WATCH)
def test_cant_watch_self(self):
self.client.login(username='foo', password='a good password')
response = self.client.post(reverse('social:watch_user',
args=('foo',)),
follow=True)
self.assertContains(response, "watch yourself.")
def test_already_watched(self):
self.client.login(username='foo', password='a good password')
self.foo.profile.watched_users.add(self.bar)
response = self.client.post(reverse('social:watch_user',
args=('bar',)),
follow=True)
self.assertContains(response, 'You are already watching this user.')
def test_404(self):
self.client.login(username='foo', password='a good password')
response = self.client.post(reverse('social:watch_user',
args=('bad-wolf',)),
follow=True)
self.assertEqual(response.status_code, 404)
class TestUnwatchUserView(BaseSocialViewTestCase):
def test_user_unwatched(self):
self.client.login(username='foo', password='a good password')
self.foo.profile.watched_users.add(self.bar)
response = self.client.post(reverse('social:unwatch_user',
args=('bar',)),
follow=True)
self.assertContains(response, "You are no longer watching bar.")
self.assertNotIn(self.bar, self.foo.profile.watched_users.all())
def test_notification_removed(self):
self.client.login(username='foo', password='a good password')
self.foo.profile.watched_users.add(self.bar)
notification = Notification(
target=self.bar,
source=self.foo,
notification_type=Notification.WATCH)
notification.save()
response = self.client.post(reverse('social:unwatch_user',
args=('bar',)),
follow=True)
self.assertContains(response, "You are no longer watching bar.")
self.assertNotIn(self.bar, self.foo.profile.watched_users.all())
with self.assertRaises(Notification.DoesNotExist):
notification.refresh_from_db()
def test_cant_unwatch_self(self):
self.client.login(username='foo', password='a good password')
response = self.client.post(reverse('social:unwatch_user',
args=('foo',)),
follow=True)
self.assertContains(response, "unwatch yourself.")
def test_not_watching(self):
self.client.login(username='foo', password='a good password')
response = self.client.post(reverse('social:unwatch_user',
args=('bar',)),
follow=True)
self.assertContains(response, 'You are not watching this user.')
def test_404(self):
self.client.login(username='foo', password='a good password')
response = self.client.post(reverse('social:unwatch_user',
args=('bad-wolf',)),
follow=True)
self.assertEqual(response.status_code, 404)
class TestBlockUserView(BaseSocialViewTestCase):
def test_user_blocked(self):
self.client.login(username='foo', password='a good password')
response = self.client.post(reverse('social:block_user',
args=('bar',)),
follow=True)
self.assertContains(response, 'You are now blocking bar from viewing '
'your profile and submissions!')
self.assertIn(self.bar, self.foo.profile.blocked_users.all())
def test_cant_block_self(self):
self.client.login(username='foo', password='a good password')
response = self.client.post(reverse('social:block_user',
args=('foo',)),
follow=True)
self.assertContains(response, "block yourself.")
def test_already_blocked(self):
self.client.login(username='foo', password='a good password')
self.foo.profile.blocked_users.add(self.bar)
response = self.client.post(reverse('social:block_user',
args=('bar',)),
follow=True)
self.assertContains(response, 'You are already blocking this user.')
def test_404(self):
self.client.login(username='foo', password='a good password')
response = self.client.post(reverse('social:block_user',
args=('bad-wolf',)),
follow=True)
self.assertEqual(response.status_code, 404)
class TestUnblockUserView(BaseSocialViewTestCase):
def test_user_unblocked(self):
self.client.login(username='foo', password='a good password')
self.foo.profile.blocked_users.add(self.bar)
response = self.client.get(reverse('social:unblock_user',
args=('bar',)),
follow=True)
self.assertContains(response, 'Are you sure that you want to do this?')
response = self.client.post(reverse('social:unblock_user',
args=('bar',)),
follow=True)
self.assertContains(response, 'You are no longer blocking bar')
def test_cant_unblock_self(self):
self.client.login(username='foo', password='a good password')
response = self.client.get(reverse('social:unblock_user',
args=('foo',)),
follow=True)
self.assertContains(response, "unblock yourself.")
def test_not_blocking(self):
self.client.login(username='foo', password='a good password')
response = self.client.get(reverse('social:unblock_user',
args=('bar',)),
follow=True)
self.assertContains(response, 'You are not blocking this user.')
def test_404(self):
self.client.login(username='foo', password='a good password')
response = self.client.get(reverse('social:unblock_user',
args=('bad-wolf',)),
follow=True)
self.assertEqual(response.status_code, 404)
class TestMessageUserView(BaseSocialViewTestCase):
pass
class BaseSocialSubmissionViewTestCase(BaseSocialViewTestCase):
@classmethod
def setUpTestData(cls):
super(BaseSocialSubmissionViewTestCase, cls).setUpTestData()
cls.submission = Submission(
owner=cls.foo,
title="Submission",
description_raw="Description",
content_raw="Content",
ctime=timezone.now())
cls.submission.save(update_content=True)
cls.comment = Comment(
owner=cls.bar,
target_object_owner=cls.foo,
object_model=cls.submission,
body_raw="Comment")
cls.comment.save()
class TestFavoriteSubmissionView(BaseSocialSubmissionViewTestCase):
def test_submission_favorited(self):
self.client.login(username='bar',
password='another good password')
response = self.client.post(reverse(
'social:favorite_submission', kwargs={
'username': 'foo',
'submission_id': 1,
'submission_slug': 'submission',
}), follow=True)
self.assertContains(response, 'Submission favorited!')
notification = Notification.objects.get(pk=1)
self.assertEqual(notification.notification_type,
Notification.FAVORITE)
def test_cant_favorite_own_submission(self):
self.client.login(username='foo',
password='a good password')
response = self.client.post(reverse(
'social:favorite_submission', kwargs={
'username': 'foo',
'submission_id': 1,
'submission_slug': 'submission',
}), follow=True)
self.assertContains(response, 'You cannot favorite your own '
'submission')
def test_cant_favorite_submission_if_blocked(self):
self.foo.profile.blocked_users.add(self.bar)
self.foo.profile.save()
self.client.login(username='bar',
password='another good password')
response = self.client.post(reverse(
'social:favorite_submission', kwargs={
'username': 'foo',
'submission_id': 1,
'submission_slug': 'submission',
}), follow=True)
self.assertContains(response, 'You cannot favorite this submission, '
'as you have been blocked by the author.',
status_code=403)
def test_cant_favorite_submission_if_already_favorited(self):
self.bar.profile.favorited_submissions.add(self.submission)
self.bar.profile.save()
self.client.login(username='bar',
password='another good password')
response = self.client.post(reverse(
'social:favorite_submission', kwargs={
'username': 'foo',
'submission_id': 1,
'submission_slug': 'submission',
}), follow=True)
self.assertContains(response, 'You have already favorited this '
'submission')
class TestUnfavoriteSubmissionView(BaseSocialSubmissionViewTestCase):
def test_submission_unfavorited(self):
self.bar.profile.favorited_submissions.add(self.submission)
Notification(
target=self.foo,
source=self.bar,
notification_type=Notification.FAVORITE,
subject=self.submission,
).save()
self.client.login(username='bar',
password='another good password')
response = self.client.post(reverse(
'social:unfavorite_submission', kwargs={
'username': 'foo',
'submission_id': 1,
'submission_slug': 'submission',
}), follow=True)
self.assertContains(response, 'Submission removed from favorites.')
self.assertEqual(self.foo.notification_set.count(), 0)
def test_cant_unfavorite_own_submission(self):
self.client.login(username='foo',
password='a good password')
response = self.client.post(reverse(
'social:unfavorite_submission', kwargs={
'username': 'foo',
'submission_id': 1,
'submission_slug': 'submission',
}), follow=True)
self.assertContains(response, 'You cannot unfavorite your own '
'submission')
def test_cant_unfavorite_submission_if_blocked(self):
self.foo.profile.blocked_users.add(self.bar)
self.foo.profile.save()
self.client.login(username='bar',
password='another good password')
response = self.client.post(reverse(
'social:unfavorite_submission', kwargs={
'username': 'foo',
'submission_id': 1,
'submission_slug': 'submission',
}), follow=True)
self.assertContains(response, 'You cannot unfavorite this submission, '
'as you have been blocked by the author.',
status_code=403)
def test_cant_unfavorite_submission_if_already_unfavorited(self):
self.client.login(username='bar',
password='another good password')
response = self.client.post(reverse(
'social:unfavorite_submission', kwargs={
'username': 'foo',
'submission_id': 1,
'submission_slug': 'submission',
}), follow=True)
self.assertContains(response, "You haven't yet favorited this "
'submission')
class TestRateSubmissionView(BaseSocialSubmissionViewTestCase):
def test_submission_rated(self):
self.client.login(username='bar',
password='another good password')
response = self.client.post(reverse(
'social:rate_submission', kwargs={
'username': 'foo',
'submission_id': 1,
'submission_slug': 'submission'
}), {'rating': 3}, follow=True)
self.assertContains(response, 'Submission successfully rated.')
notification = Notification.objects.get(pk=1)
self.assertEqual(notification.notification_type, Notification.RATING)
def test_submission_rerated(self):
rating = Rating(
owner=self.bar,
submission=self.submission,
rating=5)
rating.save()
self.client.login(username='bar',
password='another good password')
response = self.client.post(reverse(
'social:rate_submission', kwargs={
'username': 'foo',
'submission_id': 1,
'submission_slug': 'submission'
}), {'rating': 3}, follow=True)
rating.refresh_from_db()
self.assertContains(response, 'Existing rating updated.')
self.assertEqual(rating.rating, 3)
notification = Notification.objects.get(pk=1)
self.assertEqual(notification.notification_type, Notification.RATING)
def test_invalid_rating(self):
self.client.login(username='bar',
password='another good password')
response = self.client.post(reverse(
'social:rate_submission', kwargs={
'username': 'foo',
'submission_id': 1,
'submission_slug': 'submission'
}), {'rating': 'asdf'}, follow=True)
self.assertContains(response, 'Invalid rating specified.')
response = self.client.post(reverse(
'social:rate_submission', kwargs={
'username': 'foo',
'submission_id': 1,
'submission_slug': 'submission'
}), {'rating': -42}, follow=True)
self.assertContains(response, 'Invalid rating specified.')
def test_cant_rate_own_submission(self):
self.client.login(username='foo',
password='a good password')
response = self.client.post(reverse(
'social:rate_submission', kwargs={
'username': 'foo',
'submission_id': 1,
'submission_slug': 'submission'
}), {'rating': 5}, follow=True)
self.assertContains(response, 'You cannot rate your own submission.')
def test_cant_rate_submission_if_blocked(self):
self.foo.profile.blocked_users.add(self.bar)
self.foo.profile.save()
self.client.login(username='bar',
password='another good password')
response = self.client.post(reverse(
'social:rate_submission', kwargs={
'username': 'foo',
'submission_id': 1,
'submission_slug': 'submission'
}), {'rating': 3}, follow=True)
self.assertContains(response, 'You cannot rate this submission, as '
'you have been blocked by the author.',
status_code=403)
class TestEnjoySubmissionView(BaseSocialSubmissionViewTestCase):
def test_submission_enjoyed(self):
self.client.login(username='bar',
password='another good password')
response = self.client.post(reverse(
'social:enjoy_submission', kwargs={
'username': 'foo',
'submission_id': 1,
'submission_slug': 'submission',
}), follow=True)
self.assertContains(response, 'Enjoy vote added to submission!')
notification = Notification.objects.get(pk=1)
self.assertEqual(notification.notification_type, Notification.ENJOY)
def test_cant_enjoy_submission_if_disallowed(self):
self.submission.can_enjoy = False
self.submission.save(update_content=True)
self.client.login(username='bar',
password='another good password')
response = self.client.post(reverse(
'social:enjoy_submission', kwargs={
'username': 'foo',
'submission_id': 1,
'submission_slug': 'submission',
}), follow=True)
self.assertContains(response, 'The author has disabled enjoy voting '
'on this submission.')
def test_cant_enjoy_own_submission(self):
self.client.login(username='foo',
password='a good password')
response = self.client.post(reverse(
'social:enjoy_submission', kwargs={
'username': 'foo',
'submission_id': 1,
'submission_slug': 'submission',
}), follow=True)
self.assertContains(response, 'You cannot add enjoy votes to your '
'own submission.')
def test_cant_enjoy_submissions_if_blocked(self):
self.foo.profile.blocked_users.add(self.bar)
self.foo.profile.save()
self.client.login(username='bar',
password='another good password')
response = self.client.post(reverse(
'social:enjoy_submission', kwargs={
'username': 'foo',
'submission_id': 1,
'submission_slug': 'submission',
}), follow=True)
self.assertContains(response, 'You cannot add enjoy votes to this '
'submission, as you have been blocked by the '
'author.', status_code=403)
class TestCommentModel(BaseSocialSubmissionViewTestCase):
@classmethod
def setUpTestData(cls):
super(TestCommentModel, cls).setUpTestData()
cls.comment = Comment(
owner=cls.foo,
target_object_owner=cls.foo,
object_model=cls.submission,
body_raw='foo')
cls.comment.save()
def test_str(self):
self.assertEqual(
self.comment.__str__(),
"Mx Foo Bar's comment on Submission by ~foo (id:1)")
def test_unicode(self):
self.assertEqual(
self.comment.__unicode__(),
"Mx Foo Bar's comment on Submission by ~foo (id:1)")
def test_get_active_flag(self):
flag = Flag(
flagged_by=self.foo,
object_model=self.comment,
flagged_object_owner=self.foo,
flag_type=Flag.CONTENT,
subject='user + submission1 + content + active',
body_raw='Test flag')
flag.save()
self.assertEqual(self.comment.get_active_flag(), flag)
class TestPostCommentView(BaseSocialSubmissionViewTestCase):
def test_form_renders_if_logged_in(self):
self.client.login(username='bar',
password='another good password')
response = self.client.get(self.submission.get_absolute_url())
self.assertContains(
response,
'<input type="hidden" name="parent" value="{}" />'.format(
self.comment.id))
self.assertContains(
response,
'<input id="id_object_id" name="object_id" type="hidden" '
'value="{}" />'.format(self.submission.id),
2)
def test_form_not_present_if_logged_out(self):
response = self.client.get(self.submission.get_absolute_url())
self.assertNotContains(
response,
'Post reply')
self.assertNotContains(
response,
'Add comment')
def test_respects_can_comment_flag(self):
self.submission.can_comment = False
self.submission.save()
self.client.login(username='bar',
password='another good password')
response = self.client.get(self.submission.get_absolute_url())
self.assertNotContains(
response,
'Post reply')
self.assertNotContains(
response,
'Add comment')
def test_post_comment(self):
self.client.login(username='bar',
password='another good password')
ctype = ContentType.objects.get(app_label='submissions',
model='submission')
response = self.client.post(reverse('social:post_comment'),
{
'content_type': ctype.id,
'object_id': self.submission.id,
'body_raw': 'A Second Comment',
}, follow=True)
self.assertEqual(Comment.objects.count(), 2)
self.assertContains(response, 'A Second Comment')
def test_nest_comment(self):
self.client.login(username='bar',
password='another good password')
ctype = ContentType.objects.get(app_label='submissions',
model='submission')
response = self.client.post(reverse('social:post_comment'),
{
'content_type': ctype.id,
'object_id': self.submission.id,
'body_raw': 'A Second Comment',
'parent': self.comment.id,
}, follow=True)
self.assertEqual(Comment.objects.count(), 2)
self.assertContains(response, 'A Second Comment')
self.assertContains(response, '<div class="comment-reply">')
def test_notifications(self):
baz = User.objects.create_user('baz', 'baz@example.com',
'another good password')
baz.profile = Profile(profile_raw='Bazzo', display_name='Bad Wolf')
baz.profile.save()
self.client.login(username='baz',
password='another good password')
ctype = ContentType.objects.get(app_label='submissions',
model='submission')
self.client.post(reverse('social:post_comment'),
{
'content_type': ctype.id,
'object_id': self.submission.id,
'body_raw': 'A Second Comment',
'parent': self.comment.id,
}, follow=True)
self.assertEqual(Comment.objects.count(), 2)
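# Replying as a third user should notify both the parent comment's owner
# (COMMENT_REPLY) and the submission's owner (SUBMISSION_COMMENT).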
self.assertEqual(
[x.notification_type for x in Notification.objects.all()],
[
Notification.COMMENT_REPLY,
Notification.SUBMISSION_COMMENT,
])
def test_fail(self):
self.submission.can_comment = False
self.submission.save()
self.client.login(username='bar',
password='another good password')
ctype = ContentType.objects.get(app_label='submissions',
model='submission')
response = self.client.post(reverse('social:post_comment'),
{
'content_type': ctype.id,
'object_id': self.submission.id,
'body_raw': 'A Second Comment',
}, follow=True)
self.assertContains(response, 'There was an error posting that '
'comment')
class TestDeleteCommentView(BaseSocialSubmissionViewTestCase):
def test_renders_form_if_applicable(self):
# Logged out users do not see
response = self.client.get(self.submission.get_absolute_url())
self.assertNotContains(
response,
'Delete comment')
# Unrelated users do not see
baz = User.objects.create_user('baz', 'baz@example.com',
'another good password')
baz.profile = Profile(profile_raw='Bazzo', display_name='Bad Wolf')
baz.profile.save()
self.client.login(username='baz',
password='another good password')
response = self.client.get(self.submission.get_absolute_url())
self.assertNotContains(
response,
'Delete comment')
# Comment owner sees
self.client.logout()
self.client.login(username='bar',
password='another good password')
response = self.client.get(self.submission.get_absolute_url())
self.assertContains(
response,
'Delete comment')
# Page owner sees
self.client.logout()
self.client.login(username='foo',
password='a good password')
response = self.client.get(self.submission.get_absolute_url())
self.assertContains(
response,
'Delete comment')
def test_renders_deleted_comment(self):
self.comment.deleted = True
self.comment.save()
response = self.client.get(self.submission.get_absolute_url())
self.assertContains(
response,
'This comment has been deleted by the commenter.')
self.comment.deleted_by_object_owner = True
self.comment.save()
response = self.client.get(self.submission.get_absolute_url())
self.assertContains(
response,
'This comment has been deleted by the page owner.')
def test_comment_owner_delete(self):
self.client.login(username='bar',
password='another good password')
response = self.client.post(reverse('social:delete_comment'),
{
'comment_id': self.comment.id,
}, follow=True)
self.assertContains(
response,
'This comment has been deleted by the commenter.')
def test_target_owner_delete(self):
self.client.login(username='foo',
password='a good password')
response = self.client.post(reverse('social:delete_comment'),
{
'comment_id': self.comment.id,
}, follow=True)
self.assertContains(
response,
'This comment has been deleted by the page owner.')
def test_notifications_wiped(self):
baz = User.objects.create_user('baz', 'baz@example.com',
'another good password')
baz.profile = Profile(profile_raw='Bazzo', display_name='Bad Wolf')
baz.profile.save()
self.client.login(username='baz',
password='another good password')
ctype = ContentType.objects.get(app_label='submissions',
model='submission')
self.client.post(reverse('social:post_comment'),
{
'content_type': ctype.id,
'object_id': self.submission.id,
'body_raw': 'A Second Comment',
'parent': self.comment.id,
}, follow=True)
self.assertEqual(Notification.objects.count(), 2)
self.client.post(reverse('social:delete_comment'),
{
'comment_id': 2,
}, follow=True)
self.assertEqual(Notification.objects.count(), 0)
def test_forbidden(self):
baz = User.objects.create_user('baz', 'baz@example.com',
'another good password')
baz.profile = Profile(profile_raw='Bazzo', display_name='Bad Wolf')
baz.profile.save()
self.client.login(username='baz',
password='another good password')
response = self.client.post(reverse('social:delete_comment'),
{
'comment_id': self.comment.id,
}, follow=True)
self.assertContains(
response,
'You may only delete a comment if you are the poster or the page '
'owner')
class TestNotificationBadges(BaseSocialSubmissionViewTestCase):
def test_empty_badges(self):
self.client.login(username='foo',
password='a good password')
response = self.client.get(reverse('core:front'))
self.assertContains(response, '<span class="badge"></span>', count=6)
def test_badges(self):
Notification(
target=self.foo,
source=self.bar,
notification_type=Notification.WATCH,
).save()
Notification(
target=self.foo,
source=self.bar,
subject=self.submission,
notification_type=Notification.FAVORITE,
).save()
Notification(
target=self.foo,
source=self.bar,
notification_type=Notification.MESSAGE,
).save()
Notification(
target=self.foo,
source=self.bar,
notification_type=Notification.APPLICATION_CLAIMED).save()
self.client.login(username='foo',
password='a good password')
response = self.client.get(reverse('core:front'))
self.assertContains(response, '<span class="badge">3</span>')
self.assertContains(response, '<span class="badge">1</span>',
count=5)
class TestViewNotificationsCategoriesView(BaseSocialSubmissionViewTestCase):
def test_no_notifications(self):
self.client.login(username='foo',
password='a good password')
response = self.client.get(reverse(
'social:view_notifications_categories'))
self.assertContains(response, '<h2>No notifications <small>Lucky '
'you!</small></h2>')
def test_notifications(self):
Notification(
target=self.foo,
source=self.bar,
notification_type=Notification.WATCH,
).save()
rating = Rating(owner=self.bar, submission=self.submission, rating=3)
rating.save()
Notification(
target=self.foo,
source=self.bar,
notification_type=Notification.RATING,
subject=rating,
).save()
Notification(
target=self.foo,
source=self.bar,
notification_type=Notification.ENJOY,
subject=self.submission,
).save()
Notification(
target=self.foo,
source=self.bar,
subject=self.submission,
notification_type=Notification.FAVORITE,
).save()
comment = Comment(
owner=self.bar,
object_model=self.submission,
target_object_owner=self.foo,
body_raw='asdf'
)
comment.save()
Notification(
target=self.foo,
source=self.bar,
notification_type=Notification.SUBMISSION_COMMENT,
subject=comment,
).save()
Notification(
target=self.foo,
source=self.bar,
notification_type=Notification.COMMENT_REPLY,
subject=comment,
).save()
Notification(
target=self.foo,
notification_type=Notification.PROMOTE,
subject=self.submission,
).save()
Notification(
target=self.foo,
source=self.bar,
notification_type=Notification.HIGHLIGHT,
subject=self.submission,
).save()
Notification(
target=self.foo,
source=self.bar,
notification_type=Notification.MESSAGE,
).save()
app = Application(
applicant=self.foo,
application_type=Application.AD)
app.save()
Notification(
target=self.foo,
source=self.bar,
subject=app,
notification_type=Notification.APPLICATION_CLAIMED).save()
Notification(
target=self.foo,
source=self.bar,
subject=app,
notification_type=Notification.APPLICATION_RESOLVED).save()
self.client.login(username='foo',
password='a good password')
response = self.client.get(reverse(
'social:view_notifications_categories'))
self.assertContains(response, '>Administration notifications</h2>')
self.assertContains(response, '<h3>Application resolutions</h3>')
self.assertContains(response, '<h3>Application claims</h3>')
self.assertContains(response, '>Messages</h2>')
self.assertContains(response, '>User Notifications</h2>')
self.assertContains(response, '>Submission Notifications</h2>')
self.assertContains(response, '<h3>Favorites</h3>')
self.assertContains(response, '<h3>Ratings</h3>')
self.assertContains(response, '<h3>Enjoy votes</h3>')
self.assertContains(response, '<h3>Submission comments</h3>')
self.assertContains(response, '<h3>Comment replies</h3>')
self.assertContains(response, '<h3>Promotions</h3>')
self.assertContains(response, '<h3>Highlights</h3>')
self.assertContains(response, '<input type="checkbox" '
'name="notification_id"', count=11)
def test_expired_notifications(self):
self.foo.profile.expired_notifications = 5
self.foo.profile.save()
self.client.login(username='foo',
password='a good password')
response = self.client.get(reverse(
'social:view_notifications_categories'))
self.assertContains(response, 'You have 5 notifications that have '
'expired.')
class TestViewNotificationsTimelineView(BaseSocialSubmissionViewTestCase):
def test_no_notifications(self):
self.client.login(username='foo',
password='a good password')
response = self.client.get(reverse(
'social:view_notifications_timeline'))
self.assertContains(response, '<h2>No notifications <small>Lucky '
'you!</small></h2>')
def test_notifications(self):
Notification(
target=self.foo,
source=self.bar,
notification_type=Notification.WATCH,
).save()
Notification(
target=self.foo,
source=self.bar,
subject=self.submission,
notification_type=Notification.FAVORITE,
).save()
Notification(
target=self.foo,
source=self.bar,
notification_type=Notification.MESSAGE,
).save()
self.client.login(username='foo',
password='a good password')
response = self.client.get(reverse(
'social:view_notifications_timeline'))
self.assertContains(response, '"list-group-item striped-item"',
count=3)
def test_notifications_paginate(self):
for i in range(1, 100):
Notification(
target=self.foo,
source=self.bar,
subject=self.submission,
notification_type=Notification.ENJOY,
).save()
self.client.login(username='foo',
password='a good password')
response = self.client.get(reverse(
'social:view_notifications_timeline'))
self.assertContains(response, '>(current)<')
self.assertContains(response, '2</a>')
response = self.client.get(reverse(
'social:view_notifications_timeline', kwargs={
'page': 50,
}))
self.assertContains(response, '>(current)<')
self.assertContains(response, '1</a>')
class TestRemoveNotificationsView(BaseSocialViewTestCase):
def test_removes_notifications(self):
Notification(
target=self.foo,
source=self.bar,
notification_type=Notification.WATCH,
).save()
Notification(
target=self.foo,
source=self.bar,
notification_type=Notification.WATCH,
).save()
self.client.login(username='foo',
password='a good password')
response = self.client.post(reverse('social:remove_notifications'),
{'notification_id': [1, 2]},
follow=True)
self.assertContains(response, 'Notifications deleted.')
self.assertContains(response, '<h2>No notifications <small>Lucky '
'you!</small></h2>')
def test_ignores_missing_notifications(self):
self.client.login(username='foo',
password='a good password')
response = self.client.post(reverse('social:remove_notifications'),
{'notification_id': 42},
follow=True)
self.assertContains(response, 'Notifications deleted.')
self.assertContains(response, '<h2>No notifications <small>Lucky '
'you!</small></h2>')
def test_warns_and_stops_on_not_own_notification(self):
Notification(
target=self.bar,
source=self.foo,
notification_type=Notification.WATCH,
).save()
self.client.login(username='foo',
password='a good password')
response = self.client.post(reverse('social:remove_notifications'),
{'notification_id': 1},
follow=True)
self.assertContains(response, 'Permission denied', status_code=403)
class TestNukeNotificationsView(BaseSocialSubmissionViewTestCase):
def test_nukes_notifications(self):
Notification(
target=self.foo,
source=self.bar,
notification_type=Notification.WATCH,
).save()
rating = Rating(owner=self.bar, submission=self.submission, rating=3)
rating.save()
Notification(
target=self.foo,
source=self.bar,
notification_type=Notification.RATING,
subject=rating,
).save()
Notification(
target=self.foo,
source=self.bar,
notification_type=Notification.ENJOY,
subject=self.submission,
).save()
Notification(
target=self.foo,
source=self.bar,
subject=self.submission,
notification_type=Notification.FAVORITE,
).save()
comment = Comment(
owner=self.bar,
object_model=self.submission,
target_object_owner=self.foo,
body_raw='asdf'
)
comment.save()
Notification(
target=self.foo,
source=self.bar,
notification_type=Notification.SUBMISSION_COMMENT,
subject=comment,
).save()
Notification(
target=self.foo,
source=self.bar,
notification_type=Notification.COMMENT_REPLY,
subject=comment,
).save()
Notification(
target=self.foo,
notification_type=Notification.PROMOTE,
subject=self.submission,
).save()
Notification(
target=self.foo,
source=self.bar,
notification_type=Notification.HIGHLIGHT,
subject=self.submission,
).save()
Notification(
target=self.foo,
source=self.bar,
notification_type=Notification.MESSAGE,
).save()
self.client.login(username='foo',
password='a good password')
response = self.client.post(reverse('social:nuke_notifications'),
follow=True)
self.assertContains(response, 'All notifications nuked.')
self.assertContains(response, '<h2>No notifications <small>Lucky '
'you!</small></h2>')
def test_clears_expired_notifications(self):
self.foo.profile.expired_notifications = 5
self.foo.profile.save()
self.client.login(username='foo',
password='a good password')
response = self.client.post(reverse('social:nuke_notifications'),
follow=True)
self.assertContains(response, 'All notifications nuked.')
self.assertContains(response, '<h2>No notifications <small>Lucky '
'you!</small></h2>')
|