# Copyright 2013 Josh Durgin
# Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from lxml import etree
import mock
from oslo.config import cfg
from oslo.serialization import jsonutils
from oslo.utils import timeutils
import webob
from webob import exc
from nova.api.openstack.compute.contrib import assisted_volume_snapshots as \
assisted_snaps
from nova.api.openstack.compute.contrib import volumes
from nova.api.openstack.compute.plugins.v3 import volumes as volumes_v3
from nova.api.openstack import extensions
from nova.compute import api as compute_api
from nova.compute import flavors
from nova import context
from nova import db
from nova import exception
from nova import objects
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_block_device
from nova.tests.unit import fake_instance
from nova.volume import cinder
CONF = cfg.CONF
CONF.import_opt('password_length', 'nova.utils')
FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
FAKE_UUID_A = '00000000-aaaa-aaaa-aaaa-000000000000'
FAKE_UUID_B = 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb'
FAKE_UUID_C = 'cccccccc-cccc-cccc-cccc-cccccccccccc'
FAKE_UUID_D = 'dddddddd-dddd-dddd-dddd-dddddddddddd'
IMAGE_UUID = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
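# The fake_* helpers below stand in for nova.compute and nova.volume.cinder API
# calls so these tests never touch a real compute service or Cinder endpoint.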
def fake_get_instance(self, context, instance_id, want_objects=False,
expected_attrs=None):
return fake_instance.fake_instance_obj(context, **{'uuid': instance_id})
def fake_get_volume(self, context, id):
return {'id': 'woot'}
def fake_attach_volume(self, context, instance, volume_id, device):
pass
def fake_detach_volume(self, context, instance, volume):
pass
def fake_swap_volume(self, context, instance,
old_volume_id, new_volume_id):
pass
def fake_create_snapshot(self, context, volume, name, description):
return {'id': 123,
'volume_id': 'fakeVolId',
'status': 'available',
'volume_size': 123,
'created_at': '2013-01-01 00:00:01',
'display_name': 'myVolumeName',
'display_description': 'myVolumeDescription'}
def fake_delete_snapshot(self, context, snapshot_id):
pass
def fake_compute_volume_snapshot_delete(self, context, volume_id, snapshot_id,
delete_info):
pass
def fake_compute_volume_snapshot_create(self, context, volume_id,
create_info):
pass
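# Stub for db.block_device_mapping_get_all_by_instance: returns two volume-backed
# BDMs (FAKE_UUID_A on /dev/fake0, FAKE_UUID_B on /dev/fake1) for any instance.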
def fake_bdms_get_all_by_instance(context, instance_uuid, use_slave=False):
return [fake_block_device.FakeDbBlockDeviceDict(
{'id': 1,
'instance_uuid': instance_uuid,
'device_name': '/dev/fake0',
'delete_on_termination': 'False',
'source_type': 'volume',
'destination_type': 'volume',
'snapshot_id': None,
'volume_id': FAKE_UUID_A,
'volume_size': 1}),
fake_block_device.FakeDbBlockDeviceDict(
{'id': 2,
'instance_uuid': instance_uuid,
'device_name': '/dev/fake1',
'delete_on_termination': 'False',
'source_type': 'volume',
'destination_type': 'volume',
'snapshot_id': None,
'volume_id': FAKE_UUID_B,
'volume_size': 1})]
class BootFromVolumeTest(test.TestCase):
def setUp(self):
super(BootFromVolumeTest, self).setUp()
self.stubs.Set(compute_api.API, 'create',
self._get_fake_compute_api_create())
fakes.stub_out_nw_api(self.stubs)
self._block_device_mapping_seen = None
self._legacy_bdm_seen = True
self.flags(
osapi_compute_extension=[
'nova.api.openstack.compute.contrib.select_extensions'],
osapi_compute_ext_list=['Volumes', 'Block_device_mapping_v2_boot'])
def _get_fake_compute_api_create(self):
def _fake_compute_api_create(cls, context, instance_type,
image_href, **kwargs):
self._block_device_mapping_seen = kwargs.get(
'block_device_mapping')
self._legacy_bdm_seen = kwargs.get('legacy_bdm')
inst_type = flavors.get_flavor_by_flavor_id(2)
resv_id = None
return ([{'id': 1,
'display_name': 'test_server',
'uuid': FAKE_UUID,
'instance_type': inst_type,
'access_ip_v4': '1.2.3.4',
'access_ip_v6': 'fead::1234',
'image_ref': IMAGE_UUID,
'user_id': 'fake',
'project_id': 'fake',
'created_at': datetime.datetime(2010, 10, 10, 12, 0, 0),
'updated_at': datetime.datetime(2010, 11, 11, 11, 0, 0),
'progress': 0,
'fixed_ips': []
}], resv_id)
return _fake_compute_api_create
def test_create_root_volume(self):
body = dict(server=dict(
name='test_server', imageRef=IMAGE_UUID,
flavorRef=2, min_count=1, max_count=1,
block_device_mapping=[dict(
volume_id=1,
device_name='/dev/vda',
virtual='root',
delete_on_termination=False,
)]
))
req = webob.Request.blank('/v2/fake/os-volumes_boot')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers['content-type'] = 'application/json'
res = req.get_response(fakes.wsgi_app(
init_only=('os-volumes_boot', 'servers')))
self.assertEqual(res.status_int, 202)
server = jsonutils.loads(res.body)['server']
self.assertEqual(FAKE_UUID, server['id'])
self.assertEqual(CONF.password_length, len(server['adminPass']))
self.assertEqual(len(self._block_device_mapping_seen), 1)
self.assertTrue(self._legacy_bdm_seen)
self.assertEqual(self._block_device_mapping_seen[0]['volume_id'], 1)
self.assertEqual(self._block_device_mapping_seen[0]['device_name'],
'/dev/vda')
def test_create_root_volume_bdm_v2(self):
body = dict(server=dict(
name='test_server', imageRef=IMAGE_UUID,
flavorRef=2, min_count=1, max_count=1,
block_device_mapping_v2=[dict(
source_type='volume',
uuid=1,
device_name='/dev/vda',
boot_index=0,
delete_on_termination=False,
)]
))
req = webob.Request.blank('/v2/fake/os-volumes_boot')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers['content-type'] = 'application/json'
res = req.get_response(fakes.wsgi_app(
init_only=('os-volumes_boot', 'servers')))
self.assertEqual(res.status_int, 202)
server = jsonutils.loads(res.body)['server']
self.assertEqual(FAKE_UUID, server['id'])
self.assertEqual(CONF.password_length, len(server['adminPass']))
self.assertEqual(len(self._block_device_mapping_seen), 1)
self.assertFalse(self._legacy_bdm_seen)
self.assertEqual(self._block_device_mapping_seen[0]['volume_id'], 1)
self.assertEqual(self._block_device_mapping_seen[0]['boot_index'],
0)
self.assertEqual(self._block_device_mapping_seen[0]['device_name'],
'/dev/vda')
class VolumeApiTestV21(test.TestCase):
url_prefix = '/v2/fake'
def setUp(self):
super(VolumeApiTestV21, self).setUp()
fakes.stub_out_networking(self.stubs)
fakes.stub_out_rate_limiting(self.stubs)
self.stubs.Set(cinder.API, "delete", fakes.stub_volume_delete)
self.stubs.Set(cinder.API, "get", fakes.stub_volume_get)
self.stubs.Set(cinder.API, "get_all", fakes.stub_volume_get_all)
self.flags(
osapi_compute_extension=[
'nova.api.openstack.compute.contrib.select_extensions'],
osapi_compute_ext_list=['Volumes'])
self.context = context.get_admin_context()
self.app = self._get_app()
def _get_app(self):
return fakes.wsgi_app_v21()
def test_volume_create(self):
self.stubs.Set(cinder.API, "create", fakes.stub_volume_create)
vol = {"size": 100,
"display_name": "Volume Test Name",
"display_description": "Volume Test Desc",
"availability_zone": "zone1:host1"}
body = {"volume": vol}
req = webob.Request.blank(self.url_prefix + '/os-volumes')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers['content-type'] = 'application/json'
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
resp_dict = jsonutils.loads(resp.body)
self.assertIn('volume', resp_dict)
self.assertEqual(resp_dict['volume']['size'],
vol['size'])
self.assertEqual(resp_dict['volume']['displayName'],
vol['display_name'])
self.assertEqual(resp_dict['volume']['displayDescription'],
vol['display_description'])
self.assertEqual(resp_dict['volume']['availabilityZone'],
vol['availability_zone'])
def test_volume_create_bad(self):
def fake_volume_create(self, context, size, name, description,
snapshot, **param):
raise exception.InvalidInput(reason="bad request data")
self.stubs.Set(cinder.API, "create", fake_volume_create)
vol = {"size": '#$?',
"display_name": "Volume Test Name",
"display_description": "Volume Test Desc",
"availability_zone": "zone1:host1"}
body = {"volume": vol}
req = fakes.HTTPRequest.blank(self.url_prefix + '/os-volumes')
self.assertRaises(webob.exc.HTTPBadRequest,
volumes.VolumeController().create, req, body)
def test_volume_index(self):
req = webob.Request.blank(self.url_prefix + '/os-volumes')
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
def test_volume_detail(self):
req = webob.Request.blank(self.url_prefix + '/os-volumes/detail')
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
def test_volume_show(self):
req = webob.Request.blank(self.url_prefix + '/os-volumes/123')
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
def test_volume_show_no_volume(self):
self.stubs.Set(cinder.API, "get", fakes.stub_volume_notfound)
req = webob.Request.blank(self.url_prefix + '/os-volumes/456')
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 404)
self.assertIn('Volume 456 could not be found.', resp.body)
def test_volume_delete(self):
req = webob.Request.blank(self.url_prefix + '/os-volumes/123')
req.method = 'DELETE'
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 202)
def test_volume_delete_no_volume(self):
self.stubs.Set(cinder.API, "delete", fakes.stub_volume_notfound)
req = webob.Request.blank(self.url_prefix + '/os-volumes/456')
req.method = 'DELETE'
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 404)
self.assertIn('Volume 456 could not be found.', resp.body)
class VolumeApiTestV2(VolumeApiTestV21):
def setUp(self):
super(VolumeApiTestV2, self).setUp()
self.flags(
osapi_compute_extension=[
'nova.api.openstack.compute.contrib.select_extensions'],
osapi_compute_ext_list=['Volumes'])
self.context = context.get_admin_context()
self.app = self._get_app()
def _get_app(self):
return fakes.wsgi_app()
class VolumeAttachTests(test.TestCase):
def setUp(self):
super(VolumeAttachTests, self).setUp()
self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
fake_bdms_get_all_by_instance)
self.stubs.Set(compute_api.API, 'get', fake_get_instance)
self.stubs.Set(cinder.API, 'get', fake_get_volume)
self.context = context.get_admin_context()
self.expected_show = {'volumeAttachment':
{'device': '/dev/fake0',
'serverId': FAKE_UUID,
'id': FAKE_UUID_A,
'volumeId': FAKE_UUID_A
}}
self.ext_mgr = extensions.ExtensionManager()
self.ext_mgr.extensions = {}
self.attachments = volumes.VolumeAttachmentController(self.ext_mgr)
def test_show(self):
req = webob.Request.blank('/v2/servers/id/os-volume_attachments/uuid')
req.method = 'POST'
req.body = jsonutils.dumps({})
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
result = self.attachments.show(req, FAKE_UUID, FAKE_UUID_A)
self.assertEqual(self.expected_show, result)
@mock.patch.object(compute_api.API, 'get',
side_effect=exception.InstanceNotFound(instance_id=FAKE_UUID))
def test_show_no_instance(self, mock_mr):
req = webob.Request.blank('/v2/servers/id/os-volume_attachments/uuid')
req.method = 'POST'
req.body = jsonutils.dumps({})
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
self.assertRaises(exc.HTTPNotFound,
self.attachments.show,
req,
FAKE_UUID,
FAKE_UUID_A)
@mock.patch.object(objects.BlockDeviceMappingList,
'get_by_instance_uuid', return_value=None)
def test_show_no_bdms(self, mock_mr):
req = webob.Request.blank('/v2/servers/id/os-volume_attachments/uuid')
req.method = 'POST'
req.body = jsonutils.dumps({})
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
self.assertRaises(exc.HTTPNotFound,
self.attachments.show,
req,
FAKE_UUID,
FAKE_UUID_A)
def test_show_bdms_no_mountpoint(self):
FAKE_UUID_NOTEXIST = '00000000-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
req = webob.Request.blank('/v2/servers/id/os-volume_attachments/uuid')
req.method = 'POST'
req.body = jsonutils.dumps({})
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
self.assertRaises(exc.HTTPNotFound,
self.attachments.show,
req,
FAKE_UUID,
FAKE_UUID_NOTEXIST)
def test_detach(self):
self.stubs.Set(compute_api.API,
'detach_volume',
fake_detach_volume)
req = webob.Request.blank('/v2/servers/id/os-volume_attachments/uuid')
req.method = 'DELETE'
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
result = self.attachments.delete(req, FAKE_UUID, FAKE_UUID_A)
self.assertEqual('202 Accepted', result.status)
def test_detach_vol_not_found(self):
self.stubs.Set(compute_api.API,
'detach_volume',
fake_detach_volume)
req = webob.Request.blank('/v2/servers/id/os-volume_attachments/uuid')
req.method = 'DELETE'
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
self.assertRaises(exc.HTTPNotFound,
self.attachments.delete,
req,
FAKE_UUID,
FAKE_UUID_C)
@mock.patch('nova.objects.BlockDeviceMapping.is_root',
new_callable=mock.PropertyMock)
def test_detach_vol_root(self, mock_isroot):
req = webob.Request.blank('/v2/servers/id/os-volume_attachments/uuid')
req.method = 'DELETE'
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
mock_isroot.return_value = True
self.assertRaises(exc.HTTPForbidden,
self.attachments.delete,
req,
FAKE_UUID,
FAKE_UUID_A)
def test_detach_volume_from_locked_server(self):
def fake_detach_volume_from_locked_server(self, context,
instance, volume):
raise exception.InstanceIsLocked(instance_uuid=instance['uuid'])
self.stubs.Set(compute_api.API,
'detach_volume',
fake_detach_volume_from_locked_server)
req = webob.Request.blank('/v2/servers/id/os-volume_attachments/uuid')
req.method = 'DELETE'
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
self.assertRaises(webob.exc.HTTPConflict, self.attachments.delete,
req, FAKE_UUID, FAKE_UUID_A)
def test_attach_volume(self):
self.stubs.Set(compute_api.API,
'attach_volume',
fake_attach_volume)
body = {'volumeAttachment': {'volumeId': FAKE_UUID_A,
'device': '/dev/fake'}}
req = webob.Request.blank('/v2/servers/id/os-volume_attachments')
req.method = 'POST'
req.body = jsonutils.dumps({})
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
result = self.attachments.create(req, FAKE_UUID, body)
self.assertEqual(result['volumeAttachment']['id'],
'00000000-aaaa-aaaa-aaaa-000000000000')
def test_attach_volume_to_locked_server(self):
def fake_attach_volume_to_locked_server(self, context, instance,
volume_id, device=None):
raise exception.InstanceIsLocked(instance_uuid=instance['uuid'])
self.stubs.Set(compute_api.API,
'attach_volume',
fake_attach_volume_to_locked_server)
body = {'volumeAttachment': {'volumeId': FAKE_UUID_A,
'device': '/dev/fake'}}
req = webob.Request.blank('/v2/servers/id/os-volume_attachments')
req.method = 'POST'
req.body = jsonutils.dumps({})
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
self.assertRaises(webob.exc.HTTPConflict, self.attachments.create,
req, FAKE_UUID, body)
def test_attach_volume_bad_id(self):
self.stubs.Set(compute_api.API,
'attach_volume',
fake_attach_volume)
body = {
'volumeAttachment': {
'device': None,
'volumeId': 'TESTVOLUME',
}
}
req = webob.Request.blank('/v2/servers/id/os-volume_attachments')
req.method = 'POST'
req.body = jsonutils.dumps({})
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
self.assertRaises(webob.exc.HTTPBadRequest, self.attachments.create,
req, FAKE_UUID, body)
def test_attach_volume_without_volumeId(self):
self.stubs.Set(compute_api.API,
'attach_volume',
fake_attach_volume)
body = {
'volumeAttachment': {
'device': None
}
}
req = webob.Request.blank('/v2/servers/id/os-volume_attachments')
req.method = 'POST'
req.body = jsonutils.dumps({})
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
self.assertRaises(webob.exc.HTTPBadRequest, self.attachments.create,
req, FAKE_UUID, body)
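# Helper shared by the swap tests: stubs compute_api.API.swap_volume and issues a
# PUT against the attachment, returning whatever the controller's update() returns.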
def _test_swap(self, uuid=FAKE_UUID_A, fake_func=None, body=None):
fake_func = fake_func or fake_swap_volume
self.stubs.Set(compute_api.API,
'swap_volume',
fake_func)
body = body or {'volumeAttachment': {'volumeId': FAKE_UUID_B,
'device': '/dev/fake'}}
req = webob.Request.blank('/v2/servers/id/os-volume_attachments/uuid')
req.method = 'PUT'
req.body = jsonutils.dumps({})
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
return self.attachments.update(req, FAKE_UUID, uuid, body)
def test_swap_volume_for_locked_server(self):
def fake_swap_volume_for_locked_server(self, context, instance,
old_volume, new_volume):
raise exception.InstanceIsLocked(instance_uuid=instance['uuid'])
self.ext_mgr.extensions['os-volume-attachment-update'] = True
self.assertRaises(webob.exc.HTTPConflict, self._test_swap,
fake_func=fake_swap_volume_for_locked_server)
def test_swap_volume_no_extension(self):
self.assertRaises(webob.exc.HTTPBadRequest, self._test_swap)
def test_swap_volume(self):
self.ext_mgr.extensions['os-volume-attachment-update'] = True
result = self._test_swap()
self.assertEqual('202 Accepted', result.status)
def test_swap_volume_no_attachment(self):
self.ext_mgr.extensions['os-volume-attachment-update'] = True
self.assertRaises(exc.HTTPNotFound, self._test_swap, FAKE_UUID_C)
def test_swap_volume_without_volumeId(self):
self.ext_mgr.extensions['os-volume-attachment-update'] = True
body = {'volumeAttachment': {'device': '/dev/fake'}}
self.assertRaises(webob.exc.HTTPBadRequest,
self._test_swap,
body=body)
class VolumeSerializerTest(test.TestCase):
def _verify_volume_attachment(self, attach, tree):
for attr in ('id', 'volumeId', 'serverId', 'device'):
self.assertEqual(str(attach[attr]), tree.get(attr))
def _verify_volume(self, vol, tree):
self.assertEqual(tree.tag, 'volume')
for attr in ('id', 'status', 'size', 'availabilityZone', 'createdAt',
'displayName', 'displayDescription', 'volumeType',
'snapshotId'):
self.assertEqual(str(vol[attr]), tree.get(attr))
for child in tree:
self.assertIn(child.tag, ('attachments', 'metadata'))
if child.tag == 'attachments':
self.assertEqual(1, len(child))
self.assertEqual('attachment', child[0].tag)
self._verify_volume_attachment(vol['attachments'][0], child[0])
elif child.tag == 'metadata':
not_seen = set(vol['metadata'].keys())
for gr_child in child:
self.assertIn(gr_child.get("key"), not_seen)
self.assertEqual(str(vol['metadata'][gr_child.get("key")]),
gr_child.text)
not_seen.remove(gr_child.get("key"))
self.assertEqual(0, len(not_seen))
def test_attach_show_create_serializer(self):
serializer = volumes.VolumeAttachmentTemplate()
raw_attach = dict(
id='vol_id',
volumeId='vol_id',
serverId='instance_uuid',
device='/foo')
text = serializer.serialize(dict(volumeAttachment=raw_attach))
tree = etree.fromstring(text)
self.assertEqual('volumeAttachment', tree.tag)
self._verify_volume_attachment(raw_attach, tree)
def test_attach_index_serializer(self):
serializer = volumes.VolumeAttachmentsTemplate()
raw_attaches = [dict(
id='vol_id1',
volumeId='vol_id1',
serverId='instance1_uuid',
device='/foo1'),
dict(
id='vol_id2',
volumeId='vol_id2',
serverId='instance2_uuid',
device='/foo2')]
text = serializer.serialize(dict(volumeAttachments=raw_attaches))
tree = etree.fromstring(text)
self.assertEqual('volumeAttachments', tree.tag)
self.assertEqual(len(raw_attaches), len(tree))
for idx, child in enumerate(tree):
self.assertEqual('volumeAttachment', child.tag)
self._verify_volume_attachment(raw_attaches[idx], child)
def test_volume_show_create_serializer(self):
serializer = volumes.VolumeTemplate()
raw_volume = dict(
id='vol_id',
status='vol_status',
size=1024,
availabilityZone='vol_availability',
createdAt=timeutils.utcnow(),
attachments=[dict(
id='vol_id',
volumeId='vol_id',
serverId='instance_uuid',
device='/foo')],
displayName='vol_name',
displayDescription='vol_desc',
volumeType='vol_type',
snapshotId='snap_id',
metadata=dict(
foo='bar',
baz='quux',
),
)
text = serializer.serialize(dict(volume=raw_volume))
tree = etree.fromstring(text)
self._verify_volume(raw_volume, tree)
def test_volume_index_detail_serializer(self):
serializer = volumes.VolumesTemplate()
raw_volumes = [dict(
id='vol1_id',
status='vol1_status',
size=1024,
availabilityZone='vol1_availability',
createdAt=timeutils.utcnow(),
attachments=[dict(
id='vol1_id',
volumeId='vol1_id',
serverId='instance_uuid',
device='/foo1')],
displayName='vol1_name',
displayDescription='vol1_desc',
volumeType='vol1_type',
snapshotId='snap1_id',
metadata=dict(
foo='vol1_foo',
bar='vol1_bar',
),
),
dict(
id='vol2_id',
status='vol2_status',
size=1024,
availabilityZone='vol2_availability',
createdAt=timeutils.utcnow(),
attachments=[dict(
id='vol2_id',
volumeId='vol2_id',
serverId='instance_uuid',
device='/foo2')],
displayName='vol2_name',
displayDescription='vol2_desc',
volumeType='vol2_type',
snapshotId='snap2_id',
metadata=dict(
foo='vol2_foo',
bar='vol2_bar',
),
)]
text = serializer.serialize(dict(volumes=raw_volumes))
tree = etree.fromstring(text)
self.assertEqual('volumes', tree.tag)
self.assertEqual(len(raw_volumes), len(tree))
for idx, child in enumerate(tree):
self._verify_volume(raw_volumes[idx], child)
class TestVolumeCreateRequestXMLDeserializer(test.TestCase):
def setUp(self):
super(TestVolumeCreateRequestXMLDeserializer, self).setUp()
self.deserializer = volumes.CreateDeserializer()
def test_minimal_volume(self):
self_request = """
<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
size="1"></volume>"""
request = self.deserializer.deserialize(self_request)
expected = {
"volume": {
"size": "1",
},
}
self.assertEqual(request['body'], expected)
def test_display_name(self):
self_request = """
<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
size="1"
display_name="Volume-xml"></volume>"""
request = self.deserializer.deserialize(self_request)
expected = {
"volume": {
"size": "1",
"display_name": "Volume-xml",
},
}
self.assertEqual(request['body'], expected)
def test_display_description(self):
self_request = """
<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
size="1"
display_name="Volume-xml"
display_description="description"></volume>"""
request = self.deserializer.deserialize(self_request)
expected = {
"volume": {
"size": "1",
"display_name": "Volume-xml",
"display_description": "description",
},
}
self.assertEqual(request['body'], expected)
def test_volume_type(self):
self_request = """
<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
size="1"
display_name="Volume-xml"
display_description="description"
volume_type="289da7f8-6440-407c-9fb4-7db01ec49164"></volume>"""
request = self.deserializer.deserialize(self_request)
expected = {
"volume": {
"size": "1",
"display_name": "Volume-xml",
"display_description": "description",
"volume_type": "289da7f8-6440-407c-9fb4-7db01ec49164",
},
}
self.assertEqual(request['body'], expected)
def test_availability_zone(self):
self_request = """
<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
size="1"
display_name="Volume-xml"
display_description="description"
volume_type="289da7f8-6440-407c-9fb4-7db01ec49164"
availability_zone="us-east1"></volume>"""
request = self.deserializer.deserialize(self_request)
expected = {
"volume": {
"size": "1",
"display_name": "Volume-xml",
"display_description": "description",
"volume_type": "289da7f8-6440-407c-9fb4-7db01ec49164",
"availability_zone": "us-east1",
},
}
self.assertEqual(request['body'], expected)
def test_metadata(self):
self_request = """
<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
display_name="Volume-xml"
size="1">
<metadata><meta key="Type">work</meta></metadata></volume>"""
request = self.deserializer.deserialize(self_request)
expected = {
"volume": {
"display_name": "Volume-xml",
"size": "1",
"metadata": {
"Type": "work",
},
},
}
self.assertEqual(request['body'], expected)
def test_full_volume(self):
self_request = """
<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
size="1"
display_name="Volume-xml"
display_description="description"
volume_type="289da7f8-6440-407c-9fb4-7db01ec49164"
availability_zone="us-east1">
<metadata><meta key="Type">work</meta></metadata></volume>"""
request = self.deserializer.deserialize(self_request)
expected = {
"volume": {
"size": "1",
"display_name": "Volume-xml",
"display_description": "description",
"volume_type": "289da7f8-6440-407c-9fb4-7db01ec49164",
"availability_zone": "us-east1",
"metadata": {
"Type": "work",
},
},
}
self.maxDiff = None
self.assertEqual(request['body'], expected)
class CommonBadRequestTestCase(object):
resource = None
entity_name = None
controller_cls = None
kwargs = {}
"""
Tests of places we throw 400 Bad Request from
"""
def setUp(self):
super(CommonBadRequestTestCase, self).setUp()
self.controller = self.controller_cls()
def _bad_request_create(self, body):
req = fakes.HTTPRequest.blank('/v2/fake/' + self.resource)
req.method = 'POST'
kwargs = self.kwargs.copy()
kwargs['body'] = body
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, **kwargs)
def test_create_no_body(self):
self._bad_request_create(body=None)
def test_create_missing_volume(self):
body = {'foo': {'a': 'b'}}
self._bad_request_create(body=body)
def test_create_malformed_entity(self):
body = {self.entity_name: 'string'}
self._bad_request_create(body=body)
class BadRequestVolumeTestCaseV21(CommonBadRequestTestCase,
test.TestCase):
resource = 'os-volumes'
entity_name = 'volume'
controller_cls = volumes_v3.VolumeController
class BadRequestVolumeTestCaseV2(BadRequestVolumeTestCaseV21):
controller_cls = volumes.VolumeController
class BadRequestAttachmentTestCase(CommonBadRequestTestCase,
test.TestCase):
resource = 'servers/' + FAKE_UUID + '/os-volume_attachments'
entity_name = 'volumeAttachment'
controller_cls = volumes.VolumeAttachmentController
kwargs = {'server_id': FAKE_UUID}
class BadRequestSnapshotTestCaseV21(CommonBadRequestTestCase,
test.TestCase):
resource = 'os-snapshots'
entity_name = 'snapshot'
controller_cls = volumes_v3.SnapshotController
class BadRequestSnapshotTestCaseV2(BadRequestSnapshotTestCaseV21):
controller_cls = volumes.SnapshotController
class ShowSnapshotTestCaseV21(test.TestCase):
snapshot_cls = volumes_v3.SnapshotController
def setUp(self):
super(ShowSnapshotTestCaseV21, self).setUp()
self.controller = self.snapshot_cls()
self.req = fakes.HTTPRequest.blank('/v2/fake/os-snapshots')
self.req.method = 'GET'
def test_show_snapshot_not_exist(self):
def fake_get_snapshot(self, context, id):
raise exception.SnapshotNotFound(snapshot_id=id)
self.stubs.Set(cinder.API, 'get_snapshot', fake_get_snapshot)
self.assertRaises(exc.HTTPNotFound,
self.controller.show, self.req, FAKE_UUID_A)
class ShowSnapshotTestCaseV2(ShowSnapshotTestCaseV21):
snapshot_cls = volumes.SnapshotController
class CreateSnapshotTestCaseV21(test.TestCase):
snapshot_cls = volumes_v3.SnapshotController
def setUp(self):
super(CreateSnapshotTestCaseV21, self).setUp()
self.controller = self.snapshot_cls()
self.stubs.Set(cinder.API, 'get', fake_get_volume)
self.stubs.Set(cinder.API, 'create_snapshot_force',
fake_create_snapshot)
self.stubs.Set(cinder.API, 'create_snapshot', fake_create_snapshot)
self.req = fakes.HTTPRequest.blank('/v2/fake/os-snapshots')
self.req.method = 'POST'
self.body = {'snapshot': {'volume_id': 1}}
def test_force_true(self):
self.body['snapshot']['force'] = 'True'
self.controller.create(self.req, body=self.body)
def test_force_false(self):
self.body['snapshot']['force'] = 'f'
self.controller.create(self.req, body=self.body)
def test_force_invalid(self):
self.body['snapshot']['force'] = 'foo'
self.assertRaises(exc.HTTPBadRequest,
self.controller.create, self.req, body=self.body)
class CreateSnapshotTestCaseV2(CreateSnapshotTestCaseV21):
snapshot_cls = volumes.SnapshotController
class DeleteSnapshotTestCaseV21(test.TestCase):
snapshot_cls = volumes_v3.SnapshotController
def setUp(self):
super(DeleteSnapshotTestCaseV21, self).setUp()
self.controller = self.snapshot_cls()
self.stubs.Set(cinder.API, 'get', fake_get_volume)
self.stubs.Set(cinder.API, 'create_snapshot_force',
fake_create_snapshot)
self.stubs.Set(cinder.API, 'create_snapshot', fake_create_snapshot)
self.stubs.Set(cinder.API, 'delete_snapshot', fake_delete_snapshot)
self.req = fakes.HTTPRequest.blank('/v2/fake/os-snapshots')
def test_normal_delete(self):
self.req.method = 'POST'
self.body = {'snapshot': {'volume_id': 1}}
result = self.controller.create(self.req, body=self.body)
self.req.method = 'DELETE'
result = self.controller.delete(self.req, result['snapshot']['id'])
# NOTE: on v2.1, http status code is set as wsgi_code of API
# method instead of status_int in a response object.
if isinstance(self.controller, volumes_v3.SnapshotController):
status_int = self.controller.delete.wsgi_code
else:
status_int = result.status_int
self.assertEqual(202, status_int)
def test_delete_snapshot_not_exists(self):
def fake_delete_snapshot_not_exist(self, context, snapshot_id):
raise exception.SnapshotNotFound(snapshot_id=snapshot_id)
self.stubs.Set(cinder.API, 'delete_snapshot',
fake_delete_snapshot_not_exist)
self.req.method = 'POST'
self.body = {'snapshot': {'volume_id': 1}}
result = self.controller.create(self.req, body=self.body)
self.req.method = 'DELETE'
self.assertRaises(exc.HTTPNotFound, self.controller.delete,
self.req, result['snapshot']['id'])
class DeleteSnapshotTestCaseV2(DeleteSnapshotTestCaseV21):
snapshot_cls = volumes.SnapshotController
class AssistedSnapshotCreateTestCase(test.TestCase):
def setUp(self):
super(AssistedSnapshotCreateTestCase, self).setUp()
self.controller = assisted_snaps.AssistedVolumeSnapshotsController()
self.stubs.Set(compute_api.API, 'volume_snapshot_create',
fake_compute_volume_snapshot_create)
def test_assisted_create(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-assisted-volume-snapshots')
body = {'snapshot': {'volume_id': 1, 'create_info': {}}}
req.method = 'POST'
self.controller.create(req, body=body)
def test_assisted_create_missing_create_info(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-assisted-volume-snapshots')
body = {'snapshot': {'volume_id': 1}}
req.method = 'POST'
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, body=body)
class AssistedSnapshotDeleteTestCase(test.TestCase):
def setUp(self):
super(AssistedSnapshotDeleteTestCase, self).setUp()
self.controller = assisted_snaps.AssistedVolumeSnapshotsController()
self.stubs.Set(compute_api.API, 'volume_snapshot_delete',
fake_compute_volume_snapshot_delete)
def test_assisted_delete(self):
params = {
'delete_info': jsonutils.dumps({'volume_id': 1}),
}
req = fakes.HTTPRequest.blank(
'/v2/fake/os-assisted-volume-snapshots?%s' %
'&'.join(['%s=%s' % (k, v) for k, v in params.iteritems()]))
req.method = 'DELETE'
result = self.controller.delete(req, '5')
self.assertEqual(result.status_int, 204)
def test_assisted_delete_missing_delete_info(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-assisted-volume-snapshots')
req.method = 'DELETE'
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete,
req, '5')
import os
import sys
import time
import crocodoc
from crocodoc import CrocodocError
crocodoc.api_token = 'YOUR_API_TOKEN'
"""
Example #1
Upload a file to Crocodoc. We're uploading Form W4 from the IRS by URL.
"""
print 'Example #1 - Upload Form W4 from the IRS by URL.'
form_w4_url = 'http://www.irs.gov/pub/irs-pdf/fw4.pdf'
sys.stdout.write(' Uploading... ')
uuid = None
try:
uuid = crocodoc.document.upload(url=form_w4_url)
print 'success :)'
print ' UUID is ' + uuid
except CrocodocError as e:
print 'failed :('
print ' Error Code: ' + str(e.status_code)
print ' Error Message: ' + e.error_message
"""
Example #2
Check the status of the file from Example #1.
"""
print ''
print 'Example #2 - Check the status of the file we just uploaded.'
sys.stdout.write(' Checking status... ')
try:
status = crocodoc.document.status(uuid)
if status.get('error') == None:
print 'success :)'
print ' File status is ' + status['status'] + '.'
print ' File ' + ('is' if status['viewable'] else 'is not') + ' viewable.'
else:
print 'failed :('
print ' Error Message: ' + status['error']
except CrocodocError as e:
print 'failed :('
print ' Error Code: ' + str(e.status_code)
print ' Error Message: ' + e.error_message
"""
Example #3
Upload another file to Crocodoc. We're uploading Form W4 from the IRS as a PDF.
"""
print ''
print 'Example #3 - Upload a sample .pdf as a file.'
uuid2 = None
file_path = './example-files/form-w4.pdf'
if (os.path.isfile(file_path)):
file_handle = open(file_path, 'r')
sys.stdout.write(' Uploading... ')
uuid2 = None
try:
uuid2 = crocodoc.document.upload(file=file_handle)
print 'success :)'
print ' UUID is ' + uuid2
except CrocodocError as e:
print 'failed :('
print ' Error Code: ' + str(e.status_code)
print ' Error Message: ' + e.error_message
else:
print ' Skipping because the sample pdf can\'t be found.'
"""
Example #4
Check the status of both files we uploaded in Examples #1 and #3.
"""
print ''
print 'Example #4 - Check the status of both files at the same time.'
sys.stdout.write(' Checking statuses... ')
try:
statuses = crocodoc.document.status([uuid, uuid2])
if (len(statuses) != 0):
print 'success :)'
if (statuses[0].get('error') == None):
print ' File #1 status is ' + statuses[0]['status'] + '.'
print ' File #1 ' + ('is' if statuses[0]['viewable'] else 'is not') + ' viewable.'
else:
print ' File #1 failed :('
print ' Error Message: ' + statuses[0]['error']
if (statuses[1].get('error') == None):
print ' File #2 status is ' + statuses[1]['status'] + '.'
print ' File #2 ' + ('is' if statuses[1]['viewable'] else 'is not') + ' viewable.'
else:
print ' File #2 failed :('
print ' Error Message: ' + statuses[1]['error']
else:
print 'failed :('
print ' Statuses were not returned.'
except CrocodocError as e:
print 'failed :('
print ' Error Code: ' + str(e.status_code)
print ' Error Message: ' + e.error_message
"""
Example #5
Wait ten seconds and check the status of both files again.
"""
print ''
print 'Example #5 - Wait ten seconds and check the statuses again.'
sys.stdout.write(' Waiting... ')
time.sleep(10)
print 'done.'
sys.stdout.write(' Checking statuses... ')
try:
statuses = crocodoc.document.status([uuid, uuid2])
if (len(statuses) != 0):
print 'success :)'
if (statuses[0].get('error') == None):
print ' File #1 status is ' + statuses[0]['status'] + '.'
print ' File #1 ' + ('is' if statuses[0]['viewable'] else 'is not') + ' viewable.'
else:
print ' File #1 failed :('
print ' Error Message: ' + statuses[0]['error']
if (statuses[1].get('error') == None):
print ' File #2 status is ' + statuses[1]['status'] + '.'
print ' File #2 ' + ('is' if statuses[1]['viewable'] else 'is not') + ' viewable.'
else:
print ' File #2 failed :('
print ' Error Message: ' + statuses[1]['error']
else:
print 'failed :('
print ' Statuses were not returned.'
except CrocodocError as e:
print 'failed :('
print ' Error Code: ' + str(e.status_code)
print ' Error Message: ' + e.error_message
"""
Example #6
Delete the file we uploaded from Example #1.
"""
print ''
print 'Example #6 - Delete the first file we uploaded.'
sys.stdout.write(' Deleting... ')
try:
deleted = crocodoc.document.delete(uuid)
if deleted:
print 'success :)'
print ' File was deleted.'
else:
print 'failed :('
except CrocodocError as e:
print 'failed :('
print ' Error Code: ' + str(e.status_code)
print ' Error Message: ' + e.error_message
"""
Example #7
Download the file we uploaded from Example #3 as an original
"""
print ''
print 'Example #7 - Download a file as an original.'
sys.stdout.write(' Downloading... ')
try:
file = crocodoc.download.document(uuid2)
filename = os.path.dirname(os.path.abspath(__file__)) + '/example-files/test-original.pdf'
file_handle = open(filename, 'w')
file_handle.write(file)
print 'success :)'
print ' File was downloaded to ' + filename + '.'
except CrocodocError as e:
print 'failed :('
print ' Error Code: ' + str(e.status_code)
print ' Error Message: ' + e.error_message
"""
Example #8
Download the file we uploaded from Example #3 as a PDF
"""
print ''
print 'Example #8 - Download a file as a PDF.'
sys.stdout.write(' Downloading...')
try:
file = crocodoc.download.document(uuid2, True)
filename = os.path.dirname(os.path.abspath(__file__)) + '/example-files/test.pdf'
file_handle = open(filename, 'w')
file_handle.write(file)
print 'success :)'
print ' File was downloaded to ' + filename + '.'
except CrocodocError as e:
print 'failed :('
print ' Error Code: ' + str(e.status_code)
print ' Error Message: ' + e.error_message
"""
Example #9
Download the file we uploaded from Example #3 with all options
"""
print ''
print 'Example #9 - Download a file with all options.'
sys.stdout.write(' Downloading...')
try:
file = crocodoc.download.document(uuid2, True, True, 'all')
filename = os.path.dirname(os.path.abspath(__file__)) + '/example-files/test-with-options.pdf'
file_handle = open(filename, 'w')
file_handle.write(file)
print 'success :)'
print ' File was downloaded to ' + filename + '.'
except CrocodocError as e:
print 'failed :('
print ' Error Code: ' + str(e.status_code)
print ' Error Message: ' + e.error_message
"""
Example #10
Download the file we uploaded from Example #3 as a default thumbnail
"""
print ''
print 'Example #10 - Download a default thumbnail from a file.'
sys.stdout.write(' Downloading...')
try:
file = crocodoc.download.thumbnail(uuid2)
filename = os.path.dirname(os.path.abspath(__file__)) + '/example-files/thumbnail.png'
file_handle = open(filename, 'w')
file_handle.write(file)
print 'success :)'
print ' File was downloaded to ' + filename + '.'
except CrocodocError as e:
print 'failed :('
print ' Error Code: ' + str(e.status_code)
print ' Error Message: ' + e.error_message
"""
Example #11
Download the file we uploaded from Example #3 as a large thumbnail
"""
print ''
print 'Example #11 - Download a large thumbnail from a file.'
sys.stdout.write(' Downloading...')
try:
file = crocodoc.download.thumbnail(uuid2, 250, 250)
filename = os.path.dirname(os.path.abspath(__file__)) + '/example-files/thumbnail-large.png'
file_handle = open(filename, 'w')
file_handle.write(file)
print 'success :)'
print ' File was downloaded to ' + filename + '.'
except CrocodocError as e:
print 'failed :('
print ' Error Code: ' + str(e.status_code)
print ' Error Message: ' + e.error_message
"""
Example #12
Create a session key for the file we uploaded from Example #3 with default
options.
"""
print ''
print 'Example #12 - Create a session key for a file with default options.'
sys.stdout.write(' Creating... ')
session_key = None
try:
session_key = crocodoc.session.create(uuid2)
print 'success :)'
print ' The session key is ' + session_key + '.'
except CrocodocError as e:
print 'failed :('
print ' Error Code: ' + str(e.status_code)
print ' Error Message: ' + e.error_message
"""
Example #13
Create a session key for the file we uploaded from Example #3 with all of the
options.
"""
print ''
print 'Example #13 - Create a session key for a file with all of the options.'
sys.stdout.write(' Creating... ')
session_key = None
try:
user = {'id': 1, 'name': 'John Crocodoc'}
session_key = crocodoc.session.create(uuid2,
editable=True, user=user,
filter='all', admin=True, downloadable=True,
copyprotected=False, demo=False, sidebar='visible'
)
print 'success :)'
print ' The session key is ' + session_key + '.'
except CrocodocError as e:
print 'failed :('
print ' Error Code: ' + str(e.status_code)
print ' Error Message: ' + e.error_message
"""
Example #14
Delete the second file we uploaded.
"""
print ''
print 'Example #14 - Delete the second file we uploaded.'
sys.stdout.write(' Deleting... ')
try:
deleted = crocodoc.document.delete(uuid2)
if deleted:
print 'success :)'
print ' File was deleted.'
else:
print 'failed :('
except CrocodocError as e:
print 'failed :('
print ' Error Code: ' + str(e.status_code)
print ' Error Message: ' + e.error_message
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import shutil
from builtins import open
from collections import defaultdict
from pants.base.exceptions import TaskError
from pants.build_graph.address import Address
from pants.build_graph.address_lookup_error import AddressLookupError
from pants.util.contextutil import temporary_dir
from pants.util.dirutil import safe_concurrent_creation, safe_mkdir
from pants.contrib.go.subsystems.fetcher_factory import FetcherFactory
from pants.contrib.go.targets.go_remote_library import GoRemoteLibrary
from pants.contrib.go.tasks.go_task import GoTask
class GoFetch(GoTask):
"""Fetches third-party Go libraries."""
@classmethod
def implementation_version(cls):
return super(GoFetch, cls).implementation_version() + [('GoFetch', 2)]
@classmethod
def subsystem_dependencies(cls):
return super(GoFetch, cls).subsystem_dependencies() + (FetcherFactory,)
@classmethod
def product_types(cls):
return ['go_remote_lib_src']
@classmethod
def register_options(cls, register):
pass
@property
def cache_target_dirs(self):
# TODO(John Sirois): See TODO in _fetch_pkg, re-consider how artifact caching works for fetches.
return True
def execute(self):
self.context.products.safe_create_data('go_remote_lib_src', lambda: defaultdict(str))
go_remote_libs = self.context.targets(self.is_remote_lib)
if not go_remote_libs:
return
undeclared_deps = self._transitive_download_remote_libs(set(go_remote_libs))
if undeclared_deps:
self._log_undeclared_deps(undeclared_deps)
raise TaskError('Failed to resolve transitive Go remote dependencies.')
def _log_undeclared_deps(self, undeclared_deps):
for dependee, deps in undeclared_deps.items():
self.context.log.error('{address} has remote dependencies which require local declaration:'
.format(address=dependee.address.reference()))
for dep_import_path, address in deps:
self.context.log.error('\t--> {import_path} (expected go_remote_library declaration '
'at {address})'.format(import_path=dep_import_path,
address=address.reference()))
@staticmethod
def _get_fetcher(import_path):
return FetcherFactory.global_instance().get_fetcher(import_path)
def _fetch_pkg(self, gopath, pkg, rev):
"""Fetch the package and setup symlinks."""
fetcher = self._get_fetcher(pkg)
root = fetcher.root()
root_dir = os.path.join(self.workdir, 'fetches', root, rev)
# Only fetch each remote root once.
if not os.path.exists(root_dir):
with temporary_dir() as tmp_fetch_root:
with self.context.new_workunit('fetch {}'.format(pkg)):
fetcher.fetch(dest=tmp_fetch_root, rev=rev)
safe_mkdir(root_dir)
for path in os.listdir(tmp_fetch_root):
shutil.move(os.path.join(tmp_fetch_root, path), os.path.join(root_dir, path))
# TODO(John Sirois): Circle back and get rid of this symlink tree.
# GoWorkspaceTask will further symlink a single package from the tree below into a
# target's workspace when it could just be linking from the fetch_dir. The only thing
# standing in the way is a determination of what we want to artifact cache. If we don't
# want to cache fetched zips, linking straight from the fetch_dir works simply. Otherwise
# thought needs to be applied to using the artifact cache directly or synthesizing a
# canonical owner target for the fetched files that 'child' targets (subpackages) can
# depend on and share the fetch from.
dest_dir = os.path.join(gopath, 'src', root)
# We may have been `invalidate`d and not `clean-all`ed so we need a new empty symlink
# chroot to avoid collision; thus `clean=True`.
safe_mkdir(dest_dir, clean=True)
for path in os.listdir(root_dir):
os.symlink(os.path.join(root_dir, path), os.path.join(dest_dir, path))
# Note: Will update import_root_map.
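# For each remote import path used by go_remote_lib, this resolves (or, when allowed,
# synthesizes) the owning GoRemoteLibrary target and records any undeclared deps.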
def _map_fetched_remote_source(self, go_remote_lib, gopath, all_known_remote_libs,
resolved_remote_libs, undeclared_deps, import_root_map):
# See if we've computed the remote import paths for this rev of this lib in a previous run.
remote_import_paths_cache = os.path.join(os.path.dirname(gopath), 'remote_import_paths.txt')
if os.path.exists(remote_import_paths_cache):
with open(remote_import_paths_cache, 'r') as fp:
remote_import_paths = [line.strip() for line in fp.readlines()]
else:
remote_import_paths = self._get_remote_import_paths(go_remote_lib.import_path,
gopath=gopath)
with safe_concurrent_creation(remote_import_paths_cache) as safe_path:
with open(safe_path, 'w') as fp:
for path in remote_import_paths:
fp.write('{}\n'.format(path))
for remote_import_path in remote_import_paths:
remote_root = import_root_map.get(remote_import_path)
if remote_root is None:
fetcher = self._get_fetcher(remote_import_path)
remote_root = fetcher.root()
import_root_map[remote_import_path] = remote_root
spec_path = os.path.join(go_remote_lib.target_base, remote_root)
package_path = GoRemoteLibrary.remote_package_path(remote_root, remote_import_path)
target_name = package_path or os.path.basename(remote_root)
address = Address(spec_path, target_name)
if not any(address == lib.address for lib in all_known_remote_libs):
try:
# If we've already resolved a package from this remote root, it's ok to define an
# implicit synthetic remote target for all other packages in the same remote root.
same_remote_libs = [lib for lib in all_known_remote_libs
if spec_path == lib.address.spec_path]
implicit_ok = any(same_remote_libs)
# If we're creating a synthetic remote target, we should pin it to the same
# revision as the rest of the library.
rev = None
if implicit_ok:
rev = same_remote_libs[0].rev
remote_lib = self._resolve(go_remote_lib, address, package_path, rev, implicit_ok)
resolved_remote_libs.add(remote_lib)
all_known_remote_libs.add(remote_lib)
except self.UndeclaredRemoteLibError as e:
undeclared_deps[go_remote_lib].add((remote_import_path, e.address))
self.context.build_graph.inject_dependency(go_remote_lib.address, address)
def _transitive_download_remote_libs(self, go_remote_libs, all_known_remote_libs=None):
"""Recursively attempt to resolve / download all remote transitive deps of go_remote_libs.
Returns a dict<GoRemoteLibrary, set<tuple<str, Address>>>, which maps a go remote library to a
set of unresolved remote dependencies, each dependency expressed as a tuple containing
the import path of the dependency and the expected target address. If all transitive
dependencies were successfully resolved, returns an empty dict.
Downloads as many invalidated transitive dependencies as possible, and returns as many
undeclared dependencies as possible. However, because the dependencies of a remote library
can only be determined _after_ it has been downloaded, a transitive dependency of an undeclared
remote library will never be detected.
Because go_remote_libraries do not declare dependencies (rather, they are inferred), injects
all successfully resolved transitive dependencies into the build graph.
"""
if not go_remote_libs:
return {}
all_known_remote_libs = all_known_remote_libs or set()
all_known_remote_libs.update(go_remote_libs)
resolved_remote_libs = set()
undeclared_deps = defaultdict(set)
go_remote_lib_src = self.context.products.get_data('go_remote_lib_src')
with self.invalidated(go_remote_libs) as invalidation_check:
# We accumulate mappings from import path to root (e.g., example.org/pkg/foo -> example.org)
# from all targets in this map, so that targets share as much of this information as
# possible during this run.
# We cache these mappings to avoid repeatedly fetching them over the network via the
# meta tag protocol. Note that this mapping is unversioned: It's defined as "whatever meta
# tag is currently being served at the relevant URL", which is inherently independent of
# the rev of the remote library. We (and the entire Go ecosystem) assume that this mapping
# never changes, in practice.
import_root_map = {}
for vt in invalidation_check.all_vts:
import_root_map_path = os.path.join(vt.results_dir, 'pkg_root_map.txt')
import_root_map.update(self._read_import_root_map_file(import_root_map_path))
go_remote_lib = vt.target
gopath = os.path.join(vt.results_dir, 'gopath')
if not vt.valid:
self._fetch_pkg(gopath, go_remote_lib.import_path, go_remote_lib.rev)
# _map_fetched_remote_source() will modify import_root_map.
self._map_fetched_remote_source(go_remote_lib, gopath, all_known_remote_libs,
resolved_remote_libs, undeclared_deps, import_root_map)
go_remote_lib_src[go_remote_lib] = os.path.join(gopath, 'src', go_remote_lib.import_path)
# Cache the mapping against this target's key. Note that because we accumulate
# mappings across targets, the file may contain mappings that this target doesn't
# need or care about (although it will contain all the mappings this target does need).
# But the file is small, so there's no harm in this redundancy.
self._write_import_root_map_file(import_root_map_path, import_root_map)
# Recurse after the invalidated block, so the libraries we downloaded are now "valid"
# and thus we don't try to download a library twice.
trans_undeclared_deps = self._transitive_download_remote_libs(resolved_remote_libs,
all_known_remote_libs)
undeclared_deps.update(trans_undeclared_deps)
return undeclared_deps
class UndeclaredRemoteLibError(Exception):
def __init__(self, address):
self.address = address
def _resolve(self, dependent_remote_lib, address, pkg, rev, implicit_ok):
"""Resolves the GoRemoteLibrary at `address` defining the given `pkg`.
If `implicit_ok` is True, then a GoRemoteLibrary to own `pkg` is always synthesized if it does
not already exist; otherwise the address must already exist in the build graph (a BUILD file
must exist on disk that owns the given `pkg` and declares a `rev` for it).
:param dependent_remote_lib: The remote library that depends on the remote `pkg`.
:type: :class:`pants.contrib.go.targets.go_remote_library.GoRemoteLibrary`
:param address: The address of the remote library that should own `pkg`.
:type: :class:`pants.base.Address`
:param string pkg: The remote package path whose owning target needs to be resolved.
:param string rev: The revision of the package. None defaults to `master`.
:param bool implicit_ok: `False` if the given `address` must be defined in a BUILD file on disk;
otherwise a remote library to own `pkg` will always be created and
returned.
:returns: The resulting resolved remote library after injecting it in the build graph.
:rtype: :class:`pants.contrib.go.targets.go_remote_library.GoRemoteLibrary`
:raises: :class:`GoFetch.UndeclaredRemoteLibError`: If no BUILD file exists for the remote root
`pkg` lives in.
"""
try:
self.context.build_graph.inject_address_closure(address)
except AddressLookupError:
if implicit_ok:
self.context.add_new_target(address=address,
target_base=dependent_remote_lib.target_base,
target_type=GoRemoteLibrary,
pkg=pkg,
rev=rev)
else:
raise self.UndeclaredRemoteLibError(address)
return self.context.build_graph.get_target(address)
@staticmethod
def _is_relative(import_path):
return import_path.startswith('.')
def _get_remote_import_paths(self, pkg, gopath=None):
"""Returns the remote import paths declared by the given remote Go `pkg`.
NB: This only includes production code imports, no test code imports.
"""
import_listing = self.import_oracle.list_imports(pkg, gopath=gopath)
return [imp for imp in import_listing.imports
if (not self.import_oracle.is_go_internal_import(imp) and
# We assume relative imports are local to the package and skip attempts to
# recursively resolve them.
not self._is_relative(imp))]
@staticmethod
def _read_import_root_map_file(path):
"""Reads a file mapping import paths to roots (e.g., example.org/pkg/foo -> example.org)."""
if os.path.exists(path):
with open(path, 'r') as fp:
return dict({import_path: root for import_path, root in
(x.strip().split('\t') for x in fp.readlines())})
else:
return {}
@staticmethod
def _write_import_root_map_file(path, import_root_map):
"""Writes a file mapping import paths to roots."""
with safe_concurrent_creation(path) as safe_path:
with open(safe_path, 'w') as fp:
for import_path, root in sorted(import_root_map.items()):
fp.write('{}\t{}\n'.format(import_path, root))
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for resource tracker claims."""
import uuid
import mock
from nova.compute import claims
from nova import context
from nova import exception
from nova import objects
from nova.pci import manager as pci_manager
from nova import test
from nova.tests.unit import fake_instance
from nova.tests.unit.pci import fakes as pci_fakes
_NODENAME = 'fake-node'
class FakeResourceHandler(object):
test_called = False
usage_is_itype = False
def test_resources(self, usage, limits):
self.test_called = True
self.usage_is_itype = usage.get('name') == 'fakeitype'
return []
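# Minimal stand-in for the resource tracker: records whether the claim abort/drop
# hooks were invoked and carries a real PciDevTracker for the PCI claim tests.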
class DummyTracker(object):
icalled = False
rcalled = False
def __init__(self):
self.new_pci_tracker()
def abort_instance_claim(self, *args, **kwargs):
self.icalled = True
def drop_move_claim(self, *args, **kwargs):
self.rcalled = True
def new_pci_tracker(self):
ctxt = context.RequestContext('testuser', 'testproject')
self.pci_tracker = pci_manager.PciDevTracker(ctxt)
class ClaimTestCase(test.NoDBTestCase):
def setUp(self):
super(ClaimTestCase, self).setUp()
self.context = context.RequestContext('fake-user', 'fake-project')
self.instance = None
self.resources = self._fake_resources()
self.tracker = DummyTracker()
self.empty_requests = objects.InstancePCIRequests(
requests=[]
)
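# Helper: builds a fake instance and flavor from kwargs and constructs a
# claims.Claim against self.resources, patching the instance-extra DB lookup
# that supplies NUMA topology and PCI request data.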
def _claim(self, limits=None, overhead=None, requests=None, **kwargs):
numa_topology = kwargs.pop('numa_topology', None)
instance = self._fake_instance(**kwargs)
instance.flavor = self._fake_instance_type(**kwargs)
if numa_topology:
db_numa_topology = {
'id': 1, 'created_at': None, 'updated_at': None,
'deleted_at': None, 'deleted': None,
'instance_uuid': instance.uuid,
'numa_topology': numa_topology._to_json(),
'pci_requests': (requests or self.empty_requests).to_json()
}
else:
db_numa_topology = None
if overhead is None:
overhead = {'memory_mb': 0}
requests = requests or self.empty_requests
@mock.patch('nova.db.instance_extra_get_by_instance_uuid',
return_value=db_numa_topology)
def get_claim(mock_extra_get):
return claims.Claim(self.context, instance, _NODENAME,
self.tracker, self.resources, requests,
overhead=overhead, limits=limits)
return get_claim()
def _fake_instance(self, **kwargs):
instance = {
'uuid': str(uuid.uuid1()),
'memory_mb': 1024,
'root_gb': 10,
'ephemeral_gb': 5,
'vcpus': 1,
'system_metadata': {},
'numa_topology': None
}
instance.update(**kwargs)
return fake_instance.fake_instance_obj(self.context, **instance)
def _fake_instance_type(self, **kwargs):
instance_type = {
'id': 1,
'name': 'fakeitype',
'memory_mb': 1024,
'vcpus': 1,
'root_gb': 10,
'ephemeral_gb': 5
}
instance_type.update(**kwargs)
return objects.Flavor(**instance_type)
def _fake_resources(self, values=None):
resources = {
'memory_mb': 2048,
'memory_mb_used': 0,
'free_ram_mb': 2048,
'local_gb': 20,
'local_gb_used': 0,
'free_disk_gb': 20,
'vcpus': 2,
'vcpus_used': 0,
'numa_topology': objects.NUMATopology(
cells=[objects.NUMACell(id=1, cpuset=set([1, 2]), memory=512,
memory_usage=0, cpu_usage=0,
mempages=[],
siblings=[set([1]), set([2])],
pinned_cpus=set([])),
objects.NUMACell(id=2, cpuset=set([3, 4]), memory=512,
memory_usage=0, cpu_usage=0,
mempages=[],
siblings=[set([3]), set([4])],
pinned_cpus=set([]))]
)._to_json()
}
if values:
resources.update(values)
return objects.ComputeNode(**resources)
def test_memory_unlimited(self):
self._claim(memory_mb=99999999)
def test_disk_unlimited_root(self):
self._claim(root_gb=999999)
def test_disk_unlimited_ephemeral(self):
self._claim(ephemeral_gb=999999)
def test_memory_with_overhead(self):
overhead = {'memory_mb': 8}
limits = {'memory_mb': 2048}
self._claim(memory_mb=2040, limits=limits,
overhead=overhead)
def test_memory_with_overhead_insufficient(self):
overhead = {'memory_mb': 9}
limits = {'memory_mb': 2048}
self.assertRaises(exception.ComputeResourcesUnavailable,
self._claim, limits=limits, overhead=overhead,
memory_mb=2040)
def test_memory_oversubscription(self):
self._claim(memory_mb=4096)
def test_disk_with_overhead(self):
overhead = {'memory_mb': 0,
'disk_gb': 1}
limits = {'disk_gb': 100}
claim_obj = self._claim(root_gb=99, ephemeral_gb=0, limits=limits,
overhead=overhead)
self.assertEqual(100, claim_obj.disk_gb)
def test_disk_with_overhead_insufficient(self):
overhead = {'memory_mb': 0,
'disk_gb': 2}
limits = {'disk_gb': 100}
self.assertRaises(exception.ComputeResourcesUnavailable,
self._claim, limits=limits, overhead=overhead,
root_gb=99, ephemeral_gb=0)
def test_disk_with_overhead_insufficient_no_root(self):
overhead = {'memory_mb': 0,
'disk_gb': 2}
limits = {'disk_gb': 1}
self.assertRaises(exception.ComputeResourcesUnavailable,
self._claim, limits=limits, overhead=overhead,
root_gb=0, ephemeral_gb=0)
def test_memory_insufficient(self):
limits = {'memory_mb': 8192}
self.assertRaises(exception.ComputeResourcesUnavailable,
self._claim, limits=limits, memory_mb=16384)
def test_disk_oversubscription(self):
limits = {'disk_gb': 60}
self._claim(root_gb=10, ephemeral_gb=40,
limits=limits)
def test_disk_insufficient(self):
limits = {'disk_gb': 45}
self.assertRaisesRegex(
exception.ComputeResourcesUnavailable,
"disk",
self._claim, limits=limits, root_gb=10, ephemeral_gb=40)
def test_disk_and_memory_insufficient(self):
limits = {'disk_gb': 45, 'memory_mb': 8192}
self.assertRaisesRegex(
exception.ComputeResourcesUnavailable,
"memory.*disk",
self._claim, limits=limits, root_gb=10, ephemeral_gb=40,
memory_mb=16384)
@mock.patch('nova.pci.stats.PciDeviceStats.support_requests',
return_value=True)
def test_pci_pass(self, mock_pci_supports_requests):
request = objects.InstancePCIRequest(count=1,
spec=[{'vendor_id': 'v', 'product_id': 'p'}])
requests = objects.InstancePCIRequests(requests=[request])
self._claim(requests=requests)
mock_pci_supports_requests.assert_called_once_with([request])
@mock.patch('nova.pci.stats.PciDeviceStats.support_requests',
return_value=False)
def test_pci_fail(self, mock_pci_supports_requests):
request = objects.InstancePCIRequest(count=1,
spec=[{'vendor_id': 'v', 'product_id': 'p'}])
requests = objects.InstancePCIRequests(requests=[request])
self.assertRaisesRegex(
exception.ComputeResourcesUnavailable,
'Claim pci failed.',
self._claim, requests=requests)
mock_pci_supports_requests.assert_called_once_with([request])
@mock.patch('nova.pci.stats.PciDeviceStats.support_requests')
def test_pci_pass_no_requests(self, mock_pci_supports_requests):
self._claim()
self.assertFalse(mock_pci_supports_requests.called)
def test_numa_topology_no_limit(self):
huge_instance = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(
id=1, cpuset=set([1, 2]), memory=512)])
self._claim(numa_topology=huge_instance)
def test_numa_topology_fails(self):
huge_instance = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(
id=1, cpuset=set([1, 2, 3, 4, 5]), memory=2048)])
limit_topo = objects.NUMATopologyLimits(
cpu_allocation_ratio=1, ram_allocation_ratio=1)
self.assertRaises(exception.ComputeResourcesUnavailable,
self._claim,
limits={'numa_topology': limit_topo},
numa_topology=huge_instance)
def test_numa_topology_passes(self):
huge_instance = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(
id=1, cpuset=set([1, 2]), memory=512)])
limit_topo = objects.NUMATopologyLimits(
cpu_allocation_ratio=1, ram_allocation_ratio=1)
self._claim(limits={'numa_topology': limit_topo},
numa_topology=huge_instance)
@pci_fakes.patch_pci_whitelist
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance')
def test_numa_topology_with_pci(self, mock_get_by_instance):
dev_dict = {
'compute_node_id': 1,
'address': 'a',
'product_id': 'p',
'vendor_id': 'v',
'numa_node': 1,
'dev_type': 'type-PCI',
'parent_addr': 'a1',
'status': 'available'}
self.tracker.new_pci_tracker()
self.tracker.pci_tracker._set_hvdevs([dev_dict])
request = objects.InstancePCIRequest(count=1,
spec=[{'vendor_id': 'v', 'product_id': 'p'}])
requests = objects.InstancePCIRequests(requests=[request])
mock_get_by_instance.return_value = requests
huge_instance = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(
id=1, cpuset=set([1, 2]), memory=512)])
self._claim(requests=requests, numa_topology=huge_instance)
@pci_fakes.patch_pci_whitelist
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance')
def test_numa_topology_with_pci_fail(self, mock_get_by_instance):
dev_dict = {
'compute_node_id': 1,
'address': 'a',
'product_id': 'p',
'vendor_id': 'v',
'numa_node': 1,
'dev_type': 'type-PCI',
'parent_addr': 'a1',
'status': 'available'}
dev_dict2 = {
'compute_node_id': 1,
'address': 'a',
'product_id': 'p',
'vendor_id': 'v',
'numa_node': 2,
'dev_type': 'type-PCI',
'parent_addr': 'a1',
'status': 'available'}
self.tracker.new_pci_tracker()
self.tracker.pci_tracker._set_hvdevs([dev_dict, dev_dict2])
request = objects.InstancePCIRequest(count=2,
spec=[{'vendor_id': 'v', 'product_id': 'p'}])
requests = objects.InstancePCIRequests(requests=[request])
mock_get_by_instance.return_value = requests
huge_instance = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(
id=1, cpuset=set([1, 2]), memory=512)])
self.assertRaises(exception.ComputeResourcesUnavailable,
self._claim,
requests=requests,
numa_topology=huge_instance)
@pci_fakes.patch_pci_whitelist
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance')
def test_numa_topology_with_pci_no_numa_info(self, mock_get_by_instance):
dev_dict = {
'compute_node_id': 1,
'address': 'a',
'product_id': 'p',
'vendor_id': 'v',
'numa_node': None,
'dev_type': 'type-PCI',
'parent_addr': 'a1',
'status': 'available'}
self.tracker.new_pci_tracker()
self.tracker.pci_tracker._set_hvdevs([dev_dict])
request = objects.InstancePCIRequest(count=1,
spec=[{'vendor_id': 'v', 'product_id': 'p'}])
requests = objects.InstancePCIRequests(requests=[request])
mock_get_by_instance.return_value = requests
huge_instance = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(
id=1, cpuset=set([1, 2]), memory=512)])
self._claim(requests=requests, numa_topology=huge_instance)
def test_abort(self):
claim = self._abort()
self.assertTrue(claim.tracker.icalled)
def _abort(self):
claim = None
try:
with self._claim(memory_mb=4096) as claim:
raise test.TestingException("abort")
except test.TestingException:
pass
return claim
class MoveClaimTestCase(ClaimTestCase):
def _claim(self, limits=None, overhead=None, requests=None,
image_meta=None, **kwargs):
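        # Same idea as ClaimTestCase._claim, but builds a MoveClaim and also
        # mocks numa_get_constraints so the image/flavor NUMA constraints
        # resolve to the requested topology.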
instance_type = self._fake_instance_type(**kwargs)
numa_topology = kwargs.pop('numa_topology', None)
image_meta = image_meta or {}
self.instance = self._fake_instance(**kwargs)
self.instance.numa_topology = None
if numa_topology:
self.db_numa_topology = {
'id': 1, 'created_at': None, 'updated_at': None,
'deleted_at': None, 'deleted': None,
'instance_uuid': self.instance.uuid,
'numa_topology': numa_topology._to_json(),
'pci_requests': (requests or self.empty_requests).to_json()
}
else:
self.db_numa_topology = None
if overhead is None:
overhead = {'memory_mb': 0}
requests = requests or self.empty_requests
@mock.patch('nova.virt.hardware.numa_get_constraints',
return_value=numa_topology)
@mock.patch('nova.db.instance_extra_get_by_instance_uuid',
return_value=self.db_numa_topology)
def get_claim(mock_extra_get, mock_numa_get):
return claims.MoveClaim(self.context, self.instance, _NODENAME,
instance_type, image_meta, self.tracker,
self.resources, requests,
overhead=overhead, limits=limits)
return get_claim()
@mock.patch('nova.objects.Instance.drop_migration_context')
def test_abort(self, mock_drop):
claim = self._abort()
self.assertTrue(claim.tracker.rcalled)
mock_drop.assert_called_once_with()
def test_image_meta(self):
claim = self._claim()
self.assertIsInstance(claim.image_meta, objects.ImageMeta)
def test_image_meta_object_passed(self):
image_meta = objects.ImageMeta()
claim = self._claim(image_meta=image_meta)
self.assertIsInstance(claim.image_meta, objects.ImageMeta)
|
|
#!/usr/bin/python2
# Written by Reehan Shaikh
# Last update: April 11, 2006
# Adapted from gloader.py
# Revised by Daniel Ng
import sys, os, signal, time
from starter import Start
#import batch_ipcrm
import Pyro.core
debug_mode = 0
debug_uswitch = 0
# set the program names
VS_PROG = "uswitch"
VM_PROG = "glinux"
GR_PROG = "grouter"
MCONSOLE_PROG = "uml_mconsole"
SOCKET_NAME = "gini_socket"
VS_PROG_BIN = VS_PROG
VM_PROG_BIN = VM_PROG
GR_PROG_BIN = GR_PROG
MCONSOLE_PROG_BIN = MCONSOLE_PROG
SRC_FILENAME = "%s/gini_dist" % os.environ["GINI_HOME"] # setup file name
UML_WAIT_DELAY = 1.5 # wait delay between checking alive UML
GROUTER_WAIT = 0.5 # wait delay between starting routers
GINI_TMP_FILE = "gini_tmp_file" # tmp file used when checking alive UML
LOG_FILE = "gdist_log" # log file for gloader messages
SCREEN_LOG = True  # flag to enable/disable the screen log file
SSHOPTS = " -o StrictHostKeyChecking=false "
# set this flag to True if running without gbuilder
independent = False
if not independent:
uriIn = open("%s/tmp/pyro_uris" % os.environ["GINI_HOME"], "r")
uri = uriIn.readline().strip()
uri2 = uriIn.readline().strip()
uriIn.close()
# start the network
def distGINI(myGINI, options, ips):
"starting the GINI network components"
# the starting order is important here
    # first switches, then routers, and UMLs last.
print "\nStarting GINI switches..."
success = createVS(myGINI, options, ips)
return success
# create a switch for every UML and router interface so that these
# components can interact with each other over sockets
def createVS(myGINI, options, ips):
"create the switch config file and start the switch for UML and router interfaces"
success = True
if not independent:
sys.stdout = open("/dev/null", "w")
tm = Pyro.core.getProxyForURI(uri2)
sys.stdout = sys.__stdout__
# create the real switch devices
for i in range(0, len(ips[5]), 2):
print "Starting real Switch on machine %s...\t" % (ips[5][i])
# find the j that corresponds to the machine the switch is running on
for j in range(0, len(ips[0]), 2):
if (ips[0][j] == ips[5][i]):
break
remotes = []
unique_port = 0
for h in range(0, len(ips[3]), 4):
if (ips[3][h] == ips[5][i+1].name):
if ips[3][h+1]:
if not remotes.count(ips[3][h+1]):
remotes.append(ips[3][h+1])
# take the first unique port to share among destinations
if not unique_port and ips[3][h+3]:
unique_port = ips[3][h+3]
# create a SWITCH directory on remote machine, under the given specified directory from the IP file
subSwitchDir = "%s/GINI/%s" % (ips[0][j + 1], ips[5][i+1].name)
os.system("ssh" + SSHOPTS + ips[5][i] + " rm -rf " + subSwitchDir)
time.sleep(GROUTER_WAIT)
os.system("ssh" + SSHOPTS + ips[5][i] + " mkdir " + subSwitchDir)
# create script files to run on remote machines
scrpt = open("switch.sh", 'w')
if unique_port:
remotestring = ""
for remoteIP in remotes:
remotestring += "-r %s " % remoteIP.split("@")[-1]
command = "cd %s/\n%s -s %s.ctl -l uswitch.log -p uswitch.pid -u %d %s" % (subSwitchDir, VS_PROG, SOCKET_NAME, unique_port, remotestring)
else:
command = "cd %s/\n%s -s %s.ctl -l uswitch.log -p uswitch.pid" % (subSwitchDir, VS_PROG, SOCKET_NAME)
if debug_uswitch:
command += " -d -d"
scrpt.write(command)
scrpt.close()
os.system("chmod 755 switch.sh")
os.system("scp" + SSHOPTS + " switch.sh " + ips[5][i] + ":" + subSwitchDir + "/" + " >> ip_test_log")
time.sleep(GROUTER_WAIT)
command = " %s/switch.sh&" % subSwitchDir
if debug_mode:
rinput = ""
while rinput != "y" and rinput != "n" and rinput != "e":
rinput = raw_input("Enter y/n to start device or e to exit: ")
if rinput == "y":
os.system("ssh" + SSHOPTS + ips[5][i] + command)
elif rinput == "e":
sys.exit(1)
else:
os.system("ssh" + SSHOPTS + ips[5][i] + command)
time.sleep(GROUTER_WAIT)
if not independent:
os.system("ssh %s cat %s/uswitch.pid > pid.tmp" % (ips[5][i], subSwitchDir))
pidIn = open("pid.tmp", "r")
line = pidIn.readline()
pidIn.close()
os.remove("pid.tmp")
tm.notify(ips[5][i+1].name, line.strip(), ips[5][i])
#os.system("ssh" + SSHOPTS + ips[2][i] + " rm -rf" + command)
#os.system("rm -rf switch.sh " + configFile)
print "[OK]"
# create the switches for the UML interfaces
for i in range(0, len(ips[2]), 2):
for inters in ips[2][i + 1].interfaces:
for h in range(0, len(ips[3]), 4):
if (ips[3][h] == ips[2][i + 1].name and ips[3][h + 1] == inters.name):
break
for k in range(0, len(ips[4]), 2):
if(ips[4][k+1] == ips[3][h + 2]):
break
# find the specified directory of this specific IP
for j in range(0, len(ips[0]), 2):
if (ips[0][j] == ips[2][i]):
break
port_s = ""
if ips[3][h+3] == "":
continue
elif type(ips[3][h+3]) == str:
dirname, port_s = ips[3][h+3].split(" ")
subSwitchDir = "%s/GINI/%s@%s:%s" % (ips[0][j+1], dirname, ips[0][j].split("@")[-1], port_s)
testcmd = "ssh %s test -e %s/%s.ctl" % (ips[2][i], subSwitchDir, SOCKET_NAME)
if os.system(testcmd) == 0:
print "Using shared Switch from machine %s...\t" % ips[2][i]
continue
print "Starting shared Switch on machine %s...\t" % ips[2][i]
else:
print "Starting Switch on machine %s for %s interface %s...\t" % (ips[2][i], ips[2][i + 1].name, inters.name),
# create a SWITCH directory on remote machine, under the given specified directory from the IP file
subSwitchDir = "%s/GINI/%s_Switch_%s" % (ips[0][j + 1], ips[2][i + 1].name, inters.name)
os.system("ssh" + SSHOPTS + ips[2][i] + " rm -rf " + subSwitchDir)
time.sleep(GROUTER_WAIT)
os.system("ssh" + SSHOPTS + ips[2][i] + " mkdir " + subSwitchDir)
### ------- execute ---------- ###
# create script files to run on remote machines
scrpt = open("switch.sh", 'w')
if port_s:
newport = port_s
else:
newport = ips[3][h + 3]
remoteIP = ips[4][k]
if remoteIP.find("@") >= 0:
newremote = remoteIP.split("@")[1]
else:
newremote = remoteIP
command = "cd %s/\n%s -s %s.ctl -l uswitch.log -p uswitch.pid -u %s -r %s" % (subSwitchDir, VS_PROG, SOCKET_NAME, newport, newremote)
if debug_uswitch:
command += " -d -d"
scrpt.write(command)
scrpt.close()
os.system("chmod 755 switch.sh")
os.system("scp" + SSHOPTS + " switch.sh " + ips[2][i] + ":" + subSwitchDir + "/" + " >> ip_test_log")
time.sleep(GROUTER_WAIT)
command = " %s/switch.sh&" % subSwitchDir
if debug_mode:
rinput = ""
while rinput != "y" and rinput != "n" and rinput != "e":
rinput = raw_input("Enter y/n to start device or e to exit: ")
if rinput == "y":
os.system("ssh" + SSHOPTS + ips[2][i] + command)
elif rinput == "e":
sys.exit(1)
else:
os.system("ssh" + SSHOPTS + ips[2][i] + command)
time.sleep(GROUTER_WAIT)
#os.system("ssh" + SSHOPTS + ips[2][i] + " rm -rf" + command)
#os.system("rm -rf switch.sh " + configFile)
print "[OK]"
# create the switches for the router interfaces
for i in range(0, len(ips[1]), 2):
for inters in ips[1][i + 1].netIF:
for h in range(0, len(ips[3]), 4):
if (ips[3][h] == ips[1][i + 1].name and ips[3][h + 1] == inters.name):
break
if ips[3][h+3] == "":
continue
for k in range(0, len(ips[4]), 2):
if(ips[4][k+1] == ips[3][h + 2]):
break
for j in range(0, len(ips[0]), 2):
if (ips[0][j] == ips[1][i]):
break
socketName = "%s/GINI/Shared_Switch@%s:%s/%s.ctl" % (ips[0][j+1], ips[0][j].split("@")[-1], ips[3][h+3], SOCKET_NAME)
if os.system("ssh %s test -e %s" % (ips[1][i], socketName)) == 0:
print "Using shared Switch from machine %s...\t" % ips[1][i]
continue
print "Starting Switch on machine %s for %s interface %s...\t" % (ips[1][i], ips[1][i + 1].name, inters.name),
# create directory on remote machine
subSwitchDir = "%s/GINI/%s_Switch_%s" % (ips[0][j + 1], ips[1][i + 1].name, inters.name)
os.system("ssh" + SSHOPTS + ips[1][i] + " rm -rf " + subSwitchDir)
time.sleep(GROUTER_WAIT)
os.system("ssh" + SSHOPTS + ips[1][i] + " mkdir " + subSwitchDir)
### ------- execute ---------- ###
# create script files to run on remote machines
scrpt = open("switch.sh", 'w')
remoteIP = ips[4][k]
if remoteIP.find("@") >= 0:
newremote = remoteIP.split("@")[1]
else:
newremote = remoteIP
command = "cd %s/\n%s -s %s.ctl -l uswitch.log -p uswitch.pid -u %d -r %s" % (subSwitchDir, VS_PROG, SOCKET_NAME, ips[3][h + 3], newremote)
if debug_uswitch:
command += " -d -d"
scrpt.write(command)
scrpt.close()
os.system("chmod 755 switch.sh")
os.system("scp" + SSHOPTS + " switch.sh " + ips[1][i] + ":" + subSwitchDir + "/" + " >> ip_test_log")
time.sleep(GROUTER_WAIT)
command = " %s/switch.sh&" % subSwitchDir
if debug_mode:
rinput = ""
while rinput != "y" and rinput != "n" and rinput != "e":
rinput = raw_input("Enter y/n to start device or e to exit: ")
if rinput == "y":
os.system("ssh" + SSHOPTS + ips[1][i] + command)
elif rinput == "e":
sys.exit(1)
else:
os.system("ssh" + SSHOPTS + ips[1][i] + command)
time.sleep(GROUTER_WAIT)
os.system("ssh" + SSHOPTS + ips[1][i] + " rm -rf" + command)
os.system("rm -rf switch.sh ")
print "[OK]"
# now that the switches are started, start the routers
print "\nStarting GINI Routers..."
success = createVR(myGINI, options, ips) and success
# now start the umls
print "\nStarting GINI UMLs..."
success = createVM(myGINI, options, ips) and success
return success
# distribute and start routers
def createVR(myGINI, options, ips):
"create router config file, and start the router"
logOut = file(LOG_FILE, 'a')
# rhandle = open('%s/tmp/remote_routers' % os.environ["GINI_HOME"], 'w')
if not independent:
sys.stdout = open("/dev/null", "w")
tm = Pyro.core.getProxyForURI(uri2)
sys.stdout = sys.__stdout__
for i in range(0, len(ips[1]), 2):
# find the specified directory on the remote machine
for j in range(0, len(ips[0]), 2):
if (ips[0][j] == ips[1][i]):
break
print "Starting Router %s on machine %s...\t" % (ips[1][i + 1].name, ips[1][i]),
# rhandle.write(ips[1][i + 1].name + " " + ips[1][i] + "\n")
sys.stdout.flush()
### ------ config ---------- ###
# create the router directory
subRouterDir = "%s/GINI/%s" % (ips[0][j + 1], ips[1][i + 1].name)
os.system("ssh" + SSHOPTS + ips[1][i] + " rm -rf " + subRouterDir)
time.sleep(GROUTER_WAIT)
os.system("ssh" + SSHOPTS + ips[1][i] + " mkdir " + subRouterDir)
configFile = "%s.conf" % GR_PROG
# create the config file
configOut = open(configFile, "w")
for nwIf in ips[1][i + 1].netIF:
remote_target = ""
isSwitch = False
sharedPort = ""
if nwIf.target.find("Switch") >= 0:
isSwitch = True
for k in range(0, len(ips[3]), 4):
if ips[3][k] == nwIf.target:
if not sharedPort and ips[3][k+2] == ips[1][i+1].name:
sharedPort = ips[3][k+3]
if isSwitch:
ips[3][k+1] = nwIf.network
for m in range(0, len(ips[4]), 2):
if ips[4][m+1] == nwIf.target:
remote_target = ips[4][m]
break
if remote_target == ips[0][j]:
if isSwitch:
socketName = "%s/GINI/%s/%s.ctl" % (ips[0][j+1], nwIf.target, SOCKET_NAME)
else:
socketName = "%s/GINI/%s/gini_socket_%s.ctl" % (ips[0][j + 1], ips[1][i + 1].name, nwIf.name);
else:
if type(sharedPort) == str:
sharedPort = sharedPort.split(" ")[-1]
socketName = "%s/GINI/Shared_Switch@%s:%s/%s.ctl" % (ips[0][j+1], ips[0][j].split("@")[-1], sharedPort, SOCKET_NAME)
if os.system("ssh %s test -e %s" % (ips[1][i], socketName)):
socketName = "%s/GINI/%s_Switch_%s/%s.ctl" % (ips[0][j + 1], ips[1][i + 1].name, nwIf.name, SOCKET_NAME);
configOut.write(getVRIFOutLine(nwIf, socketName))
configOut.close()
### ------- execute ---------- ###
# go to the router directory to execute the command
scrpt = open("router.sh", 'w')
command = "cd %s/\n%s --config=%s.conf --interactive=1 %s" % (subRouterDir, GR_PROG, GR_PROG, ips[1][i + 1].name)
if debug_mode:
print command
scrpt.write(command)
scrpt.close()
os.system("chmod 755 router.sh")
os.system("scp" + SSHOPTS + configFile + " router.sh " + ips[1][i] + ":" + subRouterDir + "/" + " >> ip_test_log")
command = "screen -d -m "
if (SCREEN_LOG):
command += "-L "
command += "-S %s ssh%s%s -t %s/router.sh" % (ips[1][i + 1].name, SSHOPTS, ips[1][i], subRouterDir)
if debug_mode:
rinput = ""
while rinput != "y" and rinput != "n" and rinput != "e":
rinput = raw_input("Enter y/n to start device or e to exit: ")
if rinput == "y":
os.system(command)
elif rinput == "e":
sys.exit(1)
else:
os.system(command)
if not independent:
tm.notify(ips[1][i+1].name, "", ips[1][i])
time.sleep(GROUTER_WAIT)
# os.system("ssh" + SSHOPTS + ips[1][i] + " rm -rf " + subRouterDir + "/router.sh")
os.system("rm -rf router.sh " + configFile)
print "[OK]"
logOut.close()
# rhandle.close()
return True
# distribute and start the umls
def createVM(myGINI, options, ips):
"create UML config file, and start the UML"
if not independent:
sys.stdout = open("/dev/null", "w")
tm = Pyro.core.getProxyForURI(uri2)
sys.stdout = sys.__stdout__
logOut = file(LOG_FILE, 'a')
# rhandle = open('%s/tmp/remote_UMLs', 'w')
for i in range(0, len(ips[2]), 2):
print "Starting UML %s on machine %s...\t" % (ips[2][i + 1].name, ips[2][i]),
# rhandle.write(ips[2][i + 1].name + " " + ips[2][i] + "\n")
for j in range(0, len(ips[0]), 2):
if (ips[0][j] == ips[2][i]):
break
# create the UML directory
sys.stdout.flush()
subUMLDir = "%s/GINI/%s" % (ips[0][j + 1], ips[2][i + 1].name)
os.system("ssh" + SSHOPTS + ips[2][i] + " rm -rf " + subUMLDir)
time.sleep(GROUTER_WAIT)
os.system("ssh" + SSHOPTS + ips[2][i] + " mkdir " + subUMLDir)
# create command line
command = createUMLCmdLine(ips[2][i + 1])
### ---- process the UML interfaces ---- ###
# it creates one config for each interface in the current directory
# and returns a string to be attached to the UML exec command line
for nwIf in ips[2][i + 1].interfaces:
for k in range(0, len(ips[3]), 4):
if ips[3][k+2] == ips[2][i + 1].name:
break
socketName = "%s/GINI/Shared_Switch@%s:%s/%s.ctl" % (ips[0][j+1], ips[0][j].split("@")[-1], ips[3][k+3], SOCKET_NAME)
if os.system("ssh %s test -e %s" % (ips[2][i], socketName)):
# name the socket, as per the specified switch
socketName = "%s/GINI/%s_Switch_%s/%s.ctl" % (ips[0][j + 1], ips[2][i + 1].name, nwIf.name, SOCKET_NAME)
# since "test" command returns 0 on success
if os.system("ssh %s test -e %s" % (ips[2][i], socketName)):
if ips[3][k].find("Router") >= 0:
socketName = "%s/GINI/%s/gini_socket_%s.ctl" % (ips[0][j + 1], ips[3][k], ips[3][k+1])
else:
socketName = "%s/GINI/%s/gini_socket.ctl" % (ips[0][j + 1], ips[3][k])
configFile = "%s.sh" % nwIf.name
# create the config file
configOut = open(configFile, "w")
configOut.write("ifconfig %s " % nwIf.name)
configOut.write("%s\n" % nwIf.ip)
for route in nwIf.routes:
if ips[3][k].find("Switch") >= 0:
redirect_net = ips[3][k+1]
if route.dest == redirect_net:
continue
configOut.write("route add -%s %s " % (route.type, route.dest))
configOut.write("netmask %s " % route.netmask)
configOut.write("gw %s\n" % route.gw)
configOut.close()
os.system("chmod 755 " + configFile)
# prepare the output line
outLine = "%s=daemon,%s,unix," % (nwIf.name, nwIf.mac)
outLine += socketName
command += "hostfs=$GINI_HOME %s " % outLine
scrpt = open("uml.sh", 'w')
# because the uml looks for a MAC_ADRS.sh file, we move it using this script
scrptcmd = "cd %s/\nmv %s $GINI_HOME/tmp/%s.sh" % (subUMLDir, configFile, nwIf.mac.upper())
scrpt.write(scrptcmd)
scrpt.close()
os.system("chmod 755 uml.sh")
os.system("scp" + SSHOPTS + configFile + " uml.sh " + ips[2][i] + ":" + subUMLDir + "/" + " >> ip_test_log")
time.sleep(GROUTER_WAIT)
runscrpt = " %s/uml.sh&" % subUMLDir
os.system("ssh" + SSHOPTS + ips[2][i] + runscrpt)
if os.system("ssh %s test -e '$GINI_HOME'/tmp/UML_bak" % ips[2][i]):
os.system("ssh %s mkdir '$GINI_HOME'/tmp/UML_bak" % ips[2][i])
os.system("ssh %s cp '$GINI_HOME'/tmp/%s.sh '$GINI_HOME'/tmp/UML_bak" % (ips[2][i], nwIf.mac.upper()))
time.sleep(GROUTER_WAIT)
os.system("ssh" + SSHOPTS + ips[2][i] + " rm -rf" + runscrpt)
os.system("rm -rf uml.sh " + configFile)
### ------- execute ---------- ###
# go to the UML directory to execute the command
scrpt = open("startit.sh", 'w')
cmd = "cd %s\n%s" % (subUMLDir, command)
scrpt.write(cmd)
scrpt.close()
os.system("chmod 755 startit.sh")
os.system("scp" + SSHOPTS + "startit.sh " + ips[2][i] + ":" + subUMLDir + "/" + " >> ip_test_log")
time.sleep(GROUTER_WAIT)
startUml = "screen -d -m -S %s ssh%s%s -t %s/startit.sh" % (ips[2][i + 1].name, SSHOPTS, ips[2][i], subUMLDir)
if debug_mode:
rinput = ""
while rinput != "y" and rinput != "n" and rinput != "e":
rinput = raw_input("Enter y/n to start device or e to exit: ")
if rinput == "y":
os.system(startUml)
elif rinput == "e":
sys.exit(1)
else:
os.system(startUml)
time.sleep(UML_WAIT_DELAY)
os.system("rm -rf startit.sh")
print "[OK]"
logOut.close()
# rhandle.close()
if not independent:
for i in range(0, len(ips[2]), 2):
tm.notify(ips[2][i+1].name, "", ips[2][i])
return True
# taken from gloader
def getVRIFOutLine(nwIf, socketName):
"convert the router network interface specs into a string"
outLine = "ifconfig add %s " % nwIf.name
outLine += "-socket %s " % socketName
outLine += "-addr %s " % nwIf.ip
outLine += "-network %s " % nwIf.network
outLine += "-hwaddr %s " % nwIf.nic
if (nwIf.gw):
outLine += "-gw %s " % nwIf.gw
if (nwIf.mtu):
outLine += "-mtu %s " % mwIf.mtu
outLine += "\n"
for route in nwIf.routes:
outLine += "route add -dev %s " % nwIf.name
outLine += "-net %s " % route.dest
outLine += "-netmask %s " % route.netmask
if (route.nexthop):
outLine += "-gw %s" % route.nexthop
outLine += "\n"
return outLine
# taken from gloader
def createUMLCmdLine(uml):
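    # Build the UML (glinux) command line: kernel binary, umid, the root file
    # system (as a COW overlay when requested), plus optional mem= and con0=
    # boot options.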
command = ""
## uml binary name
if (uml.kernel):
command += "%s " % uml.kernel
else:
command += "%s " % VM_PROG_BIN
## uml ID
command += "umid=%s " % uml.name
## handle the file system option
# construct the cow file name
fileSystemName = getBaseName(uml.fileSystem.name)
fsCOWName = "%s.cow" % fileSystemName
if (uml.fileSystem.type.lower() == "cow"):
command += "ubd0=%s,$GINI_HOME/%s " % (fsCOWName, fileSystemName)
else:
command += "ubd0=%s " % uml.fileSystem.name
## handle the mem option
if (uml.mem):
command += "mem=%s " % uml.mem
## handle the boot option
if (uml.boot):
command += "con0=%s " % uml.boot
return command
# taken from gloader
def getBaseName(pathName):
"Extract the filename from the full path"
pathParts = pathName.split("/")
return pathParts[len(pathParts)-1]
# stop the network
def undistGINI(myGINI, options, ips):
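    # Tear the distributed network down: either kill every process owned by
    # the user on each remote machine (brute_force) or selectively signal the
    # uswitch/grouter/glinux processes, and optionally delete the remote GINI
    # directories.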
#don't use kill -9 -1
brute_force = False
os.system("rm -rf ip_test_log")
print "\nTerminating switches..."
print "\nTerminating routers..."
print "\nTerminating UMLs..."
print "\nCleaning the interprocess message queues"
# batch_ipcrm.clean_ipc_queues()
if brute_force:
        # since we are working on remote machines, we don't care about
        # the processes there, so we just ssh and kill -9 -1 (this kills
        # all the processes linked to the user)
for i in range(0, len(ips[0]), 2):
os.system("ssh" + SSHOPTS + " " + ips[0][i] + " kill -9 -1")
if (not options.keepOld):
print "\nDeleting GINI related files on remote machine %s...\n" % ips[0][i]
command = " rm -rf %s/GINI" % ips[0][i + 1]
os.system("ssh" + SSHOPTS + " " + ips[0][i] + command)
return True
else:
for i in range(0, len(ips[0]), 2):
os.system("ssh" + SSHOPTS + " " + ips[0][i] + " killall -13 -u %s -q uswitch" % os.getenv("USER"))
os.system("ssh" + SSHOPTS + " " + ips[0][i] + " killall -u %s -q grouter glinux" % os.getenv("USER"))
if (not options.keepOld):
print "\nDeleting GINI related files on remote machine %s...\n" % ips[0][i]
time.sleep(0.5)
command = " rm -rf %s/GINI" % ips[0][i + 1]
os.system("ssh" + SSHOPTS + " " + ips[0][i] + command)
return True
# adapted from gloader with modifications
def checkProcAlive(procName, ipdirs):
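    # ssh into every remote machine, grep the `ps aux` output for procName and
    # report any matching process owned by the current user.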
alive = False
# grep the GINI processes
command = " ps aux | grep %s > %s" % (procName, GINI_TMP_FILE)
for i in range(0, len(ipdirs), 2):
os.system("ssh" + SSHOPTS + ipdirs[i] + command)
# analyse the grepped output
inFile = open(GINI_TMP_FILE)
line = inFile.readline()
while (line):
if (line.find("grep") == -1):
# don't consider the "grep" line
userName = os.environ["USER"]
lineParts = line.split()
if (lineParts[0] == userName):
# consider only the instances with the current user
alive = True
print "There is a live GINI %s on machine %s" % (procName, ipdirs[i])
line = inFile.readline()
inFile.close()
# clean up
os.remove(GINI_TMP_FILE)
return alive
# adapted from gloader with modifications
def writeSrcFile(options):
"write the configuration in the setup file"
outFile = open(SRC_FILENAME, "w")
outFile.write("%s\n" % options.xmlFile)
outFile.write("%s\n" % options.switchDir)
outFile.write("%s\n" % options.routerDir)
outFile.write("%s\n" % options.umlDir)
outFile.write("%s\n" % options.binDir)
outFile.write("%s\n" % options.ipSpecs)
outFile.close()
# taken from gloader
def deleteSrcFile():
"delete the setup file"
if (os.access(SRC_FILENAME, os.W_OK)):
os.remove(SRC_FILENAME)
else:
print "Could not delete the GINI setup file"
# adapted from gloader with modifications
def checkAliveGini(ips):
"check any of the gini components already running"
# Modified to check every machine in our ipSpecs file
result = False
if checkProcAlive(VS_PROG_BIN, ips[0]):
result = True
if checkProcAlive(VM_PROG_BIN, ips[0]):
result = True
if checkProcAlive(GR_PROG_BIN, ips[0]):
result = True
return result
#### -------------- MAIN start ----------------####
# create the program processor. This
# 1. accepts and process the command line options
# 2. creates XML processing engine, that in turn
# a) validates the XML file
# b) extracts the DOM object tree
# c) populates the GINI network class library
# d) performs some semantic/syntax checkings on
# the extracted specification
# e) validates the IP file for distribution
old = False
myProg = Start(sys.argv[0], SRC_FILENAME)
print SRC_FILENAME
if (not myProg.processOptions(sys.argv[1:])):
sys.exit(1)
options = myProg.options
# Get the valid IPs and directories from the ipSpecs file
# Also check if the IPs and directories are valid
# we validate by: scp a file into given machine and directory
# ssh and remove the file. Once this is validated, we don't
# have to error-check these operations anymore
if old:
ipfilehandle = open(options.ipSpecs, 'r')
lines = ipfilehandle.readlines()
iptest = open("ip_test_log", 'w')
iptest.close()
ginitest = open("gini_ip_test", 'w')
ginitest.write("This is a test file\nIt should not be here\nIt should have been deleted automatically\nDelete it if you can read this!!!")
ginitest.close()
ipdircombos = []
res = False
for line in lines:
a = line.split("\n")
b = a[0].split(":")
ipdircombos.append(b[0])
ipdircombos.append(b[1])
if ((not myProg.undistOpt) or (not options.keepOld)):
os.system("ssh" + SSHOPTS + b[0] + " rm -rf " + b[1] + "/GINI")
time.sleep(GROUTER_WAIT)
os.system("ssh" + SSHOPTS + b[0] + " mkdir " + b[1] + "/GINI")
i = os.system("scp" + SSHOPTS + "gini_ip_test " + a[0] + "/GINI/ >> ip_test_log")
if (not i == 0):
print "Problem with machine or directory %s" % a[0]
res = True
if (i == 0):
os.system("ssh" + SSHOPTS + b[0] + " rm -rf " + b[1] + "/GINI/gini_ip_test >> ip_test_log")
print "Machine and directory valid on %s" % a[0]
os.system("rm -rf gini_ip_test")
ipfilehandle.close()
if (res):
sys.exit(1)
# get the populated GINI network class
# its structure is the same as the XML specification
myGINI = myProg.giniNW
# We don't distribute switches
if (len(myGINI.switches) > 0):
print "\nCannot distriute switches...sorry"
print "These cannot be in the topology"
sys.exit(1)
# Let the user know about the number of IPs
total_ips_req = len(myGINI.vr) + len(myGINI.vm)
total_ips_giv = len(ipdircombos) / 2
# if (total_ips_req > total_ips_giv):
# print "\nThe given IPs aren't enough"
# print "There will be more than one GINI component on some machines\n"
ipvrcombos = []
ipvmcombos = []
ipcompcombos = []
j = 0
for i in range(len(myGINI.vr)):
ipvrcombos.append(ipdircombos[j])
ipvrcombos.append(myGINI.vr[i])
ipcompcombos.append(ipdircombos[j])
ipcompcombos.append(myGINI.vr[i].name)
j = (j + 2) % len(ipdircombos)
for i in range(len(myGINI.vm)):
ipvmcombos.append(ipdircombos[j])
ipvmcombos.append(myGINI.vm[i])
ipcompcombos.append(ipdircombos[j])
ipcompcombos.append(myGINI.vm[i].name)
j = (j + 2) % len(ipdircombos)
else:
if debug_mode:
print "checkpoint 1"
ipdircombos = []
ipvrcombos = []
ipvmcombos = []
ipvscombos = []
ipcompcombos = []
dev_dic = {}
hosts = []
rdfile = options.xmlFile[0:len(options.xmlFile)-4] + "_rdist"
rdhandle = open(rdfile, "r")
for line in rdhandle.readlines():
parts = line.strip().split(",")
if int(parts[1]):
if hosts.count(parts[2]):
pass
else:
hosts.append(parts[2])
for i in range(3, len(parts)):
dev_dic[parts[i]] = parts[2].split(":")[0]
rdhandle.close()
ginitest = open("gini_ip_test", 'w')
ginitest.write("This is a test file\nIt should not be here\nIt should have been deleted automatically\nDelete it if you can read this!!!")
ginitest.close()
res = False
if debug_mode:
print "checkpoint 2"
for host in hosts:
hostpath = host.split(":")
ipdircombos.append(hostpath[0])
if len(hostpath) < 2:
hostlogin = hostpath[0].split("@")
if len(hostlogin) < 2:
whoami = os.getenv("USER")
else:
whoami = hostlogin[0]
newpath = "/home/%s/gtemp" % whoami
hostpath.append(newpath)
host += ":" + hostpath[1]
if not myProg.undistOpt:
print "Warning, invalid remote path specified, defaulting to %s" % newpath
os.system("ssh" + SSHOPTS + hostpath[0] + " mkdir " + hostpath[1] + " 2> /dev/null")
else:
#os.system("ssh" + SSHOPTS + hostpath[0] + " rm -rf " + hostpath[1] + " 2> /dev/null")
pass
ipdircombos.append(hostpath[1])
if ((not myProg.undistOpt) and (not options.keepOld)):
os.system("ssh" + SSHOPTS + hostpath[0] + " rm -rf " + hostpath[1] + "/GINI")
time.sleep(GROUTER_WAIT)
os.system("ssh" + SSHOPTS + hostpath[0] + " mkdir " + hostpath[1] + "/GINI")
i = os.system("scp" + SSHOPTS + "gini_ip_test " + host + "/GINI/ >> ip_test_log")
if (not i == 0):
print "Problem with machine or directory %s" % host
res = True
if (i == 0):
os.system("ssh" + SSHOPTS + hostpath[0] + " rm -rf " + hostpath[1] + "/GINI/gini_ip_test >> ip_test_log")
print "Machine and directory valid on %s" % host
os.system("rm -rf gini_ip_test")
if (res):
sys.exit(1)
# get the populated GINI network class
# its structure is the same as the XML specification
myGINI = myProg.giniNW
# We don't distribute wireless components
if len(myGINI.vwr) > 0 or len(myGINI.vmb) > 0:
print "\nCannot distriute wireless devices...sorry"
print "These cannot be in the topology"
sys.exit(1)
# Let the user know about the number of IPs
total_ips_req = len(myGINI.vr) + len(myGINI.vm)
total_ips_giv = len(ipdircombos) / 2
# if (total_ips_req > total_ips_giv):
# print "\nThe given IPs aren't enough"
# print "There will be more than one GINI component on some machines\n"
if debug_mode:
print "checkpoint 3"
for router in myGINI.vr:
ipvrcombos.append(dev_dic[router.name])
ipvrcombos.append(router)
ipcompcombos.append(dev_dic[router.name])
ipcompcombos.append(router.name)
for uml in myGINI.vm:
ipvmcombos.append(dev_dic[uml.name])
ipvmcombos.append(uml)
ipcompcombos.append(dev_dic[uml.name])
ipcompcombos.append(uml.name)
for switch in myGINI.switches:
ipvscombos.append(dev_dic[switch.name])
ipvscombos.append(switch)
ipcompcombos.append(dev_dic[switch.name])
ipcompcombos.append(switch.name)
# Calculate switch port properties. If there is a
# link in the GINI topology between two components,
# then the switches for these components must have
# the same port number and their respective remote
# addresses should refer to each other
if debug_mode:
print "checkpoint 4"
ipports = []
for i in myGINI.vm:
for j in i.interfaces:
ipports.append(i.name)
ipports.append(j.name)
ipports.append(j.target)
ipports.append(0)
for i in myGINI.vr:
for j in i.netIF:
ipports.append(i.name)
ipports.append(j.name)
ipports.append(j.target)
ipports.append(0)
for i in myGINI.switches:
for j in range(2, len(ipports), 4):
if ipports[j] == i.name:
ipports.append(i.name)
ipports.append("")
ipports.append(ipports[j-2])
ipports.append(0)
if None:
print ipdircombos
print ipvrcombos
print ipvmcombos
print ipports
print ipcompcombos
print ipvscombos
answer = ""
while answer != "y" and answer != "n":
answer = raw_input("Continue?")
if answer == "n":
sys.exit(1)
# find available ports such that they match on both remote machines
# switches are initialized with port and remote ip fields but the
# host and remote ip are only talking to the same port (ie. if the
# host machine has a switch on port x then the remote machine must
# have a listening switch on port x as well
if debug_mode:
print "checkpoint 5"
if (not myProg.undistOpt):
switchport = 1115
spcombos = {}
for i in range(3, len(ipports), 4):
if ipports[i] == 0:
j = 1
# find the machines of the source and destination
while (not ipports[i - 3] == ipcompcombos[j]):
j += 2
k = 1
while (not ipports[i - 1] == ipcompcombos[k]):
k += 2
os.system("ssh" + SSHOPTS + ipcompcombos[j - 1] + " netstat -anp > m1ports 2>&1")
os.system("ssh" + SSHOPTS + ipcompcombos[k - 1] + " netstat -anp > m2ports 2>&1")
check = True
while (check):
command = "grep -w %d m1ports >> ip_test_log" % switchport
m1 = os.WEXITSTATUS(os.system(command))
command = "grep -w %d m2ports >> ip_test_log" % switchport
m2 = os.WEXITSTATUS(os.system(command))
if (m1 == 1 and m2 == 1):
# keep interconnected switch ports consistent
if ipports[i-3].find("Switch") >= 0:
if spcombos.has_key(ipports[i-3]):
ipports[i] = spcombos[ipports[i-3]] # use shared port
switchport -= 1 # will be incremented later
else:
ipports[i] = switchport
spcombos[ipports[i-3]] = switchport # define new shared port
elif ipports[i-1].find("Switch") >= 0:
if spcombos.has_key(ipports[i-1]):
ipports[i] = spcombos[ipports[i-1]]
switchport -= 1
else:
ipports[i] = switchport
spcombos[ipports[i-1]] = switchport
else:
ipports[i] = switchport
for x in range(3, len(ipports), 4):
# find reverse connections
if (ipports[i - 3] == ipports[x - 1] and ipports[i - 1] == ipports[x - 3]):
break
ipports[x] = switchport
switchport += 1
check = False
else:
switchport += 1
os.system("rm -rf m1ports m2ports")
if not old:
if debug_mode:
print "checkpoint 6"
for i in range(0, len(ipports), 4):
source = ipports[i]
destination = ipports[i+2]
# if machine of source and machine of destination are the same
if dev_dic[source] == dev_dic[destination]:
# signal to not create a switch between elements running on the same machine
if source.find("Router") >= 0 or destination.find("Router") >= 0:
ipports[i+3] = "" # clear port number
elif source.find("Switch") >= 0 or destination.find("Switch") >= 0:
ipports[i+3] = ""
elif source.find("Switch") >= 0:
for j in range(0, len(ipcompcombos), 2):
if ipcompcombos[j+1] == destination:
ipports[i+1] = ipcompcombos[j] # provide remote location instead of interface
break
elif source.find("UML") >= 0:
ipports[i+3] = "Shared_Switch %d" % ipports[i+3]
# Store the mappings so we can pass them around
ips = []
# Zeroth element is IP and Directory tuples
ips.append(ipdircombos)
# First element is IP and Router tuples
ips.append(ipvrcombos)
# Second element is IP and UML tuples
ips.append(ipvmcombos)
# Third element is Switch port configurations
ips.append(ipports)
# Forth element is IP and component tuples
# UMLs and Router in one list
ips.append(ipcompcombos)
ips.append(ipvscombos)
if debug_mode:
if debug_mode:
print "checkpoint 7"
print ips
answer = ""
while answer != "y" and answer != "n":
answer = raw_input("Continue?")
if answer == "n":
sys.exit(1)
# reset the log file
if (os.access(LOG_FILE, os.F_OK)):
os.remove(LOG_FILE)
# distribute or undistribute GINI network
print ""
if (myProg.undistOpt):
# terminate the current distributed specification
print "Terminating GINI network..."
success = undistGINI(myGINI, options, ips)
if (success):
print "\nGINI network is undistributed and terminated!!\n"
else:
print "\nThere are errors in GINI network termination"
print "Check the logfile %s for more details" % LOG_FILE
print "You might have to terminate the orphaned processes manually\n"
sys.exit(1)
else:
# create a distributed GINI instance
if (not options.keepOld):
# fail if a GINI already alive
if checkAliveGini(ips):
sys.exit(1)
    # create network with current specification
print "Creating and distributing a GINI network..."
success = distGINI(myGINI, options, ips)
writeSrcFile(options)
if (success):
print "\nGINI network up, running and distributed!!\n"
else:
print "\nProblem in creating GINI network"
print "Check the log file %s for details" % LOG_FILE
print "** Run gdist -y to terminate the partially started ",
print "GINI instance before starting another one **\n"
sys.exit(1)
sys.exit(0)
|
|
# -*- coding: utf-8 -*-
'''
Module for interfacing with SysFS
.. seealso:: https://www.kernel.org/doc/Documentation/filesystems/sysfs.txt
.. versionadded:: 2016.3.0
'''
# Import python libs
from __future__ import absolute_import
import logging
import os
import stat
# Import external libs
import salt.ext.six as six
# Import salt libs
import salt.utils
log = logging.getLogger(__name__)
def __virtual__():
'''
Only work on Linux
'''
return salt.utils.is_linux()
def attr(key, value=None):
'''
Access/write a SysFS attribute.
    If the attribute is a symlink, its destination is returned
:return: value or bool
CLI example:
.. code-block:: bash
salt '*' sysfs.attr block/sda/queue/logical_block_size
'''
key = target(key)
if key is False:
return False
elif os.path.isdir(key):
return key
elif value is not None:
return write(key, value)
else:
return read(key)
def write(key, value):
'''
Write a SysFS attribute/action
CLI example:
.. code-block:: bash
salt '*' sysfs.write devices/system/cpu/cpu0/cpufreq/scaling_governor 'performance'
'''
try:
key = target(key)
log.trace('Writing {0} to {1}'.format(value, key))
with salt.utils.fopen(key, 'w') as twriter:
twriter.write('{0}\n'.format(value))
return True
except: # pylint: disable=bare-except
return False
def read(key, root=''):
'''
Read from SysFS
:param key: file or path in SysFS; if key is a list then root will be prefixed on each key
:return: the full (tree of) SysFS attributes under key
CLI example:
.. code-block:: bash
salt '*' sysfs.read class/net/em1/statistics
'''
if not isinstance(key, six.string_types):
res = {}
for akey in key:
ares = read(os.path.join(root, akey))
if ares is not False:
res[akey] = ares
return res
key = target(os.path.join(root, key))
if key is False:
return False
elif os.path.isdir(key):
keys = interfaces(key)
result = {}
for subkey in keys['r'] + keys['rw']:
subval = read(os.path.join(key, subkey))
if subval is not False:
subkeys = subkey.split('/')
subkey = subkeys.pop()
subresult = result
if len(subkeys):
for skey in subkeys:
if skey not in subresult:
subresult[skey] = {}
subresult = subresult[skey]
subresult[subkey] = subval
return result
else:
try:
log.trace('Reading {0}...'.format(key))
# Certain things in SysFS are pipes 'n such.
# This opens it non-blocking, which prevents indefinite blocking
with os.fdopen(os.open(key, os.O_RDONLY | os.O_NONBLOCK)) as treader:
# alternative method for the same idea, but only works for completely empty pipes
# treader = select.select([treader], [], [], 1)[0][0]
val = treader.read().strip()
if not val:
return False
try:
val = int(val)
except: # pylint: disable=bare-except
try:
val = float(val)
except: # pylint: disable=bare-except
pass
return val
except: # pylint: disable=bare-except
return False
def target(key, full=True):
'''
Return the basename of a SysFS key path
:param key: the location to resolve within SysFS
:param full: full path instead of basename
:return: fullpath or basename of path
CLI example:
.. code-block:: bash
        salt '*' sysfs.target class/ttyS0
'''
if not key.startswith('/sys'):
key = os.path.join('/sys', key)
key = os.path.realpath(key)
if not os.path.exists(key):
        log.debug('Unknown SysFS key {0}'.format(key))
return False
elif full:
return key
else:
return os.path.basename(key)
def interfaces(root):
'''
Generate a dictionary with all available interfaces relative to root.
Symlinks are not followed.
CLI example:
.. code-block:: bash
salt '*' sysfs.interfaces block/bcache0/bcache
Output example:
.. code-block:: json
{
"r": [
"state",
"partial_stripes_expensive",
"writeback_rate_debug",
"stripe_size",
"dirty_data",
"stats_total/cache_hits",
"stats_total/cache_bypass_misses",
"stats_total/bypassed",
"stats_total/cache_readaheads",
"stats_total/cache_hit_ratio",
"stats_total/cache_miss_collisions",
"stats_total/cache_misses",
"stats_total/cache_bypass_hits",
],
"rw": [
"writeback_rate",
"writeback_rate_update_seconds",
"cache_mode",
"writeback_delay",
"label",
"writeback_running",
"writeback_metadata",
"running",
"writeback_rate_p_term_inverse",
"sequential_cutoff",
"writeback_percent",
"writeback_rate_d_term",
"readahead"
],
"w": [
"stop",
"clear_stats",
"attach",
"detach"
]
}
.. note::
* 'r' interfaces are read-only
* 'w' interfaces are write-only (e.g. actions)
* 'rw' are interfaces that can both be read or written
'''
root = target(root)
if root is False or not os.path.isdir(root):
log.error('SysFS {0} not a dir'.format(root))
return False
readwrites = []
reads = []
writes = []
for path, _, files in os.walk(root, followlinks=False):
for afile in files:
canpath = os.path.join(path, afile)
if not os.path.isfile(canpath):
continue
stat_mode = os.stat(canpath).st_mode
is_r = bool(stat.S_IRUSR & stat_mode)
is_w = bool(stat.S_IWUSR & stat_mode)
relpath = os.path.relpath(canpath, root)
if is_w:
if is_r:
readwrites.append(relpath)
else:
writes.append(relpath)
elif is_r:
reads.append(relpath)
else:
log.warn('Unable to find any interfaces in {0}'.format(canpath))
return {
'r': reads,
'w': writes,
'rw': readwrites
}
|
|
#
# Copyright (c) 2014, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
#
#
from atc_api.atcd_client import atcdClient
from atc_api.serializers import SettingSerializer, DeviceSerializer
from atc_api.settings import atc_api_settings
from atc_thrift.ttypes import TrafficControlException, TrafficControl
from atc_thrift.ttypes import TrafficControlledDevice, AccessToken
from functools import wraps
from rest_framework.exceptions import APIException
from rest_framework.exceptions import ParseError
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework import status
class BadGateway(APIException):
status_code = 502
default_detail = 'Could not connect to ATC gateway.'
def serviced(method):
'''
A decorator to check if the service is available or not.
Raise a BadGateway exception on failure to connect to the atc gateway
'''
@wraps(method)
def decorator(cls, request, *args, **kwargs):
service = atcdClient()
if service is None:
raise BadGateway()
return method(cls, request, service, *args, **kwargs)
return decorator
class AtcApi(APIView):
'''
If `address` is not provided, we default to the client IP or forwarded IP
'''
@serviced
def get(self, request, service, address=None, format=None):
        '''
Get the current shaping for an IP. If address is None, defaults to
the client IP
@return the current shaping applied or 404 if the IP is not being
shaped
'''
device_serializer = DeviceSerializer(
data=request.DATA,
context={'request': request, 'address': address},
)
if not device_serializer.is_valid():
raise ParseError(detail=device_serializer.errors)
dev = device_serializer.save()
try:
tc = service.getCurrentShaping(dev)
except TrafficControlException as e:
return Response(
{'detail': e.message},
status=status.HTTP_404_NOT_FOUND,
)
serializer = SettingSerializer(tc.settings)
return Response(
serializer.data,
status=status.HTTP_200_OK
)
@serviced
def post(self, request, service, address=None, format=None):
        '''
Set shaping for an IP. If address is None, defaults to
the client IP
@return the profile that was set on success
'''
setting_serializer = SettingSerializer(data=request.DATA)
device_serializer = DeviceSerializer(
data=request.DATA,
context={'request': request, 'address': address},
)
if not setting_serializer.is_valid():
raise ParseError(detail=setting_serializer.errors)
if not device_serializer.is_valid():
raise ParseError(detail=device_serializer.errors)
setting = setting_serializer.save()
device = device_serializer.save()
tc = TrafficControl(
device=device,
settings=setting,
timeout=atc_api_settings.DEFAULT_TC_TIMEOUT,
)
try:
tcrc = service.startShaping(tc)
except TrafficControlException as e:
return Response(e.message, status=status.HTTP_401_UNAUTHORIZED)
result = {'result': tcrc.code, 'message': tcrc.message}
if tcrc.code:
raise ParseError(detail=result)
return Response(
setting_serializer.data,
status=status.HTTP_201_CREATED
)
@serviced
def delete(self, request, service, address=None, format=None):
'''
Delete the shaping for an IP, if no IP is specified, default to the
client IP
'''
device_serializer = DeviceSerializer(
data=request.DATA,
context={'request': request, 'address': address},
)
if not device_serializer.is_valid():
return Response(
device_serializer.errors,
status=status.HTTP_400_BAD_REQUEST,
)
device = device_serializer.save()
try:
tcrc = service.stopShaping(device)
except TrafficControlException as e:
return Response(e.message, status=status.HTTP_401_UNAUTHORIZED)
result = {'result': tcrc.code, 'message': tcrc.message}
if tcrc.code:
raise ParseError(detail=result)
return Response(status=status.HTTP_204_NO_CONTENT)
class AuthApi(APIView):
@serviced
def get(self, request, service, address=None):
'''
Returns the addresses that the provided address is allowed to shape.
'''
if address is None:
address = _get_client_ip(request)
controlled_ips = []
for addr in service.getDevicesControlledBy(address):
if addr is None:
break
controlled_ips.append({
'controlled_ip': addr.device.controlledIP,
'valid_until': addr.timeout,
})
data = {
'address': address,
'controlled_ips': controlled_ips,
}
return Response(data, status=status.HTTP_200_OK)
@serviced
def post(self, request, service, address=None):
'''
Authorizes one address to shape another address,
based on the provided auth token.
'''
if address is None:
return Response(
{'details': 'no address provided'},
status=status.HTTP_400_BAD_REQUEST
)
controlled_ip = address
controlling_ip = _get_client_ip(request)
if 'token' not in request.data:
token = None
else:
token = AccessToken(token=request.data['token'])
dev = TrafficControlledDevice(
controlledIP=controlled_ip,
controllingIP=controlling_ip
)
worked = service.requestRemoteControl(dev, token)
if not worked:
return Response(
{'details': 'invalid token provided'},
status=status.HTTP_401_UNAUTHORIZED,
)
print 'Worked:', worked
data = {
'controlling_ip': controlling_ip,
'controlled_ip': controlled_ip,
}
return Response(data, status=status.HTTP_200_OK)
class TokenApi(APIView):
@serviced
def get(self, request, service):
'''
Returns the current authorization token for the provided address.
'''
# default duration...
# 3 days in seconds
duration = 3 * 24 * 60 * 60
if 'duration' in request.query_params:
duration = int(request.query_params['duration'])
address = _get_client_ip(request)
stuff = service.requestToken(address, duration)
data = {
'token': stuff.token,
'interval': stuff.interval,
'valid_until': stuff.valid_until,
'address': address,
}
return Response(data, status=status.HTTP_200_OK)
def _get_client_ip(request):
'''Return the real IP of a client even when using a proxy'''
if 'HTTP_X_REAL_IP' in request.META:
if request.META['REMOTE_ADDR'] not in atc_api_settings.PROXY_IPS:
raise ValueError('HTTP_X_REAL_IP set by non-proxy')
return request.META['HTTP_X_REAL_IP']
else:
return request.META['REMOTE_ADDR']
|
|
# -*- coding: utf-8 -*-
import sys
import argparse
import re
import datetime
from string import maketrans
from fatool import *
from decimal import *
import logging
def main():
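    # Build the fatool command-line interface: one sub-parser per operation
    # (cut, extractNames, extractContigs, remContigs, join, split, reverse,
    # validate, stats, findPrimer, cutName, cutNameMarker), each dispatching
    # through set_defaults(func=...). A hypothetical invocation (script and
    # file names assumed) could look like:
    #   python main.py cut -f input.fa -r 500 -s 100 -o output.fa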
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
#logger.setLevel(logging.DEBUG)
parser = argparse.ArgumentParser()
#parser.add_argument('-f', '--fafile', help='file to be cut usualy *.fa', type=argparse.FileType('r'), required=True)
parser.add_argument('-v', '--version', help='display version number and exit', action='version', version='%(prog)s 0.3.1')
    subparsers = parser.add_subparsers(title='fatool commands', help='each has its own params, for more details use: command -h')
sub_cut = subparsers.add_parser('cut', help='split supplied sequence into smaller parts, according to given params')
    sub_cut.add_argument('-f', '--fafile', help='file to be cut, usually *.fa', type=argparse.FileType('r'), required=True)
    sub_cut.add_argument('-r', '--range', help='cut sequence length', type=int, required=True)
sub_cut.add_argument('-o', '--output', help='output file default: output.fa', type=argparse.FileType('w'), default='output.fa')
sub_cut.add_argument('-s', '--step', help='step length default: 1', type=int, default=1)
sub_cut.add_argument('--report', help='report results into file if not supplied stdout', type=argparse.FileType('w'))
    sub_cut.add_argument('--operator', help='user who fired the script; it will be noted in the report', type=str)
sub_cut.set_defaults(func=cut_fa)
sub_en = subparsers.add_parser('extractNames', help='extracting contigs names only')
    sub_en.add_argument('-f', '--fafile', help='file to be cut, usually *.fa', type=argparse.FileType('r'), required=True)
    sub_en.add_argument('-o', '--output', help='output file if not supplied stdout', type=argparse.FileType('w'))
    sub_en.add_argument('--report', help='report results into file if not supplied stdout', type=argparse.FileType('w'))
    sub_en.add_argument('--operator', help='user who fired the script; it will be noted in the report', type=str)
sub_en.set_defaults(func=extract_names)
sub_ec = subparsers.add_parser('extractContigs', help='extracting contigs specified in file (output in new file)')
    sub_ec.add_argument('-f', '--fafile', help='file to be cut, usually *.fa', type=argparse.FileType('r'), required=True)
sub_ec.add_argument('--list', help='file containing list of contigs one contig per line', type=argparse.FileType('r'), required=True)
sub_ec.add_argument('-o', '--output', help='output file; if --multifile is set output directory', type=str, required=True)
sub_ec.add_argument('--report', help='report results into file if not supplied stdout', type=argparse.FileType('w'))
    sub_ec.add_argument('--operator', help='user who fired the script; it will be noted in the report', type=str)
sub_ec.add_argument('--multifile', help='if this flag is set each contig will be saved in separate file', action='store_true')
sub_ec.set_defaults(func=extract_contigs)
sub_rc = subparsers.add_parser('remContigs', help='removing contigs specified in file (output in new file)')
    sub_rc.add_argument('-f', '--fafile', help='file to be cut, usually *.fa', type=argparse.FileType('r'), required=True)
sub_rc.add_argument('--list', help='file containing list of contigs one contig per line', type=argparse.FileType('r'), required=True)
sub_rc.add_argument('-o', '--output', help='output file if not supplied stdout', type=str, required=True)
sub_rc.add_argument('--report', help='report results into file if not supplied stdout', type=argparse.FileType('w'))
    sub_rc.add_argument('--operator', help='user who fired the script; it will be noted in the report', type=str)
sub_rc.set_defaults(func=remove_contigs)
    sub_jc = subparsers.add_parser('join', help='joining two or more files, not yet verifying duplicates')
    sub_jc.add_argument('-f', '--fafile', help='file to be cut, usually *.fa', type=argparse.FileType('r'), required=True)
sub_jc.add_argument('-o', '--output', help='output file if not supplied stdout', type=argparse.FileType('w'), required=True)
sub_jc.add_argument('--files', help='files to be joined', nargs='*', type=argparse.FileType('r'))
    sub_jc.add_argument('--overwrite', help='if set, overwrites contigs with the same name', action='store_true')
sub_jc.add_argument('--report', help='report results into file if not supplied stdout', type=argparse.FileType('w'))
    sub_jc.add_argument('--operator', help='user who fired the script; it will be noted in the report', type=str)
sub_jc.set_defaults(func=join)
    sub_sc = subparsers.add_parser('split', help='each contig saved into a separate file')
    sub_sc.add_argument('-f', '--fafile', help='file to be cut, usually *.fa', type=argparse.FileType('r'), required=True)
    sub_sc.add_argument('-d', '--outputDir', help='output directory where split contigs will be saved', type=str, required=True)
    sub_sc.add_argument('--report', help='report results into file if not supplied stdout', type=argparse.FileType('w'))
    sub_sc.add_argument('--operator', help='user who fired the script; it will be noted in the report', type=str)
sub_sc.set_defaults(func=split_contigs)
sub_r = subparsers.add_parser('reverse', help='reverse all sequences in file')
    sub_r.add_argument('-f', '--fafile', help='file to be cut, usually *.fa', type=argparse.FileType('r'), required=True)
    sub_r.add_argument('-o', '--output', help='output file', type=argparse.FileType('w'), required=True)
    sub_r.add_argument('--report', help='report results into file if not supplied stdout', type=argparse.FileType('w'))
    sub_r.add_argument('--operator', help='user who fired the script; it will be noted in the report', type=str)
sub_r.set_defaults(func=reverse)
    sub_v = subparsers.add_parser('validate', help='validates a fa file')
    sub_v.add_argument('-f', '--fafile', help='input file, usually *.fa', type=argparse.FileType('r'), required=True)
    sub_v.add_argument('-t', '--type', help='type of sequence: 0 - general, 1 - DNA, 2 - amino', type=int, required=True)
    sub_v.add_argument('--details', help='set if you want to see detailed validation info', action='store_true')
sub_v.set_defaults(func=validate)
    sub_s = subparsers.add_parser('stats', help='show statistics of a fa file')
    sub_s.add_argument('-f', '--fafile', help='file to compute statistics for, usually *.fa', type=argparse.FileType('r'), required=True)
    sub_s.add_argument('--report', help='report results into a file; if not supplied, stdout', type=argparse.FileType('w'))
    sub_s.add_argument('--operator', help='user who ran the script; it will be noted in the report', nargs='*', type=str)
sub_s.set_defaults(func=statistics)
'''
    sub_fm = subparsers.add_parser('findMotif', help='display motif positions in a contig')
    sub_fm.add_argument('-f', '--fafile', help='input file, usually *.fa', type=argparse.FileType('r'), required=True)
    sub_fm.add_argument('--mml', help='mismatch level: number of allowed mismatches in primers (default 0)', type=int, default=0)
    sub_fm.add_argument('--report', help='report results into a file; if not supplied, stdout', type=argparse.FileType('w'))
    sub_fm.add_argument('--operator', help='user who ran the script; it will be noted in the report', nargs='*', type=str)
    sub_fm.set_defaults(func=find_motif)
'''
    sub_fp = subparsers.add_parser('findPrimer', help='display a list of found primers')
    sub_fp.add_argument('-f', '--fafile', help='input file, usually *.fa', type=argparse.FileType('r'), required=True)
    sub_fp.add_argument('--start', help='start codon 5\'', type=str, required=True)
    sub_fp.add_argument('--stop', help='stop codon 3\'', type=str, required=True)
    sub_fp.add_argument('--mode', help='FF (start forward, stop forward) or FR (start 5\' stop 3\')', type=str, choices=['FF', 'FR'], default='FR')
    sub_fp.add_argument('--minlen', help='minimum length (default 50bp)', type=int, default=50)
    sub_fp.add_argument('--maxlen', help='maximum length (default 1000bp)', type=int, default=1000)
    sub_fp.add_argument('--mml', help='mismatch level: number of allowed mismatches in primers (default 0)', type=int, default=0)
    sub_fp.add_argument('--report', help='report results into a file; if not supplied, stdout', type=argparse.FileType('w'))
    sub_fp.add_argument('--operator', help='user who ran the script; it will be noted in the report', nargs='*', type=str)
    sub_fp.set_defaults(func=find_primers)
    sub_cn = subparsers.add_parser('cutName', help='cuts the name from a given position to a given length')
    sub_cn.add_argument('-f', '--fafile', help='input file, usually *.fa', type=argparse.FileType('r'), required=True)
sub_cn.add_argument('--start', help='start of cut', type=int, required=True)
sub_cn.add_argument('-l', '--length', help='length of cut', type=int, required=True)
sub_cn.set_defaults(func=cut_name)
    sub_lnam = subparsers.add_parser('cutNameMarker', help='cuts the name, leaving a defined number of chars after the beginning of the marker')
    sub_lnam.add_argument('-f', '--fafile', help='input file, usually *.fa', type=argparse.FileType('r'), required=True)
    sub_lnam.add_argument('-m', '--marker', help='marker that indicates the start of the cut', type=str, required=True)
    sub_lnam.add_argument('-l', '--length', help='length of the cut', type=int, required=True)
    sub_lnam.add_argument('--keepMarker', help='whether to keep the marker or not; default 1 (yes)', type=int, required=True)
    sub_lnam.add_argument('-o', '--output', help='output file, default: output.fa', type=argparse.FileType('w'), default='output.fa')
    #sub_lnam.add_argument('-d', '--outputDir', help='output directory where multiple contigs will be saved', type=str)
    sub_lnam.add_argument('--report', help='report results into a file; if not supplied, stdout', type=argparse.FileType('w'))
    sub_lnam.add_argument('--operator', help='user who ran the script; it will be noted in the report', nargs='*', type=str)
sub_lnam.set_defaults(func=cut_name_pattern)
sub_trn_d2p = subparsers.add_parser('translateDNA2Proteins', help='display translation to proteins')
    sub_trn_d2p.add_argument('-f', '--fafile', help='input file, usually *.fa', type=argparse.FileType('r'), required=True)
    sub_trn_d2p.add_argument('-o', '--output', help='output file, default: output.fa', type=argparse.FileType('w'), default='output.fa')
    sub_trn_d2p.add_argument('--startCodons', help='list of start codons separated by spaces', nargs='*', type=str)
    sub_trn_d2p.add_argument('--stopCodons', help='list of stop codons separated by spaces', nargs='*', type=str)
sub_trn_d2p.add_argument(
        '--tdict', help='which dictionary to use for translation: STD - standard, VMTO - Vertebrate Mitochondrial, YMTO - Yeast Mitochondrial, BAPP - Bacterial, Archaeal, Plant and Plastid',
type=str, choices=['STD', 'VMTO', 'YMTO', 'BAPP'], default = 'STD'
)
sub_trn_d2p.add_argument('--nss', help='No Start Stop', action='store_true')
    sub_trn_d2p.add_argument('--report', help='report results into a file; if not supplied, stdout', type=argparse.FileType('w'))
    sub_trn_d2p.add_argument('--operator', help='user who ran the script; it will be noted in the report', nargs='*', type=str)
sub_trn_d2p.set_defaults(func=translate_dna_to_protein)
sub_2fq = subparsers.add_parser('cnv2fq', help='converts *.FASTA to *.FQ')
sub_2fq.add_argument('-f', '--fafile', help='file to convert *.fa', type=argparse.FileType('r'), required=True)
sub_2fq.add_argument('-o', '--output', help='file to output as *.fq', type=argparse.FileType('w'), required=True)
sub_2fq.add_argument('-q', '--quality', help='quality score to add to reads', type=int, required=True)
sub_2fq.set_defaults(func=convert_to_fq)
args = parser.parse_args()
args.func(args)
def resolve_operator(operator_arg_list):
    # pretty-print the operator name(s); handles both a plain string and a list of words
    if isinstance(operator_arg_list, basestring):
        return operator_arg_list.strip()
    return ' '.join(operator_arg_list).strip()
def make_log_header(cmd, op):
stats_rep = '\n-------------------------------------------------------------'
stats_rep +='\ncmdfatool '+str(cmd)+' \n\nstarted:\t'+str(datetime.datetime.now())
if op:
stats_rep += '\nOperator:\t'+resolve_operator(op)
stats_rep += '\n-------------------------------------------------------------\n'
return stats_rep
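# Example (illustrative only, the timestamp will differ): make_log_header('stats', ['jdoe'])
# produces a banner containing "cmdfatool stats", a "started:<timestamp>" line and
# "Operator:\tjdoe", framed by dashed separator lines.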
def cut_fa(args):
#logging.basicConfig(level=logging.ERROR)
#logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logger.debug('debug mode started')
logger.info('command: cut starting')
rep = str(make_log_header('cut', args.operator))
fafile = args.fafile
output = args.output
split_range = args.range
step = args.step
f = Fa.load_from_file(fafile)
logger.info('file: '+fafile.name+' loaded')
contig_list = []
for r in f.contigs:
contig_list += r.cut(split_range, step)
        logger.info('cut contigs added from contig: '+r.name)
    result_fa = Fa(contig_list, 'split')
logger.info('trying to write file')
result_fa.write(output)
logger.info('file written')
rep += '\n\n------------------------------------------------------'
rep += '\nFinished:\t'+str(datetime.datetime.now())
if args.report:
with args.report as log_file:
log_file.write(rep)
def extract_names(args):
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
logger.info('command: extractNames starting')
rep = str(make_log_header('extractNames', args.operator))
fafile = args.fafile
#output = args.output
fa = Fa.load_from_file(fafile)
names = fa.show_names()
if args.output:
with args.output as o:
for r in names:
o.write(r+'\n')
else:
for r in names:
print r
    rep += 'Number of names found:\t' + str(len(names))
rep += '\n\n------------------------------------------------------'
rep += '\nFinished:\t'+str(datetime.datetime.now())
if args.report:
with args.report as log_file:
log_file.write(rep)
def extract_contigs(args):
# default all extracted contigs in one file
# with flag multifile save each contig to separate file
rep = str(make_log_header('extractContigs', args.operator))
fa = Fa.load_from_file(args.fafile)
    rep += 'Number of contigs in original file:\t'+str(len(fa.contigs))
    # file with contig names, one per line
    with args.list as cntgs:
        elist = [c.strip() for c in cntgs]
    result_fa = fa.extract(elist)
    if args.multifile:
        result_fa.write_multiple_files(args.output)
    else:
        result_fa.write(args.output)
    rep += '\nContigs requested for extraction:\t'+str(len(elist))
    rep += '\nExtracted contigs:\t'+str(len(result_fa.contigs))
rep += '\n\n------------------------------------------------------'
rep += '\nFinished:\t'+str(datetime.datetime.now())
if args.report:
with args.report as log_file:
log_file.write(rep)
else:
print rep
def remove_contigs(args):
# contigs from list are removed, others saved to file
rep = str(make_log_header('remContigs', args.operator))
fa = Fa.load_from_file(args.fafile)
    rep += 'Number of contigs in original file:\t'+str(len(fa.contigs))
    # file that contains the list of contigs, one per line
    with args.list as cntgs:
        rlist = [c.strip() for c in cntgs]
    rep += '\nNumber of contigs to remove:\t'+str(len(rlist))
    result_fa = fa.remove(rlist)
    rep += '\nNumber of contigs after removal:\t'+str(len(result_fa.contigs))
    rep += '\nContigs removed:\t'+str(len(fa.contigs) - len(result_fa.contigs))
result_fa.write(args.output)
rep += '\n\n------------------------------------------------------'
rep += '\nFinished:\t'+str(datetime.datetime.now())
    if args.report:
        with args.report as log_file:
            log_file.write(rep)
    else:
        print rep
def join(args):
# joins contig from multiple files
rep = str(make_log_header('join', args.operator))
fa = Fa.load_from_file(args.fafile)
fa_list = []
contigs_to_add = 0
# list of Fa files to join.
    for r in args.files or []:
        fa2add = Fa.load_from_file(r)
        fa_list.append(fa2add)
        contigs_to_add += fa2add.count_contigs()
    rep += '\nOriginal contigs number:\t'+str(fa.count_contigs())
    rep += '\nTotal files to join with original file:\t'+str(len(fa_list))
rep += '\nTotal contigs to add:\t'+str(contigs_to_add)
fa.join(fa_list, args.overwrite)
rep += '\nNumber of contigs after join:\t'+str(fa.count_contigs())
fa.write(args.output)
rep += '\n\n------------------------------------------------------'
rep += '\nFinished:\t'+str(datetime.datetime.now())
    if args.report:
        with args.report as log_file:
            log_file.write(rep)
    else:
        print rep
def split_contigs(args):
#writes each contig in single file
rep = str(make_log_header('split', args.operator))
fa = Fa.load_from_file(args.fafile)
    fa.write_multiple_files(args.outputDir)
rep += '\n\n------------------------------------------------------'
rep += '\nFinished:\t'+str(datetime.datetime.now())
if args.report:
with args.report as log_file:
log_file.write(rep)
else:
print rep
def statistics(args):
# returns statistics of fa file
stats_rep = str(make_log_header('stats', args.operator))
fa = Fa.load_from_file(args.fafile)
stats = fa.statistics()
stats_rep += '\n\nNumber of N:\t'+str(stats['N'])
stats_rep += '\nNumber of A:\t'+str(stats['A'])
stats_rep += '\nNumber of C:\t'+str(stats['C'])
stats_rep += '\nNumber of T:\t'+str(stats['T'])
stats_rep += '\nNumber of G:\t'+str(stats['G'])
getcontext().rounding = ROUND_05UP
getcontext().prec = 4
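    # GC% is reported to 4 significant digits, e.g. G+C = 450 with L = 1000 gives Decimal('45.00')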
stats_rep += '\nGC[%] (0.5 up):\t'+str(Decimal(stats['G']+stats['C'])/stats['L']*Decimal(100.00))
stats_rep += '\n\nTotal length:\t'+str(stats['L'])
stats_rep += '\nTotal contigs:\t'+str(stats['totalc'])
stats_rep += '\n\ncontigs 1000-5000bp:\t'+str(stats['nbp1000'])
stats_rep += '\ncontigs 1000-5000bp length:\t'+str(stats['lbp1000'])
stats_rep += '\ncontigs 5001-10000bp:\t'+str(stats['nbp5000'])
stats_rep += '\ncontigs 5001-10000bp length:\t'+str(stats['lbp5000'])
stats_rep += '\ncontigs 10001-25000bp:\t'+str(stats['nbp10000'])
stats_rep += '\ncontigs 10001-25000bp length:\t'+str(stats['lbp10000'])
stats_rep += '\ncontigs 25001-50000bp:\t'+str(stats['nbp25000'])
stats_rep += '\ncontigs 25001-50000bp length:\t'+str(stats['lbp25000'])
stats_rep += '\ncontigs 50001+bp:\t'+str(stats['nbp50000'])
stats_rep += '\ncontigs 50001+bp length:\t'+str(stats['lbp50000'])
    stats_rep += '\n\ncontigs > 1000bp:\t'+str(stats['nbp1000']+stats['nbp5000']+stats['nbp10000']+stats['nbp25000']+stats['nbp50000'])
    stats_rep += '\ncontigs > 1000bp length:\t'+str(stats['lbp1000']+stats['lbp5000']+stats['lbp10000']+stats['lbp25000']+stats['lbp50000'])
    stats_rep += '\ncontigs > 5000bp:\t'+str(stats['nbp5000']+stats['nbp10000']+stats['nbp25000']+stats['nbp50000'])
    stats_rep += '\ncontigs > 5000bp length:\t'+str(stats['lbp5000']+stats['lbp10000']+stats['lbp25000']+stats['lbp50000'])
    stats_rep += '\ncontigs > 10000bp:\t'+str(stats['nbp10000']+stats['nbp25000']+stats['nbp50000'])
    stats_rep += '\ncontigs > 10000bp length:\t'+str(stats['lbp10000']+stats['lbp25000']+stats['lbp50000'])
    stats_rep += '\ncontigs > 25000bp:\t'+str(stats['nbp25000']+stats['nbp50000'])
    stats_rep += '\ncontigs > 25000bp length:\t'+str(stats['lbp25000']+stats['lbp50000'])
    stats_rep += '\ncontigs > 50000bp:\t'+str(stats['nbp50000'])
    stats_rep += '\ncontigs > 50000bp length:\t'+str(stats['lbp50000'])
stats_rep += '\nLongest contig:\t'+str(stats['longest'])
stats_rep += '\n\nN50:\t'+str(stats['N50'])
stats_rep += '\nL50:\t'+str(stats['L50'])
stats_rep += '\nN75:\t'+str(stats['N75'])
stats_rep += '\nL75:\t'+str(stats['L75'])
stats_rep += '\nN90:\t'+str(stats['N90'])
stats_rep += '\nL90:\t'+str(stats['L90'])
stats_rep += '\n\n------------------------------------------------------'
stats_rep += '\nFinished:\t'+str(datetime.datetime.now())
if args.report:
with args.report as log_file:
log_file.write(stats_rep)
else:
print stats_rep
def validate(args):
# check if fa is valid
rep = str(make_log_header('validate', args.operator))
fa = Fa.load_from_file(args.fafile)
result_list = {}
if args.details:
for r in fa.contigs:
result_list[r.name] = Sequence.detailed_validate_generic(r, '[^ACGNTUBDHKMRSVWY\-\nacgntubdhkmrsvwy]')
else:
for r in fa.contigs:
result_list[r.name] = Sequence.validate_generic(r, '[^ACGNTUBDHKMRSVWY\-\nacgntubdhkmrsvwy]')
#print result_list
    for r in result_list:
        rep += r + ':\t' + str(result_list[r]) + '\n'
rep += '\n\n------------------------------------------------------'
rep += '\nFinished:\t'+str(datetime.datetime.now())
if args.report:
with args.report as log_file:
log_file.write(rep)
else:
print rep
def reverse(args):
rep = str(make_log_header('reverse', args.operator))
fa = Fa.load_from_file(args.fafile)
fa.reverse()
fa.write(args.output)
rep += '\n\n------------------------------------------------------'
rep += '\nFinished:\t'+str(datetime.datetime.now())
if args.report:
with args.report as log_file:
log_file.write(rep)
else:
print rep
def find_motif(args):
print 'not available yet'
pass
def find_primers(args):
    rep = str(make_log_header('findPrimer', args.operator))
    fa = Fa.load_from_file(args.fafile)
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.DEBUG)
    logger.debug(args)
for r in fa.contigs:
rep += '\n================\n\t\t'+r.name+'\n'
for q in r.find_aprox_primers(args.start, args.stop, str(args.mode), int(args.mml), args.minlen, args.maxlen):
rep += q+'\n'
rep += '\n\n------------------------------------------------------'
rep += '\nFinished:\t'+str(datetime.datetime.now())
if args.report:
with args.report as log_file:
log_file.write(rep)
else:
print rep
def cut_name_pattern(args):
rep = str(make_log_header('cutNameMarker', args.operator))
fa = Fa.load_from_file(args.fafile)
for r in fa.contigs:
r.leave_name_after_marker(args.marker, args.length, args.keepMarker)
fa.write(args.output)
def print_frame_output(r_dict):
i = 0
otp = ''
for f in r_dict:
otp += 'FRAME:\t'+str(i+1)+'\n'
otp += '\nBEFORE:\t '+f[0]
otp += '\nTRANSLATION:\n\n'+f[1]
otp += '\n\nAFTER:\t '+f[2]
otp += '\n------------------------------------------------\n'
i+=1
return otp
def translate_dna_to_protein(args):
    rep = str(make_log_header('translateDNA2Proteins', args.operator))
fa = Fa.load_from_file(args.fafile)
if args.tdict == 'STD':
tdict = Sequence.tdict_standard
elif args.tdict == 'VMTO':
tdict = Sequence.tdict_vertebrate_mitochondrial
elif args.tdict == 'YMTO':
tdict = Sequence.tdict_yeast_mitochondrial
elif args.tdict == 'BAPP':
tdict = Sequence.tdict_bacterial_archaeal_plant_plastid
else:
print 'applied dictionary is not valid!'
exit(1)
r_dict = {}
otp = ''
if args.nss:
for r in fa.contigs:
r_dict = r.translate2protein(tdict)
otp += '\n=============================\n'+r.name+'\n=============================\n'
otp += '\nFORWARD\n\n'
otp += print_frame_output(r_dict['fwd'])
otp += '\n'+'='*15+'\n'
            otp += '\nREVERSE\n\n'
otp += print_frame_output(r_dict['rev'])
rep += otp
else:
for r in fa.contigs:
r_dict = r.translate2protein_in_range(args.startCodons, args.stopCodons, tdict)
otp += '\n=============================\n'+r.name+'\n=============================\n'
otp += 'FORWARD\n\n'
i = 0
for f in r_dict['fwd']:
otp += 'FRAME:\t'+str(i+1)+'\n'
for k in f:
otp += '\n'+k[0]+' start: '+str(k[1])
otp += '\n------------------------------------------------\n'
otp += '\n'+'='*15+'\n'
i += 1
            otp += 'REVERSE\n\n'
i = 0
for f in r_dict['rev']:
otp += 'FRAME:\t'+str(i+1)+'\n'
for k in f:
otp += '\n'+k[0]+' start: '+str(k[1])
otp += '\n------------------------------------------------\n'
i += 1
rep += otp
fa.write(args.output)
rep += '\n\n------------------------------------------------------'
rep += '\nFinished:\t'+str(datetime.datetime.now())
if args.report:
with args.report as log_file:
log_file.write(rep)
else:
print rep
def cut_name(args):
pass
def convert_to_fq(args):
fa = Fa.load_from_file(args.fafile)
#fq = fa.convert_to_fq(args.quality)
i = 1
with args.output as w:
for r in fa.contigs:
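            # Phred+33 quality string: e.g. --quality 40 gives chr(33 + 40) == 'I', repeated once per base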
q = chr(33+args.quality)*len(r)
#n = self.name.replace('>', '@')
#n = n.replace(' ','_')
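            # synthetic Illumina-style read header; only the tile/x/y coordinates vary between reads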
n = '@EAS123:100:FC123VJ:2:'+str(i)+':'+str(i*7)+':'+str(i*8)+' 1:N:18:1'
i += 1
#nlist.append(Sequence(n, r.seq, q))
w.write(str(Sequence(n, r.seq, q)))
#fq.write(args.output)
if __name__ == '__main__':
exit(main())
|
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2011 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A remote procedure call (rpc) abstraction.
For some wrappers that add message versioning to rpc, see:
rpc.dispatcher
rpc.proxy
"""
from oslo.config import cfg
from ironic.openstack.common import importutils
from ironic.openstack.common import log as logging
LOG = logging.getLogger(__name__)
rpc_opts = [
cfg.StrOpt('rpc_backend',
default='%s.impl_kombu' % __package__,
help="The messaging module to use, defaults to kombu."),
cfg.IntOpt('rpc_thread_pool_size',
default=64,
help='Size of RPC thread pool'),
cfg.IntOpt('rpc_conn_pool_size',
default=30,
help='Size of RPC connection pool'),
cfg.IntOpt('rpc_response_timeout',
default=60,
help='Seconds to wait for a response from call or multicall'),
cfg.IntOpt('rpc_cast_timeout',
default=30,
help='Seconds to wait before a cast expires (TTL). '
'Only supported by impl_zmq.'),
cfg.ListOpt('allowed_rpc_exception_modules',
default=['nova.exception',
'cinder.exception',
'exceptions',
],
help='Modules of exceptions that are permitted to be recreated'
' upon receiving exception data from an rpc call.'),
cfg.BoolOpt('fake_rabbit',
default=False,
help='If passed, use a fake RabbitMQ provider'),
cfg.StrOpt('control_exchange',
default='openstack',
help='AMQP exchange to connect to if using RabbitMQ or Qpid'),
]
CONF = cfg.CONF
CONF.register_opts(rpc_opts)
def set_defaults(control_exchange):
cfg.set_defaults(rpc_opts,
control_exchange=control_exchange)
def create_connection(new=True):
"""Create a connection to the message bus used for rpc.
For some example usage of creating a connection and some consumers on that
connection, see nova.service.
:param new: Whether or not to create a new connection. A new connection
will be created by default. If new is False, the
implementation is free to return an existing connection from a
pool.
:returns: An instance of openstack.common.rpc.common.Connection
"""
return _get_impl().create_connection(CONF, new=new)
def call(context, topic, msg, timeout=None):
"""Invoke a remote method that returns something.
:param context: Information that identifies the user that has made this
request.
:param topic: The topic to send the rpc message to. This correlates to the
topic argument of
openstack.common.rpc.common.Connection.create_consumer()
and only applies when the consumer was created with
fanout=False.
:param msg: This is a dict in the form { "method" : "method_to_invoke",
"args" : dict_of_kwargs }
:param timeout: int, number of seconds to use for a response timeout.
If set, this overrides the rpc_response_timeout option.
:returns: A dict from the remote method.
:raises: openstack.common.rpc.common.Timeout if a complete response
is not received before the timeout is reached.
"""
return _get_impl().call(CONF, context, topic, msg, timeout)
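# A minimal usage sketch for call(); the topic and method names below are
# hypothetical, not something this module defines:
#
#     result = call(ctxt, 'compute.host1',
#                   {'method': 'get_info', 'args': {'instance_id': 42}},
#                   timeout=5)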
def cast(context, topic, msg):
"""Invoke a remote method that does not return anything.
:param context: Information that identifies the user that has made this
request.
:param topic: The topic to send the rpc message to. This correlates to the
topic argument of
openstack.common.rpc.common.Connection.create_consumer()
and only applies when the consumer was created with
fanout=False.
:param msg: This is a dict in the form { "method" : "method_to_invoke",
"args" : dict_of_kwargs }
:returns: None
"""
return _get_impl().cast(CONF, context, topic, msg)
def fanout_cast(context, topic, msg):
"""Broadcast a remote method invocation with no return.
This method will get invoked on all consumers that were set up with this
topic name and fanout=True.
:param context: Information that identifies the user that has made this
request.
:param topic: The topic to send the rpc message to. This correlates to the
topic argument of
openstack.common.rpc.common.Connection.create_consumer()
and only applies when the consumer was created with
fanout=True.
:param msg: This is a dict in the form { "method" : "method_to_invoke",
"args" : dict_of_kwargs }
:returns: None
"""
return _get_impl().fanout_cast(CONF, context, topic, msg)
def multicall(context, topic, msg, timeout=None):
"""Invoke a remote method and get back an iterator.
In this case, the remote method will be returning multiple values in
    separate messages, so the return values can be processed as they come in via
an iterator.
:param context: Information that identifies the user that has made this
request.
:param topic: The topic to send the rpc message to. This correlates to the
topic argument of
openstack.common.rpc.common.Connection.create_consumer()
and only applies when the consumer was created with
fanout=False.
:param msg: This is a dict in the form { "method" : "method_to_invoke",
"args" : dict_of_kwargs }
:param timeout: int, number of seconds to use for a response timeout.
If set, this overrides the rpc_response_timeout option.
:returns: An iterator. The iterator will yield a tuple (N, X) where N is
an index that starts at 0 and increases by one for each value
returned and X is the Nth value that was returned by the remote
method.
:raises: openstack.common.rpc.common.Timeout if a complete response
is not received before the timeout is reached.
"""
return _get_impl().multicall(CONF, context, topic, msg, timeout)
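# Sketch of consuming multicall(); each item yielded is an (index, value) tuple,
# and the method name below is hypothetical:
#
#     for n, value in multicall(ctxt, 'compute', {'method': 'list_sizes', 'args': {}}):
#         handle(value)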
def notify(context, topic, msg, envelope=False):
"""Send notification event.
:param context: Information that identifies the user that has made this
request.
:param topic: The topic to send the notification to.
:param msg: This is a dict of content of event.
:param envelope: Set to True to enable message envelope for notifications.
:returns: None
"""
return _get_impl().notify(cfg.CONF, context, topic, msg, envelope)
def cleanup():
"""Clean up resources in use by implementation.
Clean up any resources that have been allocated by the RPC implementation.
This is typically open connections to a messaging service. This function
would get called before an application using this API exits to allow
connections to get torn down cleanly.
:returns: None
"""
return _get_impl().cleanup()
def cast_to_server(context, server_params, topic, msg):
"""Invoke a remote method that does not return anything.
:param context: Information that identifies the user that has made this
request.
:param server_params: Connection information
:param topic: The topic to send the notification to.
:param msg: This is a dict in the form { "method" : "method_to_invoke",
"args" : dict_of_kwargs }
:returns: None
"""
return _get_impl().cast_to_server(CONF, context, server_params, topic,
msg)
def fanout_cast_to_server(context, server_params, topic, msg):
"""Broadcast to a remote method invocation with no return.
:param context: Information that identifies the user that has made this
request.
:param server_params: Connection information
:param topic: The topic to send the notification to.
:param msg: This is a dict in the form { "method" : "method_to_invoke",
"args" : dict_of_kwargs }
:returns: None
"""
return _get_impl().fanout_cast_to_server(CONF, context, server_params,
topic, msg)
def queue_get_for(context, topic, host):
"""Get a queue name for a given topic + host.
This function only works if this naming convention is followed on the
consumer side, as well. For example, in nova, every instance of the
nova-foo service calls create_consumer() for two topics:
foo
foo.<host>
Messages sent to the 'foo' topic are distributed to exactly one instance of
the nova-foo service. The services are chosen in a round-robin fashion.
Messages sent to the 'foo.<host>' topic are sent to the nova-foo service on
<host>.
"""
return '%s.%s' % (topic, host) if host else topic
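# Example: queue_get_for(ctxt, 'compute', 'host1') returns 'compute.host1',
# while queue_get_for(ctxt, 'compute', None) returns just 'compute'.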
_RPCIMPL = None
def _get_impl():
"""Delay import of rpc_backend until configuration is loaded."""
global _RPCIMPL
if _RPCIMPL is None:
try:
_RPCIMPL = importutils.import_module(CONF.rpc_backend)
except ImportError:
# For backwards compatibility with older nova config.
impl = CONF.rpc_backend.replace('nova.rpc',
'nova.openstack.common.rpc')
_RPCIMPL = importutils.import_module(impl)
return _RPCIMPL
|
|
import datetime
from unittest import mock
from django.contrib.postgres.indexes import OpClass
from django.db import (
IntegrityError, NotSupportedError, connection, transaction,
)
from django.db.models import (
CheckConstraint, Deferrable, F, Func, IntegerField, Q, UniqueConstraint,
)
from django.db.models.fields.json import KeyTextTransform
from django.db.models.functions import Cast, Left, Lower
from django.test import modify_settings, skipUnlessDBFeature
from django.utils import timezone
from . import PostgreSQLTestCase
from .models import (
HotelReservation, IntegerArrayModel, RangesModel, Room, Scene,
)
try:
from psycopg2.extras import DateRange, NumericRange
from django.contrib.postgres.constraints import ExclusionConstraint
from django.contrib.postgres.fields import (
DateTimeRangeField, RangeBoundary, RangeOperators,
)
except ImportError:
pass
@modify_settings(INSTALLED_APPS={'append': 'django.contrib.postgres'})
class SchemaTests(PostgreSQLTestCase):
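    # Query used by the tests below: given an index (constraint) name, it returns the
    # operator class applied to each indexed column together with the index name.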
get_opclass_query = '''
SELECT opcname, c.relname FROM pg_opclass AS oc
JOIN pg_index as i on oc.oid = ANY(i.indclass)
JOIN pg_class as c on c.oid = i.indexrelid
WHERE c.relname = %s
'''
def get_constraints(self, table):
"""Get the constraints on the table using a new cursor."""
with connection.cursor() as cursor:
return connection.introspection.get_constraints(cursor, table)
def test_check_constraint_range_value(self):
constraint_name = 'ints_between'
self.assertNotIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
constraint = CheckConstraint(
check=Q(ints__contained_by=NumericRange(10, 30)),
name=constraint_name,
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
with self.assertRaises(IntegrityError), transaction.atomic():
RangesModel.objects.create(ints=(20, 50))
RangesModel.objects.create(ints=(10, 30))
def test_check_constraint_daterange_contains(self):
constraint_name = 'dates_contains'
self.assertNotIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
constraint = CheckConstraint(
check=Q(dates__contains=F('dates_inner')),
name=constraint_name,
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
date_1 = datetime.date(2016, 1, 1)
date_2 = datetime.date(2016, 1, 4)
with self.assertRaises(IntegrityError), transaction.atomic():
RangesModel.objects.create(
dates=(date_1, date_2),
dates_inner=(date_1, date_2.replace(day=5)),
)
RangesModel.objects.create(
dates=(date_1, date_2),
dates_inner=(date_1, date_2),
)
def test_check_constraint_datetimerange_contains(self):
constraint_name = 'timestamps_contains'
self.assertNotIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
constraint = CheckConstraint(
check=Q(timestamps__contains=F('timestamps_inner')),
name=constraint_name,
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
datetime_1 = datetime.datetime(2016, 1, 1)
datetime_2 = datetime.datetime(2016, 1, 2, 12)
with self.assertRaises(IntegrityError), transaction.atomic():
RangesModel.objects.create(
timestamps=(datetime_1, datetime_2),
timestamps_inner=(datetime_1, datetime_2.replace(hour=13)),
)
RangesModel.objects.create(
timestamps=(datetime_1, datetime_2),
timestamps_inner=(datetime_1, datetime_2),
)
def test_opclass(self):
constraint = UniqueConstraint(
name='test_opclass',
fields=['scene'],
opclasses=['varchar_pattern_ops'],
)
with connection.schema_editor() as editor:
editor.add_constraint(Scene, constraint)
self.assertIn(constraint.name, self.get_constraints(Scene._meta.db_table))
with editor.connection.cursor() as cursor:
cursor.execute(self.get_opclass_query, [constraint.name])
self.assertEqual(
cursor.fetchall(),
[('varchar_pattern_ops', constraint.name)],
)
# Drop the constraint.
with connection.schema_editor() as editor:
editor.remove_constraint(Scene, constraint)
self.assertNotIn(constraint.name, self.get_constraints(Scene._meta.db_table))
def test_opclass_multiple_columns(self):
constraint = UniqueConstraint(
name='test_opclass_multiple',
fields=['scene', 'setting'],
opclasses=['varchar_pattern_ops', 'text_pattern_ops'],
)
with connection.schema_editor() as editor:
editor.add_constraint(Scene, constraint)
with editor.connection.cursor() as cursor:
cursor.execute(self.get_opclass_query, [constraint.name])
expected_opclasses = (
('varchar_pattern_ops', constraint.name),
('text_pattern_ops', constraint.name),
)
self.assertCountEqual(cursor.fetchall(), expected_opclasses)
def test_opclass_partial(self):
constraint = UniqueConstraint(
name='test_opclass_partial',
fields=['scene'],
opclasses=['varchar_pattern_ops'],
condition=Q(setting__contains="Sir Bedemir's Castle"),
)
with connection.schema_editor() as editor:
editor.add_constraint(Scene, constraint)
with editor.connection.cursor() as cursor:
cursor.execute(self.get_opclass_query, [constraint.name])
self.assertCountEqual(
cursor.fetchall(),
[('varchar_pattern_ops', constraint.name)],
)
@skipUnlessDBFeature('supports_covering_indexes')
def test_opclass_include(self):
constraint = UniqueConstraint(
name='test_opclass_include',
fields=['scene'],
opclasses=['varchar_pattern_ops'],
include=['setting'],
)
with connection.schema_editor() as editor:
editor.add_constraint(Scene, constraint)
with editor.connection.cursor() as cursor:
cursor.execute(self.get_opclass_query, [constraint.name])
self.assertCountEqual(
cursor.fetchall(),
[('varchar_pattern_ops', constraint.name)],
)
@skipUnlessDBFeature('supports_expression_indexes')
def test_opclass_func(self):
constraint = UniqueConstraint(
OpClass(Lower('scene'), name='text_pattern_ops'),
name='test_opclass_func',
)
with connection.schema_editor() as editor:
editor.add_constraint(Scene, constraint)
constraints = self.get_constraints(Scene._meta.db_table)
self.assertIs(constraints[constraint.name]['unique'], True)
self.assertIn(constraint.name, constraints)
with editor.connection.cursor() as cursor:
cursor.execute(self.get_opclass_query, [constraint.name])
self.assertEqual(
cursor.fetchall(),
[('text_pattern_ops', constraint.name)],
)
Scene.objects.create(scene='Scene 10', setting='The dark forest of Ewing')
with self.assertRaises(IntegrityError), transaction.atomic():
Scene.objects.create(scene='ScEnE 10', setting="Sir Bedemir's Castle")
Scene.objects.create(scene='Scene 5', setting="Sir Bedemir's Castle")
# Drop the constraint.
with connection.schema_editor() as editor:
editor.remove_constraint(Scene, constraint)
self.assertNotIn(constraint.name, self.get_constraints(Scene._meta.db_table))
Scene.objects.create(scene='ScEnE 10', setting="Sir Bedemir's Castle")
class ExclusionConstraintTests(PostgreSQLTestCase):
def get_constraints(self, table):
"""Get the constraints on the table using a new cursor."""
with connection.cursor() as cursor:
return connection.introspection.get_constraints(cursor, table)
def test_invalid_condition(self):
msg = 'ExclusionConstraint.condition must be a Q instance.'
with self.assertRaisesMessage(ValueError, msg):
ExclusionConstraint(
index_type='GIST',
name='exclude_invalid_condition',
expressions=[(F('datespan'), RangeOperators.OVERLAPS)],
condition=F('invalid'),
)
def test_invalid_index_type(self):
msg = 'Exclusion constraints only support GiST or SP-GiST indexes.'
with self.assertRaisesMessage(ValueError, msg):
ExclusionConstraint(
index_type='gin',
name='exclude_invalid_index_type',
expressions=[(F('datespan'), RangeOperators.OVERLAPS)],
)
def test_invalid_expressions(self):
msg = 'The expressions must be a list of 2-tuples.'
for expressions in (['foo'], [('foo')], [('foo_1', 'foo_2', 'foo_3')]):
with self.subTest(expressions), self.assertRaisesMessage(ValueError, msg):
ExclusionConstraint(
index_type='GIST',
name='exclude_invalid_expressions',
expressions=expressions,
)
def test_empty_expressions(self):
msg = 'At least one expression is required to define an exclusion constraint.'
for empty_expressions in (None, []):
with self.subTest(empty_expressions), self.assertRaisesMessage(ValueError, msg):
ExclusionConstraint(
index_type='GIST',
name='exclude_empty_expressions',
expressions=empty_expressions,
)
def test_invalid_deferrable(self):
msg = 'ExclusionConstraint.deferrable must be a Deferrable instance.'
with self.assertRaisesMessage(ValueError, msg):
ExclusionConstraint(
name='exclude_invalid_deferrable',
expressions=[(F('datespan'), RangeOperators.OVERLAPS)],
deferrable='invalid',
)
def test_deferrable_with_condition(self):
msg = 'ExclusionConstraint with conditions cannot be deferred.'
with self.assertRaisesMessage(ValueError, msg):
ExclusionConstraint(
name='exclude_invalid_condition',
expressions=[(F('datespan'), RangeOperators.OVERLAPS)],
condition=Q(cancelled=False),
deferrable=Deferrable.DEFERRED,
)
def test_invalid_include_type(self):
msg = 'ExclusionConstraint.include must be a list or tuple.'
with self.assertRaisesMessage(ValueError, msg):
ExclusionConstraint(
name='exclude_invalid_include',
expressions=[(F('datespan'), RangeOperators.OVERLAPS)],
include='invalid',
)
def test_invalid_opclasses_type(self):
msg = 'ExclusionConstraint.opclasses must be a list or tuple.'
with self.assertRaisesMessage(ValueError, msg):
ExclusionConstraint(
name='exclude_invalid_opclasses',
expressions=[(F('datespan'), RangeOperators.OVERLAPS)],
opclasses='invalid',
)
def test_opclasses_and_expressions_same_length(self):
msg = (
'ExclusionConstraint.expressions and '
'ExclusionConstraint.opclasses must have the same number of '
'elements.'
)
with self.assertRaisesMessage(ValueError, msg):
ExclusionConstraint(
name='exclude_invalid_expressions_opclasses_length',
expressions=[(F('datespan'), RangeOperators.OVERLAPS)],
opclasses=['foo', 'bar'],
)
def test_repr(self):
constraint = ExclusionConstraint(
name='exclude_overlapping',
expressions=[
(F('datespan'), RangeOperators.OVERLAPS),
(F('room'), RangeOperators.EQUAL),
],
)
self.assertEqual(
repr(constraint),
"<ExclusionConstraint: index_type='GIST' expressions=["
"(F(datespan), '&&'), (F(room), '=')] name='exclude_overlapping'>",
)
constraint = ExclusionConstraint(
name='exclude_overlapping',
expressions=[(F('datespan'), RangeOperators.ADJACENT_TO)],
condition=Q(cancelled=False),
index_type='SPGiST',
)
self.assertEqual(
repr(constraint),
"<ExclusionConstraint: index_type='SPGiST' expressions=["
"(F(datespan), '-|-')] name='exclude_overlapping' "
"condition=(AND: ('cancelled', False))>",
)
constraint = ExclusionConstraint(
name='exclude_overlapping',
expressions=[(F('datespan'), RangeOperators.ADJACENT_TO)],
deferrable=Deferrable.IMMEDIATE,
)
self.assertEqual(
repr(constraint),
"<ExclusionConstraint: index_type='GIST' expressions=["
"(F(datespan), '-|-')] name='exclude_overlapping' "
"deferrable=Deferrable.IMMEDIATE>",
)
constraint = ExclusionConstraint(
name='exclude_overlapping',
expressions=[(F('datespan'), RangeOperators.ADJACENT_TO)],
include=['cancelled', 'room'],
)
self.assertEqual(
repr(constraint),
"<ExclusionConstraint: index_type='GIST' expressions=["
"(F(datespan), '-|-')] name='exclude_overlapping' "
"include=('cancelled', 'room')>",
)
constraint = ExclusionConstraint(
name='exclude_overlapping',
expressions=[(F('datespan'), RangeOperators.ADJACENT_TO)],
opclasses=['range_ops'],
)
self.assertEqual(
repr(constraint),
"<ExclusionConstraint: index_type='GIST' expressions=["
"(F(datespan), '-|-')] name='exclude_overlapping' "
"opclasses=['range_ops']>",
)
def test_eq(self):
constraint_1 = ExclusionConstraint(
name='exclude_overlapping',
expressions=[
(F('datespan'), RangeOperators.OVERLAPS),
(F('room'), RangeOperators.EQUAL),
],
condition=Q(cancelled=False),
)
constraint_2 = ExclusionConstraint(
name='exclude_overlapping',
expressions=[
('datespan', RangeOperators.OVERLAPS),
('room', RangeOperators.EQUAL),
],
)
constraint_3 = ExclusionConstraint(
name='exclude_overlapping',
expressions=[('datespan', RangeOperators.OVERLAPS)],
condition=Q(cancelled=False),
)
constraint_4 = ExclusionConstraint(
name='exclude_overlapping',
expressions=[
('datespan', RangeOperators.OVERLAPS),
('room', RangeOperators.EQUAL),
],
deferrable=Deferrable.DEFERRED,
)
constraint_5 = ExclusionConstraint(
name='exclude_overlapping',
expressions=[
('datespan', RangeOperators.OVERLAPS),
('room', RangeOperators.EQUAL),
],
deferrable=Deferrable.IMMEDIATE,
)
constraint_6 = ExclusionConstraint(
name='exclude_overlapping',
expressions=[
('datespan', RangeOperators.OVERLAPS),
('room', RangeOperators.EQUAL),
],
deferrable=Deferrable.IMMEDIATE,
include=['cancelled'],
)
constraint_7 = ExclusionConstraint(
name='exclude_overlapping',
expressions=[
('datespan', RangeOperators.OVERLAPS),
('room', RangeOperators.EQUAL),
],
include=['cancelled'],
)
constraint_8 = ExclusionConstraint(
name='exclude_overlapping',
expressions=[
('datespan', RangeOperators.OVERLAPS),
('room', RangeOperators.EQUAL),
],
include=['cancelled'],
opclasses=['range_ops', 'range_ops']
)
constraint_9 = ExclusionConstraint(
name='exclude_overlapping',
expressions=[
('datespan', RangeOperators.OVERLAPS),
('room', RangeOperators.EQUAL),
],
opclasses=['range_ops', 'range_ops']
)
self.assertEqual(constraint_1, constraint_1)
self.assertEqual(constraint_1, mock.ANY)
self.assertNotEqual(constraint_1, constraint_2)
self.assertNotEqual(constraint_1, constraint_3)
self.assertNotEqual(constraint_1, constraint_4)
self.assertNotEqual(constraint_2, constraint_3)
self.assertNotEqual(constraint_2, constraint_4)
self.assertNotEqual(constraint_2, constraint_7)
self.assertNotEqual(constraint_2, constraint_9)
self.assertNotEqual(constraint_4, constraint_5)
self.assertNotEqual(constraint_5, constraint_6)
self.assertNotEqual(constraint_7, constraint_8)
self.assertNotEqual(constraint_1, object())
def test_deconstruct(self):
constraint = ExclusionConstraint(
name='exclude_overlapping',
expressions=[('datespan', RangeOperators.OVERLAPS), ('room', RangeOperators.EQUAL)],
)
path, args, kwargs = constraint.deconstruct()
self.assertEqual(path, 'django.contrib.postgres.constraints.ExclusionConstraint')
self.assertEqual(args, ())
self.assertEqual(kwargs, {
'name': 'exclude_overlapping',
'expressions': [('datespan', RangeOperators.OVERLAPS), ('room', RangeOperators.EQUAL)],
})
def test_deconstruct_index_type(self):
constraint = ExclusionConstraint(
name='exclude_overlapping',
index_type='SPGIST',
expressions=[('datespan', RangeOperators.OVERLAPS), ('room', RangeOperators.EQUAL)],
)
path, args, kwargs = constraint.deconstruct()
self.assertEqual(path, 'django.contrib.postgres.constraints.ExclusionConstraint')
self.assertEqual(args, ())
self.assertEqual(kwargs, {
'name': 'exclude_overlapping',
'index_type': 'SPGIST',
'expressions': [('datespan', RangeOperators.OVERLAPS), ('room', RangeOperators.EQUAL)],
})
def test_deconstruct_condition(self):
constraint = ExclusionConstraint(
name='exclude_overlapping',
expressions=[('datespan', RangeOperators.OVERLAPS), ('room', RangeOperators.EQUAL)],
condition=Q(cancelled=False),
)
path, args, kwargs = constraint.deconstruct()
self.assertEqual(path, 'django.contrib.postgres.constraints.ExclusionConstraint')
self.assertEqual(args, ())
self.assertEqual(kwargs, {
'name': 'exclude_overlapping',
'expressions': [('datespan', RangeOperators.OVERLAPS), ('room', RangeOperators.EQUAL)],
'condition': Q(cancelled=False),
})
def test_deconstruct_deferrable(self):
constraint = ExclusionConstraint(
name='exclude_overlapping',
expressions=[('datespan', RangeOperators.OVERLAPS)],
deferrable=Deferrable.DEFERRED,
)
path, args, kwargs = constraint.deconstruct()
self.assertEqual(path, 'django.contrib.postgres.constraints.ExclusionConstraint')
self.assertEqual(args, ())
self.assertEqual(kwargs, {
'name': 'exclude_overlapping',
'expressions': [('datespan', RangeOperators.OVERLAPS)],
'deferrable': Deferrable.DEFERRED,
})
def test_deconstruct_include(self):
constraint = ExclusionConstraint(
name='exclude_overlapping',
expressions=[('datespan', RangeOperators.OVERLAPS)],
include=['cancelled', 'room'],
)
path, args, kwargs = constraint.deconstruct()
self.assertEqual(path, 'django.contrib.postgres.constraints.ExclusionConstraint')
self.assertEqual(args, ())
self.assertEqual(kwargs, {
'name': 'exclude_overlapping',
'expressions': [('datespan', RangeOperators.OVERLAPS)],
'include': ('cancelled', 'room'),
})
def test_deconstruct_opclasses(self):
constraint = ExclusionConstraint(
name='exclude_overlapping',
expressions=[('datespan', RangeOperators.OVERLAPS)],
opclasses=['range_ops'],
)
path, args, kwargs = constraint.deconstruct()
self.assertEqual(path, 'django.contrib.postgres.constraints.ExclusionConstraint')
self.assertEqual(args, ())
self.assertEqual(kwargs, {
'name': 'exclude_overlapping',
'expressions': [('datespan', RangeOperators.OVERLAPS)],
'opclasses': ['range_ops'],
})
def _test_range_overlaps(self, constraint):
# Create exclusion constraint.
self.assertNotIn(constraint.name, self.get_constraints(HotelReservation._meta.db_table))
with connection.schema_editor() as editor:
editor.add_constraint(HotelReservation, constraint)
self.assertIn(constraint.name, self.get_constraints(HotelReservation._meta.db_table))
# Add initial reservations.
room101 = Room.objects.create(number=101)
room102 = Room.objects.create(number=102)
datetimes = [
timezone.datetime(2018, 6, 20),
timezone.datetime(2018, 6, 24),
timezone.datetime(2018, 6, 26),
timezone.datetime(2018, 6, 28),
timezone.datetime(2018, 6, 29),
]
HotelReservation.objects.create(
datespan=DateRange(datetimes[0].date(), datetimes[1].date()),
start=datetimes[0],
end=datetimes[1],
room=room102,
)
HotelReservation.objects.create(
datespan=DateRange(datetimes[1].date(), datetimes[3].date()),
start=datetimes[1],
end=datetimes[3],
room=room102,
)
# Overlap dates.
with self.assertRaises(IntegrityError), transaction.atomic():
reservation = HotelReservation(
datespan=(datetimes[1].date(), datetimes[2].date()),
start=datetimes[1],
end=datetimes[2],
room=room102,
)
reservation.save()
# Valid range.
HotelReservation.objects.bulk_create([
# Other room.
HotelReservation(
datespan=(datetimes[1].date(), datetimes[2].date()),
start=datetimes[1],
end=datetimes[2],
room=room101,
),
# Cancelled reservation.
HotelReservation(
datespan=(datetimes[1].date(), datetimes[1].date()),
start=datetimes[1],
end=datetimes[2],
room=room102,
cancelled=True,
),
# Other adjacent dates.
HotelReservation(
datespan=(datetimes[3].date(), datetimes[4].date()),
start=datetimes[3],
end=datetimes[4],
room=room102,
),
])
def test_range_overlaps_custom(self):
class TsTzRange(Func):
function = 'TSTZRANGE'
output_field = DateTimeRangeField()
constraint = ExclusionConstraint(
name='exclude_overlapping_reservations_custom',
expressions=[
(TsTzRange('start', 'end', RangeBoundary()), RangeOperators.OVERLAPS),
('room', RangeOperators.EQUAL)
],
condition=Q(cancelled=False),
opclasses=['range_ops', 'gist_int4_ops'],
)
self._test_range_overlaps(constraint)
def test_range_overlaps(self):
constraint = ExclusionConstraint(
name='exclude_overlapping_reservations',
expressions=[
(F('datespan'), RangeOperators.OVERLAPS),
('room', RangeOperators.EQUAL)
],
condition=Q(cancelled=False),
)
self._test_range_overlaps(constraint)
def test_range_adjacent(self):
constraint_name = 'ints_adjacent'
self.assertNotIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[('ints', RangeOperators.ADJACENT_TO)],
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
RangesModel.objects.create(ints=(20, 50))
with self.assertRaises(IntegrityError), transaction.atomic():
RangesModel.objects.create(ints=(10, 20))
RangesModel.objects.create(ints=(10, 19))
RangesModel.objects.create(ints=(51, 60))
# Drop the constraint.
with connection.schema_editor() as editor:
editor.remove_constraint(RangesModel, constraint)
self.assertNotIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
def test_expressions_with_params(self):
constraint_name = 'scene_left_equal'
self.assertNotIn(constraint_name, self.get_constraints(Scene._meta.db_table))
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[(Left('scene', 4), RangeOperators.EQUAL)],
)
with connection.schema_editor() as editor:
editor.add_constraint(Scene, constraint)
self.assertIn(constraint_name, self.get_constraints(Scene._meta.db_table))
def test_expressions_with_key_transform(self):
constraint_name = 'exclude_overlapping_reservations_smoking'
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[
(F('datespan'), RangeOperators.OVERLAPS),
(KeyTextTransform('smoking', 'requirements'), RangeOperators.EQUAL),
],
)
with connection.schema_editor() as editor:
editor.add_constraint(HotelReservation, constraint)
self.assertIn(
constraint_name,
self.get_constraints(HotelReservation._meta.db_table),
)
def test_index_transform(self):
constraint_name = 'first_index_equal'
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[('field__0', RangeOperators.EQUAL)],
)
with connection.schema_editor() as editor:
editor.add_constraint(IntegerArrayModel, constraint)
self.assertIn(
constraint_name,
self.get_constraints(IntegerArrayModel._meta.db_table),
)
def test_range_adjacent_initially_deferred(self):
constraint_name = 'ints_adjacent_deferred'
self.assertNotIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[('ints', RangeOperators.ADJACENT_TO)],
deferrable=Deferrable.DEFERRED,
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
RangesModel.objects.create(ints=(20, 50))
adjacent_range = RangesModel.objects.create(ints=(10, 20))
# Constraint behavior can be changed with SET CONSTRAINTS.
with self.assertRaises(IntegrityError):
with transaction.atomic(), connection.cursor() as cursor:
quoted_name = connection.ops.quote_name(constraint_name)
cursor.execute('SET CONSTRAINTS %s IMMEDIATE' % quoted_name)
# Remove adjacent range before the end of transaction.
adjacent_range.delete()
RangesModel.objects.create(ints=(10, 19))
RangesModel.objects.create(ints=(51, 60))
@skipUnlessDBFeature('supports_covering_gist_indexes')
def test_range_adjacent_gist_include(self):
constraint_name = 'ints_adjacent_gist_include'
self.assertNotIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[('ints', RangeOperators.ADJACENT_TO)],
index_type='gist',
include=['decimals', 'ints'],
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
RangesModel.objects.create(ints=(20, 50))
with self.assertRaises(IntegrityError), transaction.atomic():
RangesModel.objects.create(ints=(10, 20))
RangesModel.objects.create(ints=(10, 19))
RangesModel.objects.create(ints=(51, 60))
@skipUnlessDBFeature('supports_covering_spgist_indexes')
def test_range_adjacent_spgist_include(self):
constraint_name = 'ints_adjacent_spgist_include'
self.assertNotIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[('ints', RangeOperators.ADJACENT_TO)],
index_type='spgist',
include=['decimals', 'ints'],
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
RangesModel.objects.create(ints=(20, 50))
with self.assertRaises(IntegrityError), transaction.atomic():
RangesModel.objects.create(ints=(10, 20))
RangesModel.objects.create(ints=(10, 19))
RangesModel.objects.create(ints=(51, 60))
@skipUnlessDBFeature('supports_covering_gist_indexes')
def test_range_adjacent_gist_include_condition(self):
constraint_name = 'ints_adjacent_gist_include_condition'
self.assertNotIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[('ints', RangeOperators.ADJACENT_TO)],
index_type='gist',
include=['decimals'],
condition=Q(id__gte=100),
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
@skipUnlessDBFeature('supports_covering_spgist_indexes')
def test_range_adjacent_spgist_include_condition(self):
constraint_name = 'ints_adjacent_spgist_include_condition'
self.assertNotIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[('ints', RangeOperators.ADJACENT_TO)],
index_type='spgist',
include=['decimals'],
condition=Q(id__gte=100),
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
@skipUnlessDBFeature('supports_covering_gist_indexes')
def test_range_adjacent_gist_include_deferrable(self):
constraint_name = 'ints_adjacent_gist_include_deferrable'
self.assertNotIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[('ints', RangeOperators.ADJACENT_TO)],
index_type='gist',
include=['decimals'],
deferrable=Deferrable.DEFERRED,
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
@skipUnlessDBFeature('supports_covering_spgist_indexes')
def test_range_adjacent_spgist_include_deferrable(self):
constraint_name = 'ints_adjacent_spgist_include_deferrable'
self.assertNotIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[('ints', RangeOperators.ADJACENT_TO)],
index_type='spgist',
include=['decimals'],
deferrable=Deferrable.DEFERRED,
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
def test_gist_include_not_supported(self):
constraint_name = 'ints_adjacent_gist_include_not_supported'
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[('ints', RangeOperators.ADJACENT_TO)],
index_type='gist',
include=['id'],
)
msg = (
'Covering exclusion constraints using a GiST index require '
'PostgreSQL 12+.'
)
with connection.schema_editor() as editor:
with mock.patch(
'django.db.backends.postgresql.features.DatabaseFeatures.supports_covering_gist_indexes',
False,
):
with self.assertRaisesMessage(NotSupportedError, msg):
editor.add_constraint(RangesModel, constraint)
def test_spgist_include_not_supported(self):
constraint_name = 'ints_adjacent_spgist_include_not_supported'
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[('ints', RangeOperators.ADJACENT_TO)],
index_type='spgist',
include=['id'],
)
msg = (
'Covering exclusion constraints using an SP-GiST index require '
'PostgreSQL 14+.'
)
with connection.schema_editor() as editor:
with mock.patch(
'django.db.backends.postgresql.features.DatabaseFeatures.'
'supports_covering_spgist_indexes',
False,
):
with self.assertRaisesMessage(NotSupportedError, msg):
editor.add_constraint(RangesModel, constraint)
def test_range_adjacent_opclasses(self):
constraint_name = 'ints_adjacent_opclasses'
self.assertNotIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[('ints', RangeOperators.ADJACENT_TO)],
opclasses=['range_ops'],
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
RangesModel.objects.create(ints=(20, 50))
with self.assertRaises(IntegrityError), transaction.atomic():
RangesModel.objects.create(ints=(10, 20))
RangesModel.objects.create(ints=(10, 19))
RangesModel.objects.create(ints=(51, 60))
# Drop the constraint.
with connection.schema_editor() as editor:
editor.remove_constraint(RangesModel, constraint)
self.assertNotIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
def test_range_adjacent_opclasses_condition(self):
constraint_name = 'ints_adjacent_opclasses_condition'
self.assertNotIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[('ints', RangeOperators.ADJACENT_TO)],
opclasses=['range_ops'],
condition=Q(id__gte=100),
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
def test_range_adjacent_opclasses_deferrable(self):
constraint_name = 'ints_adjacent_opclasses_deferrable'
self.assertNotIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[('ints', RangeOperators.ADJACENT_TO)],
opclasses=['range_ops'],
deferrable=Deferrable.DEFERRED,
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
@skipUnlessDBFeature('supports_covering_gist_indexes')
def test_range_adjacent_gist_opclasses_include(self):
constraint_name = 'ints_adjacent_gist_opclasses_include'
self.assertNotIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[('ints', RangeOperators.ADJACENT_TO)],
index_type='gist',
opclasses=['range_ops'],
include=['decimals'],
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
@skipUnlessDBFeature('supports_covering_spgist_indexes')
def test_range_adjacent_spgist_opclasses_include(self):
constraint_name = 'ints_adjacent_spgist_opclasses_include'
self.assertNotIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[('ints', RangeOperators.ADJACENT_TO)],
index_type='spgist',
opclasses=['range_ops'],
include=['decimals'],
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
def test_range_equal_cast(self):
constraint_name = 'exclusion_equal_room_cast'
self.assertNotIn(constraint_name, self.get_constraints(Room._meta.db_table))
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[(Cast('number', IntegerField()), RangeOperators.EQUAL)],
)
with connection.schema_editor() as editor:
editor.add_constraint(Room, constraint)
self.assertIn(constraint_name, self.get_constraints(Room._meta.db_table))
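

# Illustrative sketch (not part of the test suite above): the kind of
# constraint the opclasses/include tests exercise, gathered in one helper.
# The name and included column below are assumptions for illustration only;
# adding a GiST-backed covering constraint like this needs PostgreSQL 12+,
# and the SP-GiST variant needs 14+, which is what the NotSupportedError
# tests above assert.
def _example_adjacent_ints_constraint():
    return ExclusionConstraint(
        name='example_ints_adjacent',
        expressions=[('ints', RangeOperators.ADJACENT_TO)],
        index_type='gist',
        opclasses=['range_ops'],
        include=['id'],
    )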
|
|
# -*- coding: utf-8 -*-
import mock
import pytest
import unittest
from json import dumps
from addons.base.tests.models import (OAuthAddonNodeSettingsTestSuiteMixin,
OAuthAddonUserSettingTestSuiteMixin)
from addons.github.models import NodeSettings
from addons.github.tests import factories
from osf_tests.factories import ProjectFactory, UserFactory, DraftRegistrationFactory
from nose.tools import (assert_equal, assert_false, assert_in, assert_is,
assert_not_equal, assert_not_in, assert_true)
from github3 import GitHubError
from github3.repos import Repository
from tests.base import OsfTestCase, get_default_metaschema
from framework.auth import Auth
from addons.base import exceptions
from addons.github.exceptions import NotFoundError
from .utils import create_mock_github
mock_github = create_mock_github()
pytestmark = pytest.mark.django_db
class TestNodeSettings(OAuthAddonNodeSettingsTestSuiteMixin, unittest.TestCase):
short_name = 'github'
full_name = 'GitHub'
ExternalAccountFactory = factories.GitHubAccountFactory
NodeSettingsFactory = factories.GitHubNodeSettingsFactory
NodeSettingsClass = NodeSettings
UserSettingsFactory = factories.GitHubUserSettingsFactory
## Mixin Overrides ##
def _node_settings_class_kwargs(self, node, user_settings):
return {
'user_settings': self.user_settings,
'repo': 'mock',
'user': 'abc',
'owner': self.node
}
def test_set_folder(self):
# GitHub doesn't use folderpicker, and the nodesettings model
# does not need a `set_repo` method
pass
def test_serialize_settings(self):
# GitHub's serialized_settings are a little different from
# common storage addons.
settings = self.node_settings.serialize_waterbutler_settings()
expected = {'owner': self.node_settings.user, 'repo': self.node_settings.repo}
assert_equal(settings, expected)
@mock.patch(
'addons.github.models.UserSettings.revoke_remote_oauth_access',
mock.PropertyMock()
)
def test_complete_has_auth_not_verified(self):
super(TestNodeSettings, self).test_complete_has_auth_not_verified()
@mock.patch('addons.github.api.GitHubClient.repos')
@mock.patch('addons.github.api.GitHubClient.check_authorization')
    def test_to_json(self, mock_check_authorization, mock_repos):
        # The innermost patch (check_authorization) supplies the first mock.
        mock_repos.return_value = {}
super(TestNodeSettings, self).test_to_json()
@mock.patch('addons.github.api.GitHubClient.repos')
@mock.patch('addons.github.api.GitHubClient.check_authorization')
def test_to_json_user_is_owner(self, mock_check_authorization, mock_repos):
mock_check_authorization.return_value = True
mock_repos.return_value = {}
result = self.node_settings.to_json(self.user)
assert_true(result['user_has_auth'])
assert_equal(result['github_user'], 'abc')
assert_true(result['is_owner'])
assert_true(result['valid_credentials'])
assert_equal(result.get('repo_names', None), [])
@mock.patch('addons.github.api.GitHubClient.repos')
@mock.patch('addons.github.api.GitHubClient.check_authorization')
def test_to_json_user_is_not_owner(self, mock_check_authorization, mock_repos):
mock_check_authorization.return_value = True
mock_repos.return_value = {}
not_owner = UserFactory()
result = self.node_settings.to_json(not_owner)
assert_false(result['user_has_auth'])
assert_equal(result['github_user'], 'abc')
assert_false(result['is_owner'])
assert_true(result['valid_credentials'])
assert_equal(result.get('repo_names', None), None)
@mock.patch('addons.github.api.GitHubClient.repos')
@mock.patch('addons.github.api.GitHubClient.check_authorization')
def test_get_folders(self, mock_check_authorization, mock_repos):
        mock_repos.return_value = [
            Repository.from_json(dumps({
                'name': 'test',
                'id': '12345',
                'owner': {'login': 'test name'},
            }))
        ]
result = self.node_settings.get_folders()
assert_equal(len(result), 1)
assert_equal(result[0]['id'], '12345')
assert_equal(result[0]['name'], 'test')
assert_equal(result[0]['path'], 'test name/test')
assert_equal(result[0]['kind'], 'repo')
@mock.patch('addons.github.api.GitHubClient.repos')
@mock.patch('addons.github.api.GitHubClient.check_authorization')
    def test_get_folders_not_have_auth(self, mock_check_authorization, mock_repos):
        # The innermost patch (check_authorization) supplies the first mock.
        mock_repos.return_value = [
            Repository.from_json(dumps({
                'name': 'test',
                'id': '12345',
                'owner': {'login': 'test name'},
            }))
        ]
self.node_settings.user_settings = None
with pytest.raises(exceptions.InvalidAuthError):
self.node_settings.get_folders()
class TestUserSettings(OAuthAddonUserSettingTestSuiteMixin, unittest.TestCase):
short_name = 'github'
full_name = 'GitHub'
ExternalAccountFactory = factories.GitHubAccountFactory
def test_public_id(self):
assert_equal(self.user.external_accounts.first().display_name, self.user_settings.public_id)
class TestCallbacks(OsfTestCase):
def setUp(self):
super(TestCallbacks, self).setUp()
self.project = ProjectFactory()
self.consolidated_auth = Auth(self.project.creator)
self.project.creator.save()
self.non_authenticator = UserFactory()
self.non_authenticator.save()
self.project.save()
self.project.add_contributor(
contributor=self.non_authenticator,
auth=self.consolidated_auth,
)
self.project.add_addon('github', auth=self.consolidated_auth)
self.project.creator.add_addon('github')
self.external_account = factories.GitHubAccountFactory()
self.project.creator.external_accounts.add(self.external_account)
self.project.creator.save()
self.node_settings = self.project.get_addon('github')
self.user_settings = self.project.creator.get_addon('github')
self.node_settings.user_settings = self.user_settings
self.node_settings.user = 'Queen'
self.node_settings.repo = 'Sheer-Heart-Attack'
self.node_settings.external_account = self.external_account
self.node_settings.save()
        # Grant oauth access for this project/external-account pair directly
        # (the manual equivalent of what set_auth() would record).
        self.user_settings.oauth_grants[self.project._id] = {self.external_account._id: []}
self.user_settings.save()
@mock.patch('addons.github.api.GitHubClient.repo')
def test_before_make_public(self, mock_repo):
mock_repo.side_effect = NotFoundError
result = self.node_settings.before_make_public(self.project)
assert_is(result, None)
@mock.patch('addons.github.api.GitHubClient.repo')
def test_before_page_load_osf_public_gh_public(self, mock_repo):
self.project.is_public = True
self.project.save()
mock_repo.return_value = Repository.from_json(dumps({'private': False}))
message = self.node_settings.before_page_load(self.project, self.project.creator)
mock_repo.assert_called_with(
self.node_settings.user,
self.node_settings.repo,
)
assert_false(message)
@mock.patch('addons.github.api.GitHubClient.repo')
def test_before_page_load_osf_public_gh_private(self, mock_repo):
self.project.is_public = True
self.project.save()
mock_repo.return_value = Repository.from_json(dumps({'private': True}))
message = self.node_settings.before_page_load(self.project, self.project.creator)
mock_repo.assert_called_with(
self.node_settings.user,
self.node_settings.repo,
)
assert_true(message)
@mock.patch('addons.github.api.GitHubClient.repo')
def test_before_page_load_osf_private_gh_public(self, mock_repo):
mock_repo.return_value = Repository.from_json(dumps({'private': False}))
message = self.node_settings.before_page_load(self.project, self.project.creator)
mock_repo.assert_called_with(
self.node_settings.user,
self.node_settings.repo,
)
assert_true(message)
@mock.patch('addons.github.api.GitHubClient.repo')
def test_before_page_load_osf_private_gh_private(self, mock_repo):
mock_repo.return_value = Repository.from_json(dumps({'private': True}))
message = self.node_settings.before_page_load(self.project, self.project.creator)
mock_repo.assert_called_with(
self.node_settings.user,
self.node_settings.repo,
)
assert_false(message)
def test_before_page_load_not_contributor(self):
message = self.node_settings.before_page_load(self.project, UserFactory())
assert_false(message)
def test_before_page_load_not_logged_in(self):
message = self.node_settings.before_page_load(self.project, None)
assert_false(message)
def test_before_remove_contributor_authenticator(self):
message = self.node_settings.before_remove_contributor(
self.project, self.project.creator
)
assert_true(message)
def test_before_remove_contributor_not_authenticator(self):
message = self.node_settings.before_remove_contributor(
self.project, self.non_authenticator
)
assert_false(message)
def test_after_remove_contributor_authenticator_self(self):
message = self.node_settings.after_remove_contributor(
self.project, self.project.creator, self.consolidated_auth
)
assert_equal(
self.node_settings.user_settings,
None
)
assert_true(message)
assert_not_in('You can re-authenticate', message)
def test_after_remove_contributor_authenticator_not_self(self):
auth = Auth(user=self.non_authenticator)
message = self.node_settings.after_remove_contributor(
self.project, self.project.creator, auth
)
assert_equal(
self.node_settings.user_settings,
None
)
assert_true(message)
assert_in('You can re-authenticate', message)
def test_after_remove_contributor_not_authenticator(self):
self.node_settings.after_remove_contributor(
self.project, self.non_authenticator, self.consolidated_auth
)
assert_not_equal(
self.node_settings.user_settings,
None,
)
def test_after_fork_authenticator(self):
fork = ProjectFactory()
clone = self.node_settings.after_fork(
self.project, fork, self.project.creator,
)
assert_equal(
self.node_settings.user_settings,
clone.user_settings,
)
def test_after_fork_not_authenticator(self):
fork = ProjectFactory()
clone = self.node_settings.after_fork(
self.project, fork, self.non_authenticator,
)
assert_equal(
clone.user_settings,
None,
)
def test_after_delete(self):
self.project.remove_node(Auth(user=self.project.creator))
# Ensure that changes to node settings have been saved
self.node_settings.reload()
assert_true(self.node_settings.user_settings is None)
@mock.patch('website.archiver.tasks.archive')
def test_does_not_get_copied_to_registrations(self, mock_archive):
registration = self.project.register_node(
schema=get_default_metaschema(),
auth=Auth(user=self.project.creator),
draft_registration=DraftRegistrationFactory(branched_from=self.project),
)
assert_false(registration.has_addon('github'))
class TestGithubNodeSettings(unittest.TestCase):
def setUp(self):
super(TestGithubNodeSettings, self).setUp()
self.user = UserFactory()
self.user.add_addon('github')
self.user_settings = self.user.get_addon('github')
self.external_account = factories.GitHubAccountFactory()
self.user_settings.owner.external_accounts.add(self.external_account)
self.user_settings.owner.save()
self.node_settings = factories.GitHubNodeSettingsFactory(user_settings=self.user_settings)
@mock.patch('addons.github.api.GitHubClient.delete_hook')
def test_delete_hook(self, mock_delete_hook):
self.node_settings.hook_id = 'hook'
self.node_settings.save()
args = (
self.node_settings.user,
self.node_settings.repo,
self.node_settings.hook_id,
)
res = self.node_settings.delete_hook()
assert_true(res)
mock_delete_hook.assert_called_with(*args)
@mock.patch('addons.github.api.GitHubClient.delete_hook')
def test_delete_hook_no_hook(self, mock_delete_hook):
res = self.node_settings.delete_hook()
assert_false(res)
assert_false(mock_delete_hook.called)
@mock.patch('addons.github.api.GitHubClient.delete_hook')
def test_delete_hook_not_found(self, mock_delete_hook):
self.node_settings.hook_id = 'hook'
self.node_settings.save()
mock_delete_hook.side_effect = NotFoundError
args = (
self.node_settings.user,
self.node_settings.repo,
self.node_settings.hook_id,
)
res = self.node_settings.delete_hook()
assert_false(res)
mock_delete_hook.assert_called_with(*args)
@mock.patch('addons.github.api.GitHubClient.delete_hook')
def test_delete_hook_error(self, mock_delete_hook):
self.node_settings.hook_id = 'hook'
self.node_settings.save()
mock_delete_hook.side_effect = GitHubError(mock.Mock())
args = (
self.node_settings.user,
self.node_settings.repo,
self.node_settings.hook_id,
)
res = self.node_settings.delete_hook()
assert_false(res)
mock_delete_hook.assert_called_with(*args)
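

# Sketch of a possible fixture helper (an assumption, not something the addon
# ships): the tests above repeatedly build github3 Repository objects from
# hand-written JSON, and a helper like this would centralise that pattern.
def _make_repo(name='test', repo_id='12345', owner='test name', private=False):
    return Repository.from_json(dumps({
        'name': name,
        'id': repo_id,
        'owner': {'login': owner},
        'private': private,
    }))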
|
|
"""
Mixin for cache with joblib
"""
# Author: Gael Varoquaux, Alexandre Abraham, Philippe Gervais
# License: simplified BSD
import warnings
import os
import shutil
from distutils.version import LooseVersion
import json
import nibabel
from sklearn.externals.joblib import Memory
memory_classes = (Memory, )
try:
from joblib import Memory as JoblibMemory
memory_classes = (Memory, JoblibMemory)
except ImportError:
pass
import nilearn
__cache_checked = dict()
def _safe_cache(memory, func, **kwargs):
""" A wrapper for mem.cache that flushes the cache if the version
number of nibabel has changed.
"""
cachedir = memory.cachedir
    if cachedir is not None and cachedir not in __cache_checked:
version_file = os.path.join(cachedir, 'module_versions.json')
if not os.path.exists(version_file):
versions = dict()
else:
with open(version_file, 'r') as _version_file:
versions = json.load(_version_file)
write_file = False
flush_cache = False
for module in (nibabel, ):
# Keep only the major + minor version numbers
this_version = LooseVersion(module.__version__).version[:2]
this_name = module.__name__
            if this_name not in versions:
versions[this_name] = this_version
write_file = True
else:
previous_version = versions[this_name]
if previous_version != this_version:
flush_cache = True
write_file = True
versions[this_name] = this_version
if flush_cache:
if nilearn.check_cache_version:
warnings.warn("Incompatible cache in %s: "
"old version of nibabel. Deleting "
"the cache. Put nilearn.check_cache_version "
"to false to avoid this behavior."
% cachedir)
try:
tmp_dir = (os.path.split(cachedir)[:-1]
+ ('old_%i' % os.getpid(), ))
tmp_dir = os.path.join(*tmp_dir)
# We use rename + unlink to be more robust to race
# conditions
os.rename(cachedir, tmp_dir)
shutil.rmtree(tmp_dir)
except OSError:
# Another process could have removed this dir
pass
try:
os.makedirs(cachedir)
except OSError:
# File exists?
pass
else:
warnings.warn("Incompatible cache in %s: "
"old version of nibabel." % cachedir)
if write_file:
with open(version_file, 'w') as _version_file:
                json.dump(versions, _version_file)
__cache_checked[cachedir] = True
return memory.cache(func, **kwargs)
def cache(func, memory, ref_memory_level=2, memory_level=1, **kwargs):
""" Return a joblib.Memory object.
The memory_level determines the level above which the wrapped
function output is cached. By specifying a numeric value for
    this level, the user can control the amount of cache memory
used. This function will cache the function call or not
depending on the cache level.
Parameters
----------
func: function
        The function whose output is to be cached.
memory: instance of joblib.Memory or string
Used to cache the function call.
ref_memory_level: int
        The reference memory_level used to determine if the function call must
        be cached or not (the call is cached only when ref_memory_level is
        strictly greater than memory_level).
memory_level: int
The memory_level from which caching must be enabled for the wrapped
function.
kwargs: keyword arguments
The keyword arguments passed to memory.cache
Returns
-------
mem: joblib.MemorizedFunc
        Object that wraps the function func. This object may be a no-op
        wrapper if the requested level is lower than the value given to
        cache(). For consistency, a joblib-wrapped callable is always
        returned.
"""
if ref_memory_level <= memory_level or memory is None:
memory = Memory(cachedir=None)
else:
memory = memory
if isinstance(memory, basestring):
memory = Memory(cachedir=memory)
if not isinstance(memory, memory_classes):
raise TypeError("'memory' argument must be a string or a "
"joblib.Memory object. "
"%s %s was given." % (memory, type(memory)))
if memory.cachedir is None:
warnings.warn("Caching has been enabled (memory_level = %d) "
"but no Memory object or path has been provided"
" (parameter memory). Caching deactivated for "
"function %s." %
(ref_memory_level, func.func_name),
stacklevel=2)
return _safe_cache(memory, func, **kwargs)
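

# --- Illustrative usage sketch (an assumption, not part of nilearn) ---------
def _example_expensive(x):
    # Module-level stand-in for a costly computation; joblib caches
    # module-level functions most reliably.
    return x * 2


def _example_cache_usage(cachedir):
    # With the defaults ref_memory_level=2 > memory_level=1 the call below is
    # actually cached under `cachedir`; passing memory=None would silently
    # disable caching instead.
    mem = Memory(cachedir=cachedir)
    cached = cache(_example_expensive, mem)
    return cached(21)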
class CacheMixin(object):
"""Mixin to add caching to a class.
This class is a thin layer on top of joblib.Memory, that mainly adds a
"caching level", similar to a "log level".
Usage: to cache the results of a method, wrap it in self._cache()
    defined by this class. Caching is performed only if the user-specified
    cache level (self.memory_level) is greater than or equal to the value
    given as a parameter to self._cache(). See _cache() documentation for
    details.
"""
def _cache(self, func, memory_level=1, **kwargs):
""" Return a joblib.Memory object.
The memory_level determines the level above which the wrapped
function output is cached. By specifying a numeric value for
        this level, the user can control the amount of cache memory
used. This function will cache the function call or not
depending on the cache level.
Parameters
----------
func: function
The function the output of which is to be cached.
memory_level: int
The memory_level from which caching must be enabled for the wrapped
function.
Returns
-------
        mem: joblib.MemorizedFunc
            Object that wraps the function func. This object may be a no-op
            wrapper if the requested level is lower than the value given to
            _cache(). For consistency, a joblib-wrapped callable is always
            returned.
"""
# Creates attributes if they don't exist
# This is to make creating them in __init__() optional.
if not hasattr(self, "memory_level"):
self.memory_level = 0
if not hasattr(self, "memory"):
self.memory = Memory(cachedir=None)
# If cache level is 0 but a memory object has been provided, set
# memory_level to 1 with a warning.
if self.memory_level == 0:
if (isinstance(self.memory, basestring)
or self.memory.cachedir is not None):
warnings.warn("memory_level is currently set to 0 but "
"a Memory object has been provided. "
"Setting memory_level to 1.")
self.memory_level = 1
verbose = getattr(self, 'verbose', 0)
if self.memory_level < memory_level:
memory = Memory(cachedir=None, verbose=verbose)
return _safe_cache(memory, func, **kwargs)
else:
memory = self.memory
if isinstance(memory, basestring):
memory = Memory(cachedir=memory, verbose=verbose)
if not isinstance(memory, memory_classes):
raise TypeError("'memory' argument must be a string or a "
"joblib.Memory object.")
if memory.cachedir is None:
warnings.warn("Caching has been enabled (memory_level = %d) "
"but no Memory object or path has been provided"
" (parameter memory). Caching deactivated for "
"function %s." %
(self.memory_level, func.func_name))
return _safe_cache(memory, func, **kwargs)
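

# Illustrative sketch (an assumption, not part of nilearn): a minimal class
# using CacheMixin. Caching is active here because the user-level
# memory_level (2) is >= the level requested in the _cache() call (1);
# _example_expensive is the illustrative helper defined after cache() above.
class _ExampleCachedEstimator(CacheMixin):
    def __init__(self, memory, memory_level=2, verbose=0):
        self.memory = memory
        self.memory_level = memory_level
        self.verbose = verbose

    def transform(self, x):
        # _cache() returns a (possibly no-op) joblib-wrapped callable.
        return self._cache(_example_expensive, memory_level=1)(x)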
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(
resource_group_name: str,
account_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2019-04-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
"accountName": _SERIALIZER.url("account_name", account_name, 'str', max_length=24, min_length=3),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_set_service_properties_request(
resource_group_name: str,
account_name: str,
subscription_id: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2019-04-01"
file_services_name = "default"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/{FileServicesName}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
"accountName": _SERIALIZER.url("account_name", account_name, 'str', max_length=24, min_length=3),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"FileServicesName": _SERIALIZER.url("file_services_name", file_services_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_get_service_properties_request(
resource_group_name: str,
account_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2019-04-01"
file_services_name = "default"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/{FileServicesName}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
"accountName": _SERIALIZER.url("account_name", account_name, 'str', max_length=24, min_length=3),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"FileServicesName": _SERIALIZER.url("file_services_name", file_services_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
class FileServicesOperations(object):
"""FileServicesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.storage.v2019_04_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list(
self,
resource_group_name: str,
account_name: str,
**kwargs: Any
) -> "_models.FileServiceItems":
"""List all file services in storage accounts.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: FileServiceItems, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2019_04_01.models.FileServiceItems
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FileServiceItems"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_list_request(
resource_group_name=resource_group_name,
account_name=account_name,
subscription_id=self._config.subscription_id,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('FileServiceItems', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices'} # type: ignore
@distributed_trace
def set_service_properties(
self,
resource_group_name: str,
account_name: str,
parameters: "_models.FileServiceProperties",
**kwargs: Any
) -> "_models.FileServiceProperties":
"""Sets the properties of file services in storage accounts, including CORS (Cross-Origin Resource
Sharing) rules.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param parameters: The properties of file services in storage accounts, including CORS
(Cross-Origin Resource Sharing) rules.
:type parameters: ~azure.mgmt.storage.v2019_04_01.models.FileServiceProperties
:keyword callable cls: A custom type or function that will be passed the direct response
:return: FileServiceProperties, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2019_04_01.models.FileServiceProperties
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FileServiceProperties"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'FileServiceProperties')
request = build_set_service_properties_request(
resource_group_name=resource_group_name,
account_name=account_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.set_service_properties.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('FileServiceProperties', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
set_service_properties.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/{FileServicesName}'} # type: ignore
@distributed_trace
def get_service_properties(
self,
resource_group_name: str,
account_name: str,
**kwargs: Any
) -> "_models.FileServiceProperties":
"""Gets the properties of file services in storage accounts, including CORS (Cross-Origin Resource
Sharing) rules.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: FileServiceProperties, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2019_04_01.models.FileServiceProperties
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FileServiceProperties"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_service_properties_request(
resource_group_name=resource_group_name,
account_name=account_name,
subscription_id=self._config.subscription_id,
template_url=self.get_service_properties.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('FileServiceProperties', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_service_properties.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/{FileServicesName}'} # type: ignore
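
# Usage sketch (an assumption, not generated code): this operations group is
# normally reached through the public management client rather than
# instantiated directly; the resource names below are placeholders.
#
#     from azure.identity import DefaultAzureCredential
#     from azure.mgmt.storage import StorageManagementClient
#
#     client = StorageManagementClient(DefaultAzureCredential(), "<subscription-id>")
#     items = client.file_services.list("<resource-group>", "<storage-account>")
#     props = client.file_services.get_service_properties("<resource-group>", "<storage-account>")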
|
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint:disable=protected-access
# pylint:disable=too-many-lines
import functools
from collections import OrderedDict
from copy import deepcopy
from datetime import datetime, timedelta
from typing import Type, Dict, Any, Union, Optional, List
from msrest.serialization import Model
from ._generated.models import (
QueueDescription as InternalQueueDescription,
TopicDescription as InternalTopicDescription,
SubscriptionDescription as InternalSubscriptionDescription,
RuleDescription as InternalRuleDescription,
SqlRuleAction as InternalSqlRuleAction,
EmptyRuleAction as InternalEmptyRuleAction,
CorrelationFilter as InternalCorrelationFilter,
NamespaceProperties as InternalNamespaceProperties,
SqlFilter as InternalSqlFilter,
TrueFilter as InternalTrueFilter,
FalseFilter as InternalFalseFilter,
KeyValue,
AuthorizationRule as InternalAuthorizationRule,
)
from ._model_workaround import (
adjust_attribute_map,
avoid_timedelta_overflow
)
from ._constants import RULE_SQL_COMPATIBILITY_LEVEL
from ._utils import _normalize_entity_path_to_full_path_if_needed
adjust_attribute_map()
# These helpers are to ensure that the Properties objects can't be constructed without all args present,
# as a compromise between our use of kwargs to flatten arg-lists and trying to de-incentivise manual instantiation
# while still trying to provide some guardrails.
def extract_kwarg_template(kwargs, extraction_missing_args, name):
try:
return kwargs[name]
except KeyError:
extraction_missing_args.append(name)
def validate_extraction_missing_args(extraction_missing_args):
if extraction_missing_args:
raise TypeError(
"__init__() missing {} required keyword arguments: {}".format(
len(extraction_missing_args),
" and ".join(["'" + e + "'" for e in extraction_missing_args]),
)
)
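
# Usage sketch for the two helpers above (an assumption; the property names
# are stand-ins): the *Properties classes below collect every missing kwarg
# first and then raise a single TypeError naming all of them.
#
#     missing = []  # type: List[str]
#     extract = functools.partial(extract_kwarg_template, kwargs, missing)
#     self.lock_duration = extract("lock_duration")
#     self.requires_session = extract("requires_session")
#     validate_extraction_missing_args(missing)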
class DictMixin(object):
def __setitem__(self, key, item):
# type: (Any, Any) -> None
self.__dict__[key] = item
def __getitem__(self, key):
# type: (Any) -> Any
return self.__dict__[key]
def __repr__(self):
# type: () -> str
return str(self)
def __len__(self):
# type: () -> int
return len(self.keys())
def __delitem__(self, key):
# type: (Any) -> None
self.__dict__[key] = None
def __eq__(self, other):
# type: (Any) -> bool
"""Compare objects by comparing all attributes."""
if isinstance(other, self.__class__):
return self.__dict__ == other.__dict__
return False
def __ne__(self, other):
# type: (Any) -> bool
"""Compare objects by comparing all attributes."""
return not self.__eq__(other)
def __str__(self):
# type: () -> str
return str({k: v for k, v in self.__dict__.items() if not k.startswith("_")})
def has_key(self, k):
# type: (Any) -> bool
return k in self.__dict__
def update(self, *args, **kwargs):
# type: (Any, Any) -> None
return self.__dict__.update(*args, **kwargs)
def keys(self):
# type: () -> list
return [k for k in self.__dict__ if not k.startswith("_")]
def values(self):
# type: () -> list
return [v for k, v in self.__dict__.items() if not k.startswith("_")]
def items(self):
# type: () -> list
return [(k, v) for k, v in self.__dict__.items() if not k.startswith("_")]
def get(self, key, default=None):
# type: (Any, Optional[Any]) -> Any
if key in self.__dict__:
return self.__dict__[key]
return default
class NamespaceProperties(DictMixin):
"""The metadata related to a Service Bus namespace.
:ivar alias: Alias for the geo-disaster recovery Service Bus namespace.
:type alias: str
:ivar created_at_utc: The exact time the namespace was created.
:type created_at_utc: ~datetime.datetime
:ivar messaging_sku: The SKU for the messaging entity. Possible values include: "Basic",
"Standard", "Premium".
:type messaging_sku: str or ~azure.servicebus.management._generated.models.MessagingSku
:ivar messaging_units: The number of messaging units allocated to the namespace.
:type messaging_units: int
:ivar modified_at_utc: The exact time the namespace was last modified.
:type modified_at_utc: ~datetime.datetime
:ivar name: Name of the namespace.
:type name: str
"""
def __init__(self, name, **kwargs):
# type: (str, Any) -> None
self.name = name
extraction_missing_args = [] # type: List[str]
extract_kwarg = functools.partial(
extract_kwarg_template, kwargs, extraction_missing_args
)
self.alias = extract_kwarg("alias")
self.created_at_utc = extract_kwarg("created_at_utc")
self.messaging_sku = extract_kwarg("messaging_sku")
self.messaging_units = extract_kwarg("messaging_units")
self.modified_at_utc = extract_kwarg("modified_at_utc")
self.namespace_type = extract_kwarg("namespace_type")
validate_extraction_missing_args(extraction_missing_args)
@classmethod
def _from_internal_entity(cls, name, internal_entity):
# type: (str, InternalNamespaceProperties) -> NamespaceProperties
namespace_properties = cls(
name,
alias=internal_entity.alias,
created_at_utc=internal_entity.created_time,
messaging_sku=internal_entity.messaging_sku,
messaging_units=internal_entity.messaging_units,
modified_at_utc=internal_entity.modified_time,
namespace_type=internal_entity.namespace_type,
)
return namespace_properties
def _to_internal_entity(self):
internal_entity = InternalNamespaceProperties()
internal_entity.alias = self.alias
internal_entity.created_time = self.created_at_utc
internal_entity.messaging_sku = self.messaging_sku
internal_entity.messaging_units = self.messaging_units
internal_entity.modified_time = self.modified_at_utc
internal_entity.namespace_type = self.namespace_type
return internal_entity
class QueueProperties(DictMixin): # pylint:disable=too-many-instance-attributes
"""Properties of a Service Bus queue resource.
:ivar name: Name of the queue.
:type name: str
:ivar authorization_rules: Authorization rules for resource.
:type authorization_rules: list[~azure.servicebus.management.AuthorizationRule]
:ivar auto_delete_on_idle: ISO 8601 timeSpan idle interval after which the queue is
automatically deleted. The minimum duration is 5 minutes.
:type auto_delete_on_idle: ~datetime.timedelta
:ivar dead_lettering_on_message_expiration: A value that indicates whether this queue has dead
letter support when a message expires.
:type dead_lettering_on_message_expiration: bool
:ivar default_message_time_to_live: ISO 8601 default message timespan to live value. This is
the duration after which the message expires, starting from when the message is sent to Service
Bus. This is the default value used when TimeToLive is not set on a message itself.
:type default_message_time_to_live: ~datetime.timedelta
:ivar duplicate_detection_history_time_window: ISO 8601 timeSpan structure that defines the
duration of the duplicate detection history. The default value is 10 minutes.
:type duplicate_detection_history_time_window: ~datetime.timedelta
    :ivar availability_status: Availability status of the entity. Possible values include:
"Available", "Limited", "Renaming", "Restoring", "Unknown".
:type availability_status: str or
~azure.servicebus.management.EntityAvailabilityStatus
:ivar enable_batched_operations: Value that indicates whether server-side batched operations
are enabled.
:type enable_batched_operations: bool
:ivar enable_express: A value that indicates whether Express Entities are enabled. An express
queue holds a message in memory temporarily before writing it to persistent storage.
:type enable_express: bool
:ivar enable_partitioning: A value that indicates whether the queue is to be partitioned
across multiple message brokers.
:type enable_partitioning: bool
:ivar lock_duration: ISO 8601 timespan duration of a peek-lock; that is, the amount of time
that the message is locked for other receivers. The maximum value for LockDuration is 5
minutes; the default value is 1 minute.
:type lock_duration: ~datetime.timedelta
:ivar max_delivery_count: The maximum delivery count. A message is automatically deadlettered
after this number of deliveries. Default value is 10.
:type max_delivery_count: int
:ivar max_size_in_megabytes: The maximum size of the queue in megabytes, which is the size of
memory allocated for the queue.
:type max_size_in_megabytes: int
:ivar requires_duplicate_detection: A value indicating if this queue requires duplicate
detection.
:type requires_duplicate_detection: bool
:ivar requires_session: A value that indicates whether the queue supports the concept of
sessions.
:type requires_session: bool
:ivar status: Status of a Service Bus resource. Possible values include: "Active", "Creating",
"Deleting", "Disabled", "ReceiveDisabled", "Renaming", "Restoring", "SendDisabled", "Unknown".
:type status: str or ~azure.servicebus.management.EntityStatus
:ivar forward_to: The name of the recipient entity to which all the messages sent to the queue
        are forwarded.
:type forward_to: str
    :ivar user_metadata: Custom metadata that the user can associate with the description. Max length
is 1024 chars.
:type user_metadata: str
:ivar forward_dead_lettered_messages_to: The name of the recipient entity to which all the
        dead-lettered messages of this queue are forwarded.
:type forward_dead_lettered_messages_to: str
:ivar max_message_size_in_kilobytes: The maximum size in kilobytes of message payload that
can be accepted by the queue. This feature is only available when using a Premium namespace
and Service Bus API version "2021-05" or higher.
:type max_message_size_in_kilobytes: int
"""
def __init__(self, name, **kwargs):
# type: (str, Any) -> None
self.name = name
self._internal_qd = None # type: Optional[InternalQueueDescription]
extraction_missing_args = [] # type: List[str]
extract_kwarg = functools.partial(
extract_kwarg_template, kwargs, extraction_missing_args
)
self.authorization_rules = extract_kwarg("authorization_rules")
self.auto_delete_on_idle = extract_kwarg("auto_delete_on_idle")
self.dead_lettering_on_message_expiration = extract_kwarg(
"dead_lettering_on_message_expiration"
)
self.default_message_time_to_live = extract_kwarg(
"default_message_time_to_live"
)
self.duplicate_detection_history_time_window = extract_kwarg(
"duplicate_detection_history_time_window"
)
self.availability_status = extract_kwarg("availability_status")
self.enable_batched_operations = extract_kwarg("enable_batched_operations")
self.enable_express = extract_kwarg("enable_express")
self.enable_partitioning = extract_kwarg("enable_partitioning")
self.lock_duration = extract_kwarg("lock_duration")
self.max_delivery_count = extract_kwarg("max_delivery_count")
self.max_size_in_megabytes = extract_kwarg("max_size_in_megabytes")
self.requires_duplicate_detection = extract_kwarg(
"requires_duplicate_detection"
)
self.requires_session = extract_kwarg("requires_session")
self.status = extract_kwarg("status")
self.forward_to = extract_kwarg("forward_to")
self.user_metadata = extract_kwarg("user_metadata")
self.forward_dead_lettered_messages_to = extract_kwarg(
"forward_dead_lettered_messages_to"
)
self.max_message_size_in_kilobytes = extract_kwarg("max_message_size_in_kilobytes")
validate_extraction_missing_args(extraction_missing_args)
@classmethod
def _from_internal_entity(cls, name, internal_qd):
# type: (str, InternalQueueDescription) -> QueueProperties
qd = cls(
name,
authorization_rules=[
AuthorizationRule._from_internal_entity(r)
for r in internal_qd.authorization_rules
]
if internal_qd.authorization_rules
else (internal_qd.authorization_rules or []),
auto_delete_on_idle=internal_qd.auto_delete_on_idle,
dead_lettering_on_message_expiration=internal_qd.dead_lettering_on_message_expiration,
default_message_time_to_live=internal_qd.default_message_time_to_live,
duplicate_detection_history_time_window=internal_qd.duplicate_detection_history_time_window,
availability_status=internal_qd.entity_availability_status,
enable_batched_operations=internal_qd.enable_batched_operations,
enable_express=internal_qd.enable_express,
enable_partitioning=internal_qd.enable_partitioning,
lock_duration=internal_qd.lock_duration,
max_delivery_count=internal_qd.max_delivery_count,
max_size_in_megabytes=internal_qd.max_size_in_megabytes,
requires_duplicate_detection=internal_qd.requires_duplicate_detection,
requires_session=internal_qd.requires_session,
status=internal_qd.status,
forward_to=internal_qd.forward_to,
forward_dead_lettered_messages_to=internal_qd.forward_dead_lettered_messages_to,
user_metadata=internal_qd.user_metadata,
max_message_size_in_kilobytes=internal_qd.max_message_size_in_kilobytes
)
qd._internal_qd = deepcopy(internal_qd) # pylint:disable=protected-access
return qd
def _to_internal_entity(self, fully_qualified_namespace, kwargs=None):
# type: (str, Optional[Dict]) -> InternalQueueDescription
kwargs = kwargs or {}
if not self._internal_qd:
internal_qd = InternalQueueDescription()
self._internal_qd = internal_qd
authorization_rules = kwargs.pop("authorization_rules", self.authorization_rules)
self._internal_qd.authorization_rules = (
[r._to_internal_entity() for r in authorization_rules]
if authorization_rules
else authorization_rules
)
self._internal_qd.auto_delete_on_idle = avoid_timedelta_overflow( # type: ignore
kwargs.pop("auto_delete_on_idle", self.auto_delete_on_idle)
)
self._internal_qd.dead_lettering_on_message_expiration = (
kwargs.pop("dead_lettering_on_message_expiration", self.dead_lettering_on_message_expiration)
)
self._internal_qd.default_message_time_to_live = avoid_timedelta_overflow( # type: ignore
kwargs.pop("default_message_time_to_live", self.default_message_time_to_live)
)
self._internal_qd.duplicate_detection_history_time_window = (
kwargs.pop("duplicate_detection_history_time_window", self.duplicate_detection_history_time_window)
)
self._internal_qd.entity_availability_status = kwargs.pop("availability_status", self.availability_status)
self._internal_qd.enable_batched_operations = (
kwargs.pop("enable_batched_operations", self.enable_batched_operations)
)
self._internal_qd.enable_express = kwargs.pop("enable_express", self.enable_express)
self._internal_qd.enable_partitioning = kwargs.pop("enable_partitioning", self.enable_partitioning)
self._internal_qd.lock_duration = kwargs.pop("lock_duration", self.lock_duration)
self._internal_qd.max_delivery_count = kwargs.pop("max_delivery_count", self.max_delivery_count)
self._internal_qd.max_size_in_megabytes = kwargs.pop("max_size_in_megabytes", self.max_size_in_megabytes)
self._internal_qd.requires_duplicate_detection = (
kwargs.pop("requires_duplicate_detection", self.requires_duplicate_detection)
)
self._internal_qd.requires_session = kwargs.pop("requires_session", self.requires_session)
self._internal_qd.status = kwargs.pop("status", self.status)
forward_to = kwargs.pop("forward_to", self.forward_to)
self._internal_qd.forward_to = _normalize_entity_path_to_full_path_if_needed(
forward_to,
fully_qualified_namespace
)
forward_dead_lettered_messages_to = (
kwargs.pop("forward_dead_lettered_messages_to", self.forward_dead_lettered_messages_to)
)
self._internal_qd.forward_dead_lettered_messages_to = _normalize_entity_path_to_full_path_if_needed(
forward_dead_lettered_messages_to,
fully_qualified_namespace
)
self._internal_qd.user_metadata = kwargs.pop("user_metadata", self.user_metadata)
self._internal_qd.max_message_size_in_kilobytes = kwargs.pop(
"max_message_size_in_kilobytes",
self.max_message_size_in_kilobytes
)
return self._internal_qd
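
# Usage sketch (an assumption, not part of this module): QueueProperties
# instances are normally obtained from the administration client, mutated in
# place, and passed back; the names below are placeholders.
#
#     from azure.servicebus.management import ServiceBusAdministrationClient
#
#     with ServiceBusAdministrationClient.from_connection_string("<conn-str>") as mgmt:
#         queue = mgmt.get_queue("<queue-name>")
#         queue.max_delivery_count = 5
#         mgmt.update_queue(queue)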
class QueueRuntimeProperties(object):
"""Service Bus queue runtime properties."""
def __init__(
self,
):
# type: () -> None
self._name = None # type: Optional[str]
self._internal_qr = None # type: Optional[InternalQueueDescription]
@classmethod
def _from_internal_entity(cls, name, internal_qr):
# type: (str, InternalQueueDescription) -> QueueRuntimeProperties
qr = cls()
qr._name = name
qr._internal_qr = deepcopy(internal_qr) # pylint:disable=protected-access
return qr
@property
def name(self):
"""Name of the queue.
:rtype: str
"""
return self._name
@property
def accessed_at_utc(self):
"""Last time a message was sent, or the last time there was a receive request to this queue.
:rtype: ~datetime.datetime
"""
return self._internal_qr.accessed_at
@property
def created_at_utc(self):
"""The exact time the queue was created.
:rtype: ~datetime.datetime
"""
return self._internal_qr.created_at
@property
def updated_at_utc(self):
"""The exact the entity was updated.
:rtype: ~datetime.datetime
"""
return self._internal_qr.updated_at
@property
def size_in_bytes(self):
"""The size of the queue, in bytes.
:rtype: int
"""
return self._internal_qr.size_in_bytes
@property
def total_message_count(self):
"""Total number of messages.
:rtype: int
"""
return self._internal_qr.message_count
@property
def active_message_count(self):
"""Number of active messages in the queue, topic, or subscription.
:rtype: int
"""
return self._internal_qr.message_count_details.active_message_count
@property
def dead_letter_message_count(self):
"""Number of messages that are dead lettered.
:rtype: int
"""
return self._internal_qr.message_count_details.dead_letter_message_count
@property
def scheduled_message_count(self):
"""Number of scheduled messages.
:rtype: int
"""
return self._internal_qr.message_count_details.scheduled_message_count
@property
def transfer_dead_letter_message_count(self):
"""Number of messages transferred into dead letters.
:rtype: int
"""
return (
self._internal_qr.message_count_details.transfer_dead_letter_message_count
)
@property
def transfer_message_count(self):
"""Number of messages transferred to another queue, topic, or subscription.
:rtype: int
"""
return self._internal_qr.message_count_details.transfer_message_count
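
# Usage sketch (an assumption): QueueRuntimeProperties is read-only and comes
# back from the administration client, e.g. for polling message counts, given
# an administration client `mgmt` as in the sketch above.
#
#     runtime = mgmt.get_queue_runtime_properties("<queue-name>")
#     backlog = runtime.active_message_count + runtime.scheduled_message_count
#     dead = runtime.dead_letter_message_count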
class TopicProperties(DictMixin): # pylint:disable=too-many-instance-attributes
"""Properties of a Service Bus topic resource.
:ivar name: Name of the topic.
:type name: str
:ivar default_message_time_to_live: ISO 8601 default message timespan to live value. This is
the duration after which the message expires, starting from when the message is sent to Service
Bus. This is the default value used when TimeToLive is not set on a message itself.
:type default_message_time_to_live: ~datetime.timedelta
:ivar max_size_in_megabytes: The maximum size of the topic in megabytes, which is the size of
memory allocated for the topic.
:type max_size_in_megabytes: long
:ivar requires_duplicate_detection: A value indicating if this topic requires duplicate
detection.
:type requires_duplicate_detection: bool
:ivar duplicate_detection_history_time_window: ISO 8601 timeSpan structure that defines the
duration of the duplicate detection history. The default value is 10 minutes.
:type duplicate_detection_history_time_window: ~datetime.timedelta
:ivar enable_batched_operations: Value that indicates whether server-side batched operations
are enabled.
:type enable_batched_operations: bool
:ivar size_in_bytes: The size of the topic, in bytes.
:type size_in_bytes: int
:ivar filtering_messages_before_publishing: Filter messages before publishing.
:type filtering_messages_before_publishing: bool
:ivar authorization_rules: Authorization rules for resource.
:type authorization_rules:
list[~azure.servicebus.management.AuthorizationRule]
:ivar status: Status of a Service Bus resource. Possible values include: "Active", "Creating",
"Deleting", "Disabled", "ReceiveDisabled", "Renaming", "Restoring", "SendDisabled", "Unknown".
:type status: str or ~azure.servicebus.management.EntityStatus
:ivar support_ordering: A value that indicates whether the topic supports ordering.
:type support_ordering: bool
:ivar auto_delete_on_idle: ISO 8601 timeSpan idle interval after which the topic is
automatically deleted. The minimum duration is 5 minutes.
:type auto_delete_on_idle: ~datetime.timedelta
:ivar enable_partitioning: A value that indicates whether the topic is to be partitioned
across multiple message brokers.
:type enable_partitioning: bool
:ivar availability_status: Availability status of the entity. Possible values include:
"Available", "Limited", "Renaming", "Restoring", "Unknown".
:type availability_status: str or
~azure.servicebus.management.EntityAvailabilityStatus
:ivar enable_express: A value that indicates whether Express Entities are enabled. An express
queue holds a message in memory temporarily before writing it to persistent storage.
:type enable_express: bool
:ivar user_metadata: Metadata associated with the topic.
:type user_metadata: str
:ivar max_message_size_in_kilobytes: The maximum size in kilobytes of message payload that
can be accepted by the topic. This feature is only available when using a Premium namespace
and Service Bus API version "2021-05" or higher.
:type max_message_size_in_kilobytes: int
"""
def __init__(self, name, **kwargs):
# type: (str, Any) -> None
self.name = name
self._internal_td = None # type: Optional[InternalTopicDescription]
extraction_missing_args = [] # type: List[str]
extract_kwarg = functools.partial(
extract_kwarg_template, kwargs, extraction_missing_args
)
self.default_message_time_to_live = extract_kwarg(
"default_message_time_to_live"
)
self.max_size_in_megabytes = extract_kwarg("max_size_in_megabytes")
self.requires_duplicate_detection = extract_kwarg(
"requires_duplicate_detection"
)
self.duplicate_detection_history_time_window = extract_kwarg(
"duplicate_detection_history_time_window"
)
self.enable_batched_operations = extract_kwarg("enable_batched_operations")
self.size_in_bytes = extract_kwarg("size_in_bytes")
self.authorization_rules = extract_kwarg("authorization_rules")
self.status = extract_kwarg("status")
self.support_ordering = extract_kwarg("support_ordering")
self.auto_delete_on_idle = extract_kwarg("auto_delete_on_idle")
self.enable_partitioning = extract_kwarg("enable_partitioning")
self.availability_status = extract_kwarg("availability_status")
self.enable_express = extract_kwarg("enable_express")
self.user_metadata = extract_kwarg("user_metadata")
self.max_message_size_in_kilobytes = extract_kwarg("max_message_size_in_kilobytes")
validate_extraction_missing_args(extraction_missing_args)
@classmethod
def _from_internal_entity(cls, name, internal_td):
# type: (str, InternalTopicDescription) -> TopicProperties
td = cls(
name,
default_message_time_to_live=internal_td.default_message_time_to_live,
max_size_in_megabytes=internal_td.max_size_in_megabytes,
requires_duplicate_detection=internal_td.requires_duplicate_detection,
duplicate_detection_history_time_window=internal_td.duplicate_detection_history_time_window,
enable_batched_operations=internal_td.enable_batched_operations,
size_in_bytes=internal_td.size_in_bytes,
authorization_rules=[
AuthorizationRule._from_internal_entity(r)
for r in internal_td.authorization_rules
]
if internal_td.authorization_rules
else internal_td.authorization_rules,
status=internal_td.status,
support_ordering=internal_td.support_ordering,
auto_delete_on_idle=internal_td.auto_delete_on_idle,
enable_partitioning=internal_td.enable_partitioning,
availability_status=internal_td.entity_availability_status,
enable_express=internal_td.enable_express,
user_metadata=internal_td.user_metadata,
max_message_size_in_kilobytes=internal_td.max_message_size_in_kilobytes
)
td._internal_td = deepcopy(internal_td)
return td
def _to_internal_entity(self, kwargs=None):
# type: (Optional[Dict]) -> InternalTopicDescription
kwargs = kwargs or {}
if not self._internal_td:
self._internal_td = InternalTopicDescription()
self._internal_td.default_message_time_to_live = avoid_timedelta_overflow( # type: ignore
kwargs.pop("default_message_time_to_live", self.default_message_time_to_live)
)
self._internal_td.max_size_in_megabytes = kwargs.pop("max_size_in_megabytes", self.max_size_in_megabytes)
self._internal_td.requires_duplicate_detection = (
kwargs.pop("requires_duplicate_detection", self.requires_duplicate_detection)
)
self._internal_td.duplicate_detection_history_time_window = (
kwargs.pop("duplicate_detection_history_time_window", self.duplicate_detection_history_time_window)
)
self._internal_td.enable_batched_operations = (
kwargs.pop("enable_batched_operations", self.enable_batched_operations)
)
self._internal_td.size_in_bytes = kwargs.pop("size_in_bytes", self.size_in_bytes)
authorization_rules = kwargs.pop("authorization_rules", self.authorization_rules)
self._internal_td.authorization_rules = (
[r._to_internal_entity() for r in authorization_rules]
if authorization_rules
else authorization_rules
)
self._internal_td.status = kwargs.pop("status", self.status)
self._internal_td.support_ordering = kwargs.pop("support_ordering", self.support_ordering)
self._internal_td.auto_delete_on_idle = avoid_timedelta_overflow( # type: ignore
kwargs.pop("auto_delete_on_idle", self.auto_delete_on_idle)
)
self._internal_td.enable_partitioning = kwargs.pop("enable_partitioning", self.enable_partitioning)
self._internal_td.entity_availability_status = kwargs.pop("availability_status", self.availability_status)
self._internal_td.enable_express = kwargs.pop("enable_express", self.enable_express)
self._internal_td.user_metadata = kwargs.pop("user_metadata", self.user_metadata)
self._internal_td.max_message_size_in_kilobytes = kwargs.pop(
"max_message_size_in_kilobytes",
self.max_message_size_in_kilobytes
)
return self._internal_td
class TopicRuntimeProperties(object):
"""Runtime properties of a Service Bus topic resource."""
def __init__(
self,
):
# type: () -> None
self._name = None # type: Optional[str]
self._internal_td = None # type: Optional[InternalTopicDescription]
@classmethod
def _from_internal_entity(cls, name, internal_td):
# type: (str, InternalTopicDescription) -> TopicRuntimeProperties
qd = cls()
qd._name = name
qd._internal_td = internal_td
return qd
@property
def name(self):
"""The name of the topic.
:rtype: str
"""
return self._name
@property
def accessed_at_utc(self):
"""Last time a message was sent, or the last time there was a receive request
:rtype: ~datetime.datetime
"""
return self._internal_td.accessed_at
@property
def created_at_utc(self):
"""The exact time the queue was created.
:rtype: ~datetime.datetime
"""
return self._internal_td.created_at
@property
def updated_at_utc(self):
"""The exact time the entity was updated.
:rtype: ~datetime.datetime
"""
return self._internal_td.updated_at
@property
def size_in_bytes(self):
"""The current size of the entity in bytes.
:rtype: int
"""
return self._internal_td.size_in_bytes
@property
def subscription_count(self):
"""The number of subscriptions in the topic.
:rtype: int
"""
return self._internal_td.subscription_count
@property
def scheduled_message_count(self):
"""Number of scheduled messages.
:rtype: int
"""
return self._internal_td.message_count_details.scheduled_message_count
class SubscriptionProperties(DictMixin): # pylint:disable=too-many-instance-attributes
"""Properties of a Service Bus topic subscription resource.
:ivar name: Name of the subscription.
:type name: str
:ivar lock_duration: ISO 8601 timespan duration of a peek-lock; that is, the amount of time
that the message is locked for other receivers. The maximum value for LockDuration is 5
minutes; the default value is 1 minute.
:type lock_duration: ~datetime.timedelta
    :ivar requires_session: A value that indicates whether the subscription supports the concept of
sessions.
:type requires_session: bool
:ivar default_message_time_to_live: ISO 8601 default message timespan to live value. This is
the duration after which the message expires, starting from when the message is sent to Service
Bus. This is the default value used when TimeToLive is not set on a message itself.
:type default_message_time_to_live: ~datetime.timedelta
:ivar dead_lettering_on_message_expiration: A value that indicates whether this subscription
has dead letter support when a message expires.
:type dead_lettering_on_message_expiration: bool
:ivar dead_lettering_on_filter_evaluation_exceptions: A value that indicates whether this
        subscription has dead letter support on filter evaluation exceptions.
:type dead_lettering_on_filter_evaluation_exceptions: bool
:ivar max_delivery_count: The maximum delivery count. A message is automatically deadlettered
after this number of deliveries. Default value is 10.
:type max_delivery_count: int
:ivar enable_batched_operations: Value that indicates whether server-side batched operations
are enabled.
:type enable_batched_operations: bool
:ivar status: Status of a Service Bus resource. Possible values include: "Active", "Creating",
"Deleting", "Disabled", "ReceiveDisabled", "Renaming", "Restoring", "SendDisabled", "Unknown".
:type status: str or ~azure.servicebus.management.EntityStatus
:ivar forward_to: The name of the recipient entity to which all the messages sent to the
        subscription are forwarded.
:type forward_to: str
:ivar user_metadata: Metadata associated with the subscription. Maximum number of characters
is 1024.
:type user_metadata: str
:ivar forward_dead_lettered_messages_to: The name of the recipient entity to which all the
        dead-lettered messages of the subscription are forwarded.
:type forward_dead_lettered_messages_to: str
:ivar auto_delete_on_idle: ISO 8601 timeSpan idle interval after which the subscription is
automatically deleted. The minimum duration is 5 minutes.
:type auto_delete_on_idle: ~datetime.timedelta
:ivar availability_status: Availability status of the entity. Possible values include:
"Available", "Limited", "Renaming", "Restoring", "Unknown".
:type availability_status: str or
~azure.servicebus.management.EntityAvailabilityStatus
"""
def __init__(self, name, **kwargs):
# type: (str, Any) -> None
self.name = name
self._internal_sd = None # type: Optional[InternalSubscriptionDescription]
extraction_missing_args = [] # type: List[str]
extract_kwarg = functools.partial(
extract_kwarg_template, kwargs, extraction_missing_args
)
self.lock_duration = extract_kwarg("lock_duration")
self.requires_session = extract_kwarg("requires_session")
self.default_message_time_to_live = extract_kwarg(
"default_message_time_to_live"
)
self.dead_lettering_on_message_expiration = extract_kwarg(
"dead_lettering_on_message_expiration"
)
self.dead_lettering_on_filter_evaluation_exceptions = extract_kwarg(
"dead_lettering_on_filter_evaluation_exceptions"
)
self.max_delivery_count = extract_kwarg("max_delivery_count")
self.enable_batched_operations = extract_kwarg("enable_batched_operations")
self.status = extract_kwarg("status")
self.forward_to = extract_kwarg("forward_to")
self.user_metadata = extract_kwarg("user_metadata")
self.forward_dead_lettered_messages_to = extract_kwarg(
"forward_dead_lettered_messages_to"
)
self.auto_delete_on_idle = extract_kwarg("auto_delete_on_idle")
self.availability_status = extract_kwarg("availability_status")
validate_extraction_missing_args(extraction_missing_args)
@classmethod
def _from_internal_entity(cls, name, internal_subscription):
# type: (str, InternalSubscriptionDescription) -> SubscriptionProperties
subscription = cls(
name,
lock_duration=internal_subscription.lock_duration,
requires_session=internal_subscription.requires_session,
default_message_time_to_live=internal_subscription.default_message_time_to_live,
dead_lettering_on_message_expiration=internal_subscription.dead_lettering_on_message_expiration,
dead_lettering_on_filter_evaluation_exceptions=
internal_subscription.dead_lettering_on_filter_evaluation_exceptions,
max_delivery_count=internal_subscription.max_delivery_count,
enable_batched_operations=internal_subscription.enable_batched_operations,
status=internal_subscription.status,
forward_to=internal_subscription.forward_to,
user_metadata=internal_subscription.user_metadata,
forward_dead_lettered_messages_to=internal_subscription.forward_dead_lettered_messages_to,
auto_delete_on_idle=internal_subscription.auto_delete_on_idle,
availability_status=internal_subscription.entity_availability_status,
)
subscription._internal_sd = deepcopy(internal_subscription)
return subscription
def _to_internal_entity(self, fully_qualified_namespace, kwargs=None):
# type: (str, Optional[Dict]) -> InternalSubscriptionDescription
kwargs = kwargs or {}
if not self._internal_sd:
self._internal_sd = InternalSubscriptionDescription()
self._internal_sd.lock_duration = kwargs.pop("lock_duration", self.lock_duration)
self._internal_sd.requires_session = kwargs.pop("requires_session", self.requires_session)
self._internal_sd.default_message_time_to_live = avoid_timedelta_overflow( # type: ignore
kwargs.pop("default_message_time_to_live", self.default_message_time_to_live)
)
self._internal_sd.dead_lettering_on_message_expiration = (
kwargs.pop("dead_lettering_on_message_expiration", self.dead_lettering_on_message_expiration)
)
self._internal_sd.dead_lettering_on_filter_evaluation_exceptions = (
kwargs.pop(
"dead_lettering_on_filter_evaluation_exceptions",
self.dead_lettering_on_filter_evaluation_exceptions
)
)
self._internal_sd.max_delivery_count = kwargs.pop("max_delivery_count", self.max_delivery_count)
self._internal_sd.enable_batched_operations = (
kwargs.pop("enable_batched_operations", self.enable_batched_operations)
)
self._internal_sd.status = kwargs.pop("status", self.status)
forward_to = kwargs.pop("forward_to", self.forward_to)
self._internal_sd.forward_to = _normalize_entity_path_to_full_path_if_needed(
forward_to,
fully_qualified_namespace
)
forward_dead_lettered_messages_to = (
kwargs.pop("forward_dead_lettered_messages_to", self.forward_dead_lettered_messages_to)
)
self._internal_sd.forward_dead_lettered_messages_to = _normalize_entity_path_to_full_path_if_needed(
forward_dead_lettered_messages_to,
fully_qualified_namespace
)
self._internal_sd.user_metadata = kwargs.pop("user_metadata", self.user_metadata)
self._internal_sd.auto_delete_on_idle = avoid_timedelta_overflow( # type: ignore
kwargs.pop("auto_delete_on_idle", self.auto_delete_on_idle)
)
self._internal_sd.entity_availability_status = kwargs.pop("availability_status", self.availability_status)
return self._internal_sd
class SubscriptionRuntimeProperties(object):
"""Runtime properties of a Service Bus topic subscription resource."""
def __init__(self):
# type: () -> None
self._internal_sd = None # type: Optional[InternalSubscriptionDescription]
self._name = None # type: Optional[str]
@classmethod
def _from_internal_entity(cls, name, internal_subscription):
# type: (str, InternalSubscriptionDescription) -> SubscriptionRuntimeProperties
subscription = cls()
subscription._name = name
subscription._internal_sd = internal_subscription
return subscription
@property
def name(self):
"""Name of subscription
:rtype: str
"""
return self._name
@property
def accessed_at_utc(self):
"""Last time a message was sent, or the last time there was a receive request
:rtype: ~datetime.datetime
"""
return self._internal_sd.accessed_at
@property
def created_at_utc(self):
"""The exact time the subscription was created.
:rtype: ~datetime.datetime
"""
return self._internal_sd.created_at
@property
def updated_at_utc(self):
"""The exact time the entity is updated.
:rtype: ~datetime.datetime
"""
return self._internal_sd.updated_at
@property
def total_message_count(self):
"""The number of messages in the subscription.
:rtype: int
"""
return self._internal_sd.message_count
@property
def active_message_count(self):
"""Number of active messages in the subscription.
:rtype: int
"""
return self._internal_sd.message_count_details.active_message_count
@property
def dead_letter_message_count(self):
"""Number of messages that are dead lettered.
:rtype: int
"""
return self._internal_sd.message_count_details.dead_letter_message_count
@property
def transfer_dead_letter_message_count(self):
"""Number of messages transferred into dead letters.
:rtype: int
"""
return (
self._internal_sd.message_count_details.transfer_dead_letter_message_count
)
@property
def transfer_message_count(self):
"""Number of messages transferred to another queue, topic, or subscription.
:rtype: int
"""
return self._internal_sd.message_count_details.transfer_message_count
class RuleProperties(DictMixin):
"""Properties of a topic subscription rule.
:param name: Name of the rule.
:type name: str
:ivar filter: The filter of the rule.
:type filter: Union[~azure.servicebus.management.CorrelationRuleFilter,
~azure.servicebus.management.SqlRuleFilter]
:ivar action: The action of the rule.
:type action: Optional[~azure.servicebus.management.SqlRuleAction]
:ivar created_at_utc: The exact time the rule was created.
:type created_at_utc: ~datetime.datetime
"""
def __init__(self, name, **kwargs):
# type: (str, Any) -> None
self.name = name
self._internal_rule = None # type: Optional[InternalRuleDescription]
extraction_missing_args = [] # type: List[str]
extract_kwarg = functools.partial(
extract_kwarg_template, kwargs, extraction_missing_args
)
self.filter = extract_kwarg("filter")
self.action = extract_kwarg("action")
self.created_at_utc = extract_kwarg("created_at_utc")
validate_extraction_missing_args(extraction_missing_args)
@classmethod
def _from_internal_entity(cls, name, internal_rule):
# type: (str, InternalRuleDescription) -> RuleProperties
rule = cls(
name,
filter=RULE_CLASS_MAPPING[type(internal_rule.filter)]._from_internal_entity(
internal_rule.filter
)
if internal_rule.filter
and isinstance(internal_rule.filter, tuple(RULE_CLASS_MAPPING.keys()))
else None,
action=RULE_CLASS_MAPPING[type(internal_rule.action)]._from_internal_entity(
internal_rule.action
)
if internal_rule.action
and isinstance(internal_rule.action, tuple(RULE_CLASS_MAPPING.keys()))
else None,
created_at_utc=internal_rule.created_at,
)
rule._internal_rule = deepcopy(internal_rule)
return rule
def _to_internal_entity(self, kwargs=None):
# type: (Optional[Dict]) -> InternalRuleDescription
kwargs = kwargs or {}
if not self._internal_rule:
self._internal_rule = InternalRuleDescription()
rule_filter = kwargs.pop("filter", self.filter)
self._internal_rule.filter = rule_filter._to_internal_entity() if rule_filter else TRUE_FILTER # type: ignore
action = kwargs.pop("action", self.action)
self._internal_rule.action = (
action._to_internal_entity() if action else EMPTY_RULE_ACTION
)
self._internal_rule.created_at = kwargs.pop("created_at_utc", self.created_at_utc)
self._internal_rule.name = kwargs.pop("name", self.name)
return self._internal_rule
class CorrelationRuleFilter(object):
"""Represents the correlation filter expression.
:param correlation_id: Identifier of the correlation.
:type correlation_id: str
:param message_id: Identifier of the message.
:type message_id: str
:param to: Address to send to.
:type to: str
:param reply_to: Address of the queue to reply to.
:type reply_to: str
:param label: Application specific label.
:type label: str
:param session_id: Session identifier.
:type session_id: str
:param reply_to_session_id: Session identifier to reply to.
:type reply_to_session_id: str
:param content_type: Content type of the message.
:type content_type: str
:param properties: dictionary object for custom filters
:type properties: dict[str, Union[str, int, float, bool, datetime, timedelta]]
"""
def __init__(self, **kwargs):
# type: (Any) -> None
self.correlation_id = kwargs.get("correlation_id", None)
self.message_id = kwargs.get("message_id", None)
self.to = kwargs.get("to", None)
self.reply_to = kwargs.get("reply_to", None)
self.label = kwargs.get("label", None)
self.session_id = kwargs.get("session_id", None)
self.reply_to_session_id = kwargs.get("reply_to_session_id", None)
self.content_type = kwargs.get("content_type", None)
self.properties = kwargs.get("properties", None)
@classmethod
def _from_internal_entity(cls, internal_correlation_filter):
# type: (InternalCorrelationFilter) -> CorrelationRuleFilter
correlation_filter = cls()
correlation_filter.correlation_id = internal_correlation_filter.correlation_id
correlation_filter.message_id = internal_correlation_filter.message_id
correlation_filter.to = internal_correlation_filter.to
correlation_filter.reply_to = internal_correlation_filter.reply_to
correlation_filter.label = internal_correlation_filter.label
correlation_filter.session_id = internal_correlation_filter.session_id
correlation_filter.reply_to_session_id = (
internal_correlation_filter.reply_to_session_id
)
correlation_filter.content_type = internal_correlation_filter.content_type
correlation_filter.properties = (
OrderedDict(
(kv.key, kv.value) for kv in internal_correlation_filter.properties
)
if internal_correlation_filter.properties
else OrderedDict()
)
return correlation_filter
def _to_internal_entity(self):
# type: () -> InternalCorrelationFilter
internal_entity = InternalCorrelationFilter()
internal_entity.correlation_id = self.correlation_id
internal_entity.message_id = self.message_id
internal_entity.to = self.to
internal_entity.reply_to = self.reply_to
internal_entity.label = self.label
internal_entity.session_id = self.session_id
internal_entity.reply_to_session_id = self.reply_to_session_id
internal_entity.content_type = self.content_type
internal_entity.properties = (
[KeyValue(key=key, value=value) for key, value in self.properties.items()]
if self.properties
else None
)
return internal_entity
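def _example_correlation_rule_filter():
    # Illustrative sketch only, not part of the management API: shows how the
    # CorrelationRuleFilter above is typically populated. Every keyword
    # argument is optional, and fields left as None are not used for matching.
    # The identifier, label and property values here are made up.
    return CorrelationRuleFilter(
        correlation_id="order-1234",
        label="invoice",
        properties={"region": "emea", "priority": 3},
    )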
class SqlRuleFilter(object):
"""Represents a filter which is a composition of an expression and an action
that is executed in the pub/sub pipeline.
.. admonition:: Example:
.. code-block:: python
:caption: Create SqlRuleFilter.
sql_filter = SqlRuleFilter("property1 = 'value'")
sql_filter_parametrized = SqlRuleFilter(
"property1 = @param1 AND property2 = @param2",
parameters={
"@param1": "value",
"@param2" : 1
}
)
:param sql_expression: The SQL expression. e.g. MyProperty='ABC'
:type sql_expression: str
:param parameters: Sets the value of the sql expression parameters if any.
:type parameters: Dict[str, Union[str, int, float, bool, datetime, timedelta]]
"""
def __init__(self, sql_expression=None, parameters=None):
# type: (Optional[str], Optional[Dict[str, Union[str, int, float, bool, datetime, timedelta]]]) -> None
self.sql_expression = sql_expression
self.parameters = parameters
self.requires_preprocessing = True
@classmethod
def _from_internal_entity(cls, internal_sql_rule_filter):
sql_rule_filter = cls()
sql_rule_filter.sql_expression = internal_sql_rule_filter.sql_expression
sql_rule_filter.parameters = (
OrderedDict(
(kv.key, kv.value) for kv in internal_sql_rule_filter.parameters
)
if internal_sql_rule_filter.parameters
else OrderedDict()
)
sql_rule_filter.requires_preprocessing = (
internal_sql_rule_filter.requires_preprocessing
)
return sql_rule_filter
def _to_internal_entity(self):
# type: () -> InternalSqlFilter
internal_entity = InternalSqlFilter(sql_expression=self.sql_expression)
internal_entity.parameters = (
[
KeyValue(key=key, value=value) for key, value in self.parameters.items() # type: ignore
]
if self.parameters
else None
)
internal_entity.compatibility_level = RULE_SQL_COMPATIBILITY_LEVEL
internal_entity.requires_preprocessing = self.requires_preprocessing
return internal_entity
class TrueRuleFilter(SqlRuleFilter):
"""A sql filter with a sql expression that is always True"""
def __init__(self):
# type: () -> None
super(TrueRuleFilter, self).__init__("1=1", None)
def _to_internal_entity(self):
internal_entity = InternalTrueFilter()
internal_entity.sql_expression = self.sql_expression
internal_entity.requires_preprocessing = True
internal_entity.compatibility_level = RULE_SQL_COMPATIBILITY_LEVEL
return internal_entity
class FalseRuleFilter(SqlRuleFilter):
"""A sql filter with a sql expression that is always True"""
def __init__(self):
# type: () -> None
super(FalseRuleFilter, self).__init__("1>1", None)
def _to_internal_entity(self):
internal_entity = InternalFalseFilter()
internal_entity.sql_expression = self.sql_expression
internal_entity.requires_preprocessing = True
internal_entity.compatibility_level = RULE_SQL_COMPATIBILITY_LEVEL
return internal_entity
class SqlRuleAction(object):
"""Represents set of actions written in SQL language-based syntax that is
    performed against a ServiceBus.Messaging.BrokeredMessage.
:param sql_expression: SQL expression. e.g. MyProperty='ABC'
:type sql_expression: str
:param parameters: Sets the value of the sql expression parameters if any.
:type parameters: Dict[str, Union[str, int, float, bool, datetime, timedelta]]
:type requires_preprocessing: bool
"""
def __init__(self, sql_expression=None, parameters=None):
# type: (Optional[str], Optional[Dict[str, Union[str, int, float, bool, datetime, timedelta]]]) -> None
self.sql_expression = sql_expression
self.parameters = parameters
self.requires_preprocessing = True
@classmethod
def _from_internal_entity(cls, internal_sql_rule_action):
sql_rule_action = cls()
sql_rule_action.sql_expression = internal_sql_rule_action.sql_expression
sql_rule_action.parameters = (
OrderedDict(
(kv.key, kv.value) for kv in internal_sql_rule_action.parameters
)
if internal_sql_rule_action.parameters
else OrderedDict()
)
sql_rule_action.requires_preprocessing = (
internal_sql_rule_action.requires_preprocessing
)
return sql_rule_action
def _to_internal_entity(self):
internal_entity = InternalSqlRuleAction(sql_expression=self.sql_expression)
internal_entity.parameters = (
[KeyValue(key=key, value=value) for key, value in self.parameters.items()]
if self.parameters
else None
)
internal_entity.compatibility_level = RULE_SQL_COMPATIBILITY_LEVEL
internal_entity.requires_preprocessing = self.requires_preprocessing
return internal_entity
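def _example_sql_rule_pair():
    # Illustrative sketch only, not part of the management API: a parametrized
    # SqlRuleFilter selects matching messages and an optional SqlRuleAction
    # annotates them, mirroring the filter/action pair carried by
    # RuleProperties above. The expressions and parameter values are made up.
    rule_filter = SqlRuleFilter(
        "priority > @threshold", parameters={"@threshold": 3}
    )
    rule_action = SqlRuleAction("SET routed = @flag", parameters={"@flag": "true"})
    return rule_filter, rule_action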
RULE_CLASS_MAPPING = {
InternalSqlRuleAction: SqlRuleAction,
# InternalEmptyRuleAction: None,
InternalCorrelationFilter: CorrelationRuleFilter,
InternalSqlFilter: SqlRuleFilter,
InternalTrueFilter: TrueRuleFilter,
InternalFalseFilter: FalseRuleFilter,
} # type: Dict[Type[Model], Type]
EMPTY_RULE_ACTION = InternalEmptyRuleAction()
TRUE_FILTER = TrueRuleFilter()
class AuthorizationRule(object):
"""Authorization rule of an entity.
:param type: The authorization type.
:type type: str
:param claim_type: The claim type.
:type claim_type: str
:param claim_value: The claim value.
:type claim_value: str
:param rights: Access rights of the entity. Values are 'Send', 'Listen', or 'Manage'.
:type rights: list[AccessRights]
:param created_at_utc: The date and time when the authorization rule was created.
:type created_at_utc: ~datetime.datetime
:param modified_at_utc: The date and time when the authorization rule was modified.
:type modified_at_utc: ~datetime.datetime
:param key_name: The authorization rule key name.
:type key_name: str
:param primary_key: The primary key of the authorization rule.
:type primary_key: str
    :param secondary_key: The secondary key of the authorization rule.
:type secondary_key: str
"""
def __init__(self, **kwargs):
# type: (Any) -> None
self.type = kwargs.get("type", None)
self.claim_type = kwargs.get("claim_type", None)
self.claim_value = kwargs.get("claim_value", None)
self.rights = kwargs.get("rights", None)
self.created_at_utc = kwargs.get("created_at_utc", None)
self.modified_at_utc = kwargs.get("modified_at_utc", None)
self.key_name = kwargs.get("key_name", None)
self.primary_key = kwargs.get("primary_key", None)
self.secondary_key = kwargs.get("secondary_key", None)
@classmethod
def _from_internal_entity(cls, internal_authorization_rule):
authorization_rule = cls()
authorization_rule.claim_type = internal_authorization_rule.claim_type
authorization_rule.claim_value = internal_authorization_rule.claim_value
authorization_rule.rights = internal_authorization_rule.rights
authorization_rule.created_at_utc = internal_authorization_rule.created_time
authorization_rule.modified_at_utc = internal_authorization_rule.modified_time
authorization_rule.key_name = internal_authorization_rule.key_name
authorization_rule.primary_key = internal_authorization_rule.primary_key
authorization_rule.secondary_key = internal_authorization_rule.secondary_key
return authorization_rule
def _to_internal_entity(self):
# type: () -> InternalAuthorizationRule
internal_entity = InternalAuthorizationRule()
internal_entity.claim_type = self.claim_type
internal_entity.claim_value = self.claim_value
internal_entity.rights = self.rights
internal_entity.created_time = self.created_at_utc
internal_entity.modified_time = self.modified_at_utc
internal_entity.key_name = self.key_name
internal_entity.primary_key = self.primary_key
internal_entity.secondary_key = self.secondary_key
return internal_entity
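def _example_authorization_rule():
    # Illustrative sketch only: builds the AuthorizationRule above using the
    # fields documented in its docstring. The key name, placeholder keys and
    # rights are made up; in practice these rules are usually returned by the
    # management service rather than constructed by hand.
    return AuthorizationRule(
        claim_type="SharedAccessKey",
        rights=["Listen", "Send"],
        key_name="example-policy",
        primary_key="<primary-key-placeholder>",
        secondary_key="<secondary-key-placeholder>",
    )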
|
|
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for all nameserver health checks."""
__author__ = 'tstromberg@google.com (Thomas Stromberg)'
import random
import sys
import time
import util
from dns import rcode
WILDCARD_DOMAINS = ('live.com.', 'blogspot.com.', 'wordpress.com.')
LIKELY_HIJACKS = ['www.google.com.', 'windowsupdate.microsoft.com.', 'www.paypal.com.']
# How many checks to consider when calculating ns check_duration
SHARED_CACHE_TIMEOUT_MULTIPLIER = 1.25
ROOT_SERVER_TIMEOUT_MULTIPLIER = 0.5
CENSORSHIP_TIMEOUT = 30
MAX_STORE_ATTEMPTS = 4
TOTAL_WILDCARDS_TO_STORE = 2
MAX_PORT_BEHAVIOR_TRIES = 2
FATAL_RCODES = ['REFUSED', 'NOTAUTH']
class NameServerHealthChecks(object):
"""Health checks for a nameserver."""
def TestAnswers(self, record_type, record, expected, critical=False, timeout=None):
"""Test to see that an answer returns correct IP's.
Args:
record_type: text record type for NS query (A, CNAME, etc)
record: string to query for
expected: tuple of strings expected in all answers
      critical: Whether a failure of this query should count against the server.
timeout: timeout for query in seconds (int)
Returns:
(is_broken, error_msg, duration)
"""
is_broken = False
unmatched_answers = []
if not timeout:
timeout = self.health_timeout
(response, duration, error_msg) = self.TimedRequest(record_type, record, timeout)
if response:
response_code = rcode.to_text(response.rcode())
if response_code in FATAL_RCODES:
error_msg = 'Responded with: %s' % response_code
if critical:
is_broken = True
elif not response.answer:
# Avoid preferring broken DNS servers that respond quickly
duration = util.SecondsToMilliseconds(self.health_timeout)
error_msg = 'No answer (%s): %s' % (response_code, record)
is_broken = True
else:
found_usable_record = False
for answer in response.answer:
if found_usable_record:
break
# Process the first sane rdata object available in the answers
for rdata in answer:
# CNAME
if rdata.rdtype == 5:
reply = str(rdata.target)
# A Record
elif rdata.rdtype == 1:
reply = str(rdata.address)
else:
continue
found_usable_record = True
found_match = False
for string in expected:
if reply.startswith(string) or reply.endswith(string):
found_match = True
break
if not found_match:
unmatched_answers.append(reply)
if unmatched_answers:
hijack_text = ', '.join(unmatched_answers).rstrip('.')
if record in LIKELY_HIJACKS:
error_msg = '%s is hijacked: %s' % (record.rstrip('.'), hijack_text)
else:
error_msg = '%s appears incorrect: %s' % (record.rstrip('.'), hijack_text)
else:
if not error_msg:
error_msg = 'No response'
is_broken = True
return (is_broken, error_msg, duration)
def TestBindVersion(self):
"""Test for BIND version. This acts as a pretty decent ping."""
(unused_response, duration, error_msg) = self.RequestVersion()
# Sometimes nameservers aren't able to respond to this request in a way that
    # dnspython likes. Let's just always call this one good and use it for latency.
return (False, None, duration)
def TestNodeId(self):
"""Get the current node id."""
self.RequestNodeId()
return (False, False, 0.0)
def TestNegativeResponse(self, prefix=None):
"""Test for NXDOMAIN hijaaking."""
is_broken = False
if prefix:
hostname = prefix
warning_suffix = ' (%s)' % prefix
else:
hostname = 'test'
warning_suffix = ''
poison_test = '%s.nb%s.google.com.' % (hostname, random.random())
(response, duration, error_msg) = self.TimedRequest('A', poison_test,
timeout=self.health_timeout*2)
if not response:
if not error_msg:
error_msg = 'No response'
is_broken = True
elif response.answer:
error_msg = 'NXDOMAIN Hijacking' + warning_suffix
return (is_broken, error_msg, duration)
def TestRootNsResponse(self):
"""Test a . NS response.
NOTE: This is a bad way to gauge performance of a nameserver, as the
response length varies between nameserver configurations.
"""
is_broken = False
error_msg = None
(response, duration, error_msg) = self.TimedRequest('NS', '.')
if not response:
response_code = None
is_broken = True
if not error_msg:
error_msg = 'No response'
else:
response_code = rcode.to_text(response.rcode())
if response_code in FATAL_RCODES:
error_msg = response_code
is_broken = True
return (is_broken, error_msg, duration)
def TestWwwNegativeResponse(self):
return self.TestNegativeResponse(prefix='www')
def TestARootServerResponse(self):
    return self.TestAnswers('A', 'a.root-servers.net.', ('198.41.0.4',), critical=True)
def TestPortBehavior(self, tries=0):
"""This is designed to be called multiple times to retry bad results."""
if self.port_behavior:
if 'UNKNOWN' not in self.port_behavior:
return (False, None, 0)
tries += 1
response = self.TimedRequest('TXT', 'porttest.dns-oarc.net.', timeout=5)[0]
if response and response.answer:
if len(response.answer) > 1:
text = response.answer[1].to_rdataset().to_text()
self.port_behavior = text.split('"')[1]
if (not self.port_behavior or 'UNKNOWN' in self.port_behavior) and tries < MAX_PORT_BEHAVIOR_TRIES:
time.sleep(1)
return self.TestPortBehavior(tries=tries)
# print "%s behavior: %s (tries=%s)" % (self, self.port_behavior, tries)
return (False, None, 0)
def StoreWildcardCache(self):
"""Store a set of wildcard records."""
timeout = self.health_timeout * SHARED_CACHE_TIMEOUT_MULTIPLIER
attempted = []
while len(self.cache_checks) != TOTAL_WILDCARDS_TO_STORE:
if len(attempted) == MAX_STORE_ATTEMPTS:
self.disabled = 'Unable to get uncached results for: %s' % ', '.join(attempted)
return False
domain = random.choice(WILDCARD_DOMAINS)
hostname = 'namebench%s.%s' % (random.randint(1, 2**32), domain)
attempted.append(hostname)
response = self.TimedRequest('A', hostname, timeout=timeout)[0]
if response and response.answer:
self.cache_checks.append((hostname, response, self.timer()))
else:
sys.stdout.write('x')
def TestSharedCache(self, other_ns):
"""Is this nameserver sharing a cache with another nameserver?
Args:
other_ns: A nameserver to compare it to.
Returns:
      The other NameServer object if a shared cache is detected, otherwise False.
"""
timeout = self.health_timeout * SHARED_CACHE_TIMEOUT_MULTIPLIER
checked = []
shared = False
if self.disabled or other_ns.disabled:
return False
if not other_ns.cache_checks:
print '%s has no cache checks (disabling - how did this happen?)' % other_ns
other_ns.disabled = 'Unable to perform cache checks.'
return False
for (ref_hostname, ref_response, ref_timestamp) in other_ns.cache_checks:
response = self.TimedRequest('A', ref_hostname, timeout=timeout)[0]
# Retry once - this *may* cause false positives however, as the TTL may be updated.
if not response or not response.answer:
sys.stdout.write('x')
response = self.TimedRequest('A', ref_hostname, timeout=timeout)[0]
if response and response.answer:
ref_ttl = ref_response.answer[0].ttl
ttl = response.answer[0].ttl
delta = abs(ref_ttl - ttl)
query_age = self.timer() - ref_timestamp
delta_age_delta = abs(query_age - delta)
if delta > 0 and delta_age_delta < 2:
return other_ns
else:
sys.stdout.write('!')
checked.append(ref_hostname)
if not checked:
self.AddFailure('Failed to test %s wildcard caches' % len(other_ns.cache_checks))
return shared
def CheckCensorship(self, tests):
"""Check to see if results from a nameserver are being censored."""
for (check, expected) in tests:
(req_type, req_name) = check.split(' ')
expected_values = expected.split(',')
result = self.TestAnswers(req_type.upper(), req_name, expected_values,
timeout=CENSORSHIP_TIMEOUT)
warning = result[1]
if warning:
self.AddWarning(warning, penalty=False)
def CheckHealth(self, sanity_checks=None, fast_check=False, final_check=False, port_check=False):
"""Qualify a nameserver to see if it is any good."""
is_fatal = False
if fast_check:
tests = [(self.TestARootServerResponse, [])]
is_fatal = True
sanity_checks = []
elif final_check:
tests = [(self.TestWwwNegativeResponse, []), (self.TestPortBehavior, []), (self.TestNodeId, [])]
elif port_check:
tests = [(self.TestPortBehavior, []), (self.TestNodeId, [])]
else:
# Put the bind version here so that we have a great minimum latency measurement.
tests = [(self.TestNegativeResponse, []), (self.TestBindVersion, [])]
if sanity_checks:
for (check, expected_value) in sanity_checks:
(req_type, req_name) = check.split(' ')
expected_values = expected_value.split(',')
tests.append((self.TestAnswers, [req_type.upper(), req_name, expected_values]))
for test in tests:
(function, args) = test
(is_broken, warning, duration) = function(*args)
if args:
test_name = args[1]
else:
test_name = function.__name__
self.checks.append((test_name, is_broken, warning, duration))
if is_broken:
self.AddFailure('%s: %s' % (test_name, warning), fatal=is_fatal)
if warning:
# Special case for NXDOMAIN de-duplication
if not ('NXDOMAIN' in warning and 'NXDOMAIN Hijacking' in self.warnings):
self.AddWarning(warning)
if self.disabled:
break
return self.disabled
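def _shared_cache_ttl_heuristic(ref_ttl, ttl, query_age):
  """Standalone sketch, not called by namebench itself.

  Restates the heuristic used by TestSharedCache above: two nameservers look
  like they share a cache when the TTL of a previously stored answer has
  dropped, and the size of that drop is within two seconds of the wall-clock
  time elapsed since the reference query was made.
  """
  delta = abs(ref_ttl - ttl)
  return delta > 0 and abs(query_age - delta) < 2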
|
|
import commands
import datetime
import os
import platform
import shlex
import sys
import time
if sys.version_info >= (2, 7):
import unittest
else:
import unittest2 as unittest
from resource_suite import ResourceBase
import lib
class Test_Catalog(ResourceBase, unittest.TestCase):
def setUp(self):
super(Test_Catalog, self).setUp()
def tearDown(self):
super(Test_Catalog, self).tearDown()
###################
# izonereport
###################
def test_izonereport_and_validate(self):
jsonschema_installed = True
if lib.get_os_distribution() == 'Ubuntu' and lib.get_os_distribution_version_major() == '12':
jsonschema_installed = False
# bad URL
self.admin.assert_icommand("izonereport > out.txt", use_unsafe_shell=True)
if jsonschema_installed:
lib.assert_command('python ../../iRODS/scripts/python/validate_json.py out.txt https://example.org/badurl', 'STDERR_MULTILINE',
['WARNING: Validation Failed', 'ValueError: No JSON object could be decoded'], desired_rc=0)
else:
lib.assert_command('python ../../iRODS/scripts/python/validate_json.py out.txt https://example.org/badurl',
'STDERR_SINGLELINE', 'jsonschema not installed', desired_rc=0)
# good URL
self.admin.assert_icommand("izonereport > out.txt", use_unsafe_shell=True)
if jsonschema_installed:
lib.assert_command('python ../../iRODS/scripts/python/validate_json.py out.txt https://schemas.irods.org/configuration/v2/zone_bundle.json',
'STDOUT_MULTILINE', ['Validating', '... Success'], desired_rc=0)
else:
lib.assert_command('python ../../iRODS/scripts/python/validate_json.py out.txt https://schemas.irods.org/configuration/v2/zone_bundle.json',
'STDERR_SINGLELINE', 'jsonschema not installed', desired_rc=0)
# cleanup
os.remove('out.txt')
###################
# icd
###################
def test_empty_icd(self):
self.admin.assert_icommand("ils -L", 'STDOUT_SINGLELINE', "test") # whatever
self.admin.assert_icommand("icd " + self.testdir) # get into subdir
self.admin.assert_icommand("icd") # just go home
self.admin.assert_icommand("ils", 'STDOUT_SINGLELINE', "/" + self.admin.zone_name + "/home/" + self.admin.username + ":")
def test_empty_icd_verbose(self):
self.admin.assert_icommand("icd " + self.testdir) # get into subdir
self.admin.assert_icommand("icd -v", 'STDOUT_SINGLELINE', "Deleting (if it exists) session envFile:")
self.admin.assert_icommand("ils", 'STDOUT_SINGLELINE', "/" + self.admin.zone_name + "/home/" + self.admin.username + ":")
def test_icd_to_subdir(self):
self.admin.assert_icommand("icd " + self.testdir) # get into subdir
self.admin.assert_icommand("ils", 'STDOUT_SINGLELINE', "/" + self.admin.zone_name + "/home/" +
self.admin.username + "/" + self.admin._session_id + "/" + self.testdir + ":")
def test_icd_to_parentdir(self):
self.admin.assert_icommand("icd ..") # go to parent
self.admin.assert_icommand("ils", 'STDOUT_SINGLELINE', "/" + self.admin.zone_name + "/home/" + self.admin.username + ":")
def test_icd_to_root(self):
self.admin.assert_icommand("icd /") # go to root
self.admin.assert_icommand("ils", 'STDOUT_SINGLELINE', "/:") # listing
def test_icd_to_root_with_badpath(self):
# go to root with bad path
self.admin.assert_icommand("icd /doesnotexist", 'STDOUT_SINGLELINE', "No such directory (collection):")
###################
# iexit
###################
def test_iexit(self):
self.admin.assert_icommand("iexit") # just go home
def test_iexit_verbose(self):
self.admin.assert_icommand("iexit -v", 'STDOUT_SINGLELINE', "Deleting (if it exists) session envFile:") # home, verbose
def test_iexit_with_bad_option(self):
self.admin.assert_icommand_fail("iexit -z") # run iexit with bad option
def test_iexit_with_bad_parameter(self):
self.admin.assert_icommand_fail("iexit badparameter") # run iexit with bad parameter
###################
# ihelp
###################
def test_local_ihelp(self):
self.admin.assert_icommand('ihelp', 'STDOUT_SINGLELINE', 'The iCommands and a brief description of each:')
def test_local_ihelp_with_help(self):
self.admin.assert_icommand("ihelp -h", 'STDOUT_SINGLELINE', "Display iCommands synopsis") # run ihelp with help
def test_local_ihelp_all(self):
self.admin.assert_icommand("ihelp -a", 'STDOUT_SINGLELINE', "Usage") # run ihelp on all icommands
def test_local_ihelp_with_good_icommand(self):
self.admin.assert_icommand("ihelp ils", 'STDOUT_SINGLELINE', "Usage") # run ihelp with good icommand
def test_local_ihelp_with_bad_icommand(self):
self.admin.assert_icommand_fail("ihelp idoesnotexist") # run ihelp with bad icommand
def test_local_ihelp_with_bad_option(self):
self.admin.assert_icommand_fail("ihelp -z") # run ihelp with bad option
###################
# imkdir
###################
def test_local_imkdir(self):
# local setup
mytestdir = "testingimkdir"
self.admin.assert_icommand_fail("ils -L " + mytestdir, 'STDOUT_SINGLELINE', mytestdir) # should not be listed
self.admin.assert_icommand("imkdir " + mytestdir) # imkdir
self.admin.assert_icommand("ils -L " + mytestdir, 'STDOUT_SINGLELINE', mytestdir) # should be listed
def test_local_imkdir_with_trailing_slash(self):
# local setup
mytestdir = "testingimkdirwithslash"
self.admin.assert_icommand_fail("ils -L " + mytestdir + "/", 'STDOUT_SINGLELINE', mytestdir) # should not be listed
self.admin.assert_icommand("imkdir " + mytestdir + "/") # imkdir
self.admin.assert_icommand("ils -L " + mytestdir, 'STDOUT_SINGLELINE', mytestdir) # should be listed
def test_local_imkdir_with_trailing_slash_already_exists(self):
# local setup
mytestdir = "testingimkdirwithslash"
self.admin.assert_icommand("imkdir " + mytestdir + "/") # imkdir
self.admin.assert_icommand_fail("imkdir " + mytestdir) # should fail, already exists
self.admin.assert_icommand_fail("imkdir " + mytestdir + "/") # should fail, already exists
def test_local_imkdir_when_dir_already_exists(self):
# local setup
mytestdir = "testingimkdiralreadyexists"
self.admin.assert_icommand("imkdir " + mytestdir) # imkdir
self.admin.assert_icommand_fail("imkdir " + mytestdir) # should fail, already exists
def test_local_imkdir_when_file_already_exists(self):
# local setup
self.admin.assert_icommand_fail("imkdir " + self.testfile) # should fail, filename already exists
def test_local_imkdir_with_parent(self):
# local setup
mytestdir = "parent/testingimkdirwithparent"
self.admin.assert_icommand_fail("ils -L " + mytestdir, 'STDOUT_SINGLELINE', mytestdir) # should not be listed
self.admin.assert_icommand("imkdir -p " + mytestdir) # imkdir with parent
self.admin.assert_icommand("ils -L " + mytestdir, 'STDOUT_SINGLELINE', mytestdir) # should be listed
def test_local_imkdir_with_bad_option(self):
self.admin.assert_icommand_fail("imkdir -z") # run imkdir with bad option
###################
# iquest
###################
def test_iquest_totaldatasize(self):
self.admin.assert_icommand("iquest \"select sum(DATA_SIZE) where COLL_NAME like '/" +
self.admin.zone_name + "/home/%'\"", 'STDOUT_SINGLELINE', "DATA_SIZE") # selects total data size
def test_iquest_bad_format(self):
self.admin.assert_icommand("iquest \"bad formatting\"", 'STDERR_SINGLELINE',
"INPUT_ARG_NOT_WELL_FORMED_ERR") # bad request
def test_iquest_incorrect_format_count(self):
self.admin.assert_icommand("iquest \"%s %s\" \"select COLL_NAME where COLL_NAME like '%home%'\"",
'STDERR_SINGLELINE', 'boost::too_few_args: format-string referred to more arguments than were passed')
###################
# isysmeta
###################
def test_isysmeta_no_resc_group__2819(self):
self.admin.assert_icommand("ils -L", 'STDOUT_SINGLELINE', self.testfile) # basic listing
self.admin.assert_icommand_fail("isysmeta ls -l "+self.testfile, 'STDOUT_SINGLELINE',
"resc_group_name:") # should not exist
def test_isysmeta_init_set_and_reset(self):
self.admin.assert_icommand("ils -L", 'STDOUT_SINGLELINE', "pydevtest_testfile.txt") # basic listing
self.admin.assert_icommand("isysmeta ls pydevtest_testfile.txt", 'STDOUT_SINGLELINE',
"data_expiry_ts (expire time): 00000000000: None") # initialized with zeros
offset_seconds = 1
expected_time_string = time.strftime('%Y-%m-%d.%H:%M:%S', time.localtime(offset_seconds))
# set to 1 sec after epoch
self.admin.assert_icommand('isysmeta mod pydevtest_testfile.txt {0}'.format(offset_seconds), "EMPTY")
self.admin.assert_icommand("isysmeta ls pydevtest_testfile.txt", 'STDOUT_SINGLELINE',
"data_expiry_ts (expire time): 00000000001: {0}".format(expected_time_string)) # confirm
self.admin.assert_icommand("isysmeta mod pydevtest_testfile.txt 0", "EMPTY") # reset to zeros
self.admin.assert_icommand("isysmeta ls pydevtest_testfile.txt", 'STDOUT_SINGLELINE',
"data_expiry_ts (expire time): 00000000000: None") # confirm
def test_isysmeta_relative_set(self):
self.admin.assert_icommand("ils -L", 'STDOUT_SINGLELINE', "pydevtest_testfile.txt") # basic listing
self.admin.assert_icommand("isysmeta ls pydevtest_testfile.txt", 'STDOUT_SINGLELINE',
"data_expiry_ts (expire time): 00000000000: None") # initialized with zeros
def check_relative_expiry(offset_seconds):
def get_future_time_string(t):
return (t + datetime.timedelta(0, offset_seconds)).strftime('%Y-%m-%d.%H:%M:%S')
current_time = datetime.datetime.now()
# Race condition: first assert fails if second threshold crossed in between iCAT recording
# current time and this script recording current time
try:
self.admin.assert_icommand("isysmeta ls pydevtest_testfile.txt", 'STDOUT_SINGLELINE',
get_future_time_string(current_time))
# Back script's current_time off by a second, since iCAT command issued before script records
# current_time
except AssertionError:
self.admin.assert_icommand("isysmeta ls pydevtest_testfile.txt", 'STDOUT_SINGLELINE',
get_future_time_string(current_time - datetime.timedelta(0, 1)))
# test seconds syntax
seconds_ahead = 10
self.admin.assert_icommand("isysmeta mod pydevtest_testfile.txt +" + str(seconds_ahead), "EMPTY")
check_relative_expiry(seconds_ahead)
# test hours syntax
seconds_ahead = 60 * 60 # 1 hour
self.admin.assert_icommand("isysmeta mod pydevtest_testfile.txt +1h", "EMPTY")
check_relative_expiry(seconds_ahead)
class Test_CatalogPermissions(ResourceBase, unittest.TestCase):
def setUp(self):
super(Test_CatalogPermissions, self).setUp()
def tearDown(self):
super(Test_CatalogPermissions, self).tearDown()
def test_isysmeta_no_permission(self):
self.user0.assert_icommand('icd /' + self.user0.zone_name + '/home/public') # get into public/
self.user0.assert_icommand('ils -L ', 'STDOUT_SINGLELINE', 'pydevtest_testfile.txt')
self.user0.assert_icommand('isysmeta ls pydevtest_testfile.txt', 'STDOUT_SINGLELINE',
'data_expiry_ts (expire time): 00000000000: None') # initialized with zeros
self.user0.assert_icommand('isysmeta mod pydevtest_testfile.txt 1', 'STDERR_SINGLELINE', 'CAT_NO_ACCESS_PERMISSION') # cannot set expiry
|
|
# -*- coding: utf-8 -*-
# Copyright (c) Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
"""
vispy backend for glfw.
"""
# To install GLFW on Ubuntu, use sudo apt-get install libglfw3.
# On OSX, consider using brew.
from __future__ import division
import atexit
from time import sleep
import gc
import os
from ..base import (BaseApplicationBackend, BaseCanvasBackend,
BaseTimerBackend)
from ...util import keys, logger
from ...util.ptime import time
from ... import config
USE_EGL = config['gl_backend'].lower().startswith('es')
# -------------------------------------------------------------------- init ---
try:
from ...ext import glfw
# Map native keys to vispy keys
KEYMAP = {
glfw.GLFW_KEY_LEFT_SHIFT: keys.SHIFT,
glfw.GLFW_KEY_RIGHT_SHIFT: keys.SHIFT,
glfw.GLFW_KEY_LEFT_CONTROL: keys.CONTROL,
glfw.GLFW_KEY_RIGHT_CONTROL: keys.CONTROL,
glfw.GLFW_KEY_LEFT_ALT: keys.ALT,
glfw.GLFW_KEY_RIGHT_ALT: keys.ALT,
glfw.GLFW_KEY_LEFT_SUPER: keys.META,
glfw.GLFW_KEY_RIGHT_SUPER: keys.META,
glfw.GLFW_KEY_LEFT: keys.LEFT,
glfw.GLFW_KEY_UP: keys.UP,
glfw.GLFW_KEY_RIGHT: keys.RIGHT,
glfw.GLFW_KEY_DOWN: keys.DOWN,
glfw.GLFW_KEY_PAGE_UP: keys.PAGEUP,
glfw.GLFW_KEY_PAGE_DOWN: keys.PAGEDOWN,
glfw.GLFW_KEY_INSERT: keys.INSERT,
glfw.GLFW_KEY_DELETE: keys.DELETE,
glfw.GLFW_KEY_HOME: keys.HOME,
glfw.GLFW_KEY_END: keys.END,
glfw.GLFW_KEY_ESCAPE: keys.ESCAPE,
glfw.GLFW_KEY_BACKSPACE: keys.BACKSPACE,
glfw.GLFW_KEY_F1: keys.F1,
glfw.GLFW_KEY_F2: keys.F2,
glfw.GLFW_KEY_F3: keys.F3,
glfw.GLFW_KEY_F4: keys.F4,
glfw.GLFW_KEY_F5: keys.F5,
glfw.GLFW_KEY_F6: keys.F6,
glfw.GLFW_KEY_F7: keys.F7,
glfw.GLFW_KEY_F8: keys.F8,
glfw.GLFW_KEY_F9: keys.F9,
glfw.GLFW_KEY_F10: keys.F10,
glfw.GLFW_KEY_F11: keys.F11,
glfw.GLFW_KEY_F12: keys.F12,
glfw.GLFW_KEY_SPACE: keys.SPACE,
glfw.GLFW_KEY_ENTER: keys.ENTER,
'\r': keys.ENTER,
glfw.GLFW_KEY_TAB: keys.TAB,
}
BUTTONMAP = {glfw.GLFW_MOUSE_BUTTON_LEFT: 1,
glfw.GLFW_MOUSE_BUTTON_RIGHT: 2,
glfw.GLFW_MOUSE_BUTTON_MIDDLE: 3
}
except Exception as exp:
available, testable, why_not, which = False, False, str(exp), None
else:
if USE_EGL:
available, testable, why_not = False, False, 'EGL not supported'
which = 'glfw ' + str(glfw.__version__)
else:
available, testable, why_not = True, True, None
which = 'glfw ' + str(glfw.__version__)
MOD_KEYS = [keys.SHIFT, keys.ALT, keys.CONTROL, keys.META]
_GLFW_INITIALIZED = False
_VP_GLFW_ALL_WINDOWS = []
def _get_glfw_windows():
wins = list()
for win in _VP_GLFW_ALL_WINDOWS:
if isinstance(win, CanvasBackend):
wins.append(win)
return wins
# -------------------------------------------------------------- capability ---
capability = dict( # things that can be set by the backend
title=True,
size=True,
position=True,
show=True,
vsync=True,
resizable=True,
decorate=True,
fullscreen=True,
context=True,
multi_window=True,
scroll=True,
parent=False,
always_on_top=True,
)
# ------------------------------------------------------- set_configuration ---
def _set_config(c):
"""Set gl configuration for GLFW """
glfw.glfwWindowHint(glfw.GLFW_RED_BITS, c['red_size'])
glfw.glfwWindowHint(glfw.GLFW_GREEN_BITS, c['green_size'])
glfw.glfwWindowHint(glfw.GLFW_BLUE_BITS, c['blue_size'])
glfw.glfwWindowHint(glfw.GLFW_ALPHA_BITS, c['alpha_size'])
glfw.glfwWindowHint(glfw.GLFW_ACCUM_RED_BITS, 0)
glfw.glfwWindowHint(glfw.GLFW_ACCUM_GREEN_BITS, 0)
glfw.glfwWindowHint(glfw.GLFW_ACCUM_BLUE_BITS, 0)
glfw.glfwWindowHint(glfw.GLFW_ACCUM_ALPHA_BITS, 0)
glfw.glfwWindowHint(glfw.GLFW_DEPTH_BITS, c['depth_size'])
glfw.glfwWindowHint(glfw.GLFW_STENCIL_BITS, c['stencil_size'])
# glfw.glfwWindowHint(glfw.GLFW_CONTEXT_VERSION_MAJOR, c['major_version'])
# glfw.glfwWindowHint(glfw.GLFW_CONTEXT_VERSION_MINOR, c['minor_version'])
# glfw.glfwWindowHint(glfw.GLFW_SRGB_CAPABLE, c['srgb'])
glfw.glfwWindowHint(glfw.GLFW_SAMPLES, c['samples'])
glfw.glfwWindowHint(glfw.GLFW_STEREO, c['stereo'])
if not c['double_buffer']:
raise RuntimeError('GLFW must double buffer, consider using a '
'different backend, or using double buffering')
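# A minimal sketch of the configuration mapping that _set_config() above reads.
# The key names come directly from the glfwWindowHint calls in that function;
# the values are illustrative placeholders, not vispy's actual defaults (the
# real dict is passed in as p.context.config by CanvasBackend below).
_EXAMPLE_GL_CONFIG = {
    'red_size': 8, 'green_size': 8, 'blue_size': 8, 'alpha_size': 8,
    'depth_size': 24, 'stencil_size': 0,
    'samples': 0, 'stereo': False, 'double_buffer': True,
}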
# ------------------------------------------------------------- application ---
_glfw_errors = []
def _error_callback(num, descr):
_glfw_errors.append('Error %s: %s' % (num, descr))
class ApplicationBackend(BaseApplicationBackend):
def __init__(self):
BaseApplicationBackend.__init__(self)
self._timers = list()
def _add_timer(self, timer):
if timer not in self._timers:
self._timers.append(timer)
def _vispy_get_backend_name(self):
return 'Glfw'
def _vispy_process_events(self):
glfw.glfwPollEvents()
for timer in self._timers:
timer._tick()
wins = _get_glfw_windows()
for win in wins:
if win._needs_draw:
win._needs_draw = False
win._on_draw()
def _vispy_run(self):
wins = _get_glfw_windows()
while any(w._id is not None and not glfw.glfwWindowShouldClose(w._id)
for w in wins):
self._vispy_process_events()
self._vispy_quit() # to clean up
def _vispy_quit(self):
# Close windows
wins = _get_glfw_windows()
for win in wins:
if win._vispy_canvas is not None:
win._vispy_canvas.close()
# tear down timers
for timer in self._timers:
timer._vispy_stop()
self._timers = []
def _vispy_get_native_app(self):
global _GLFW_INITIALIZED
if not _GLFW_INITIALIZED:
cwd = os.getcwd()
glfw.glfwSetErrorCallback(_error_callback)
try:
if not glfw.glfwInit(): # only ever call once
raise OSError('Could not init glfw:\n%r' % _glfw_errors)
finally:
os.chdir(cwd)
glfw.glfwSetErrorCallback(0)
atexit.register(glfw.glfwTerminate)
_GLFW_INITIALIZED = True
return glfw
# ------------------------------------------------------------------ canvas ---
class CanvasBackend(BaseCanvasBackend):
""" Glfw backend for Canvas abstract class."""
# args are for BaseCanvasBackend, kwargs are for us.
def __init__(self, *args, **kwargs):
BaseCanvasBackend.__init__(self, *args)
p = self._process_backend_kwargs(kwargs)
self._initialized = False
# Deal with config
_set_config(p.context.config)
# Deal with context
p.context.shared.add_ref('glfw', self)
if p.context.shared.ref is self:
share = None
else:
share = p.context.shared.ref._id
glfw.glfwWindowHint(glfw.GLFW_REFRESH_RATE, 0) # highest possible
glfw.glfwSwapInterval(1 if p.vsync else 0)
glfw.glfwWindowHint(glfw.GLFW_RESIZABLE, int(p.resizable))
glfw.glfwWindowHint(glfw.GLFW_DECORATED, int(p.decorate))
glfw.glfwWindowHint(glfw.GLFW_VISIBLE, 0) # start out hidden
glfw.glfwWindowHint(glfw.GLFW_FLOATING, int(p.always_on_top))
if p.fullscreen is not False:
self._fullscreen = True
if p.fullscreen is True:
monitor = glfw.glfwGetPrimaryMonitor()
else:
monitor = glfw.glfwGetMonitors()
if p.fullscreen >= len(monitor):
raise ValueError('fullscreen must be <= %s'
% len(monitor))
monitor = monitor[p.fullscreen]
use_size = glfw.glfwGetVideoMode(monitor)[:2]
if use_size != tuple(p.size):
logger.debug('Requested size %s, will be ignored to '
'use fullscreen mode %s' % (p.size, use_size))
size = use_size
else:
self._fullscreen = False
monitor = None
size = p.size
self._id = glfw.glfwCreateWindow(width=size[0], height=size[1],
title=p.title, monitor=monitor,
share=share)
if not self._id:
raise RuntimeError('Could not create window')
_VP_GLFW_ALL_WINDOWS.append(self)
self._mod = list()
# Register callbacks
glfw.glfwSetWindowRefreshCallback(self._id, self._on_draw)
glfw.glfwSetWindowSizeCallback(self._id, self._on_resize)
glfw.glfwSetKeyCallback(self._id, self._on_key_press)
glfw.glfwSetCharCallback(self._id, self._on_key_char)
glfw.glfwSetMouseButtonCallback(self._id, self._on_mouse_button)
glfw.glfwSetScrollCallback(self._id, self._on_mouse_scroll)
glfw.glfwSetCursorPosCallback(self._id, self._on_mouse_motion)
glfw.glfwSetWindowCloseCallback(self._id, self._on_close)
self._vispy_canvas_ = None
self._needs_draw = False
self._vispy_canvas.set_current()
if p.position is not None:
self._vispy_set_position(*p.position)
if p.show:
glfw.glfwShowWindow(self._id)
# Init
self._initialized = True
self._next_key_events = []
self._next_key_text = {}
self._vispy_canvas.set_current()
self._vispy_canvas.events.initialize()
def _vispy_warmup(self):
etime = time() + 0.25
while time() < etime:
sleep(0.01)
self._vispy_canvas.set_current()
self._vispy_canvas.app.process_events()
def _vispy_set_current(self):
if self._id is None:
return
# Make this the current context
glfw.glfwMakeContextCurrent(self._id)
def _vispy_swap_buffers(self):
if self._id is None:
return
# Swap front and back buffer
glfw.glfwSwapBuffers(self._id)
def _vispy_set_title(self, title):
if self._id is None:
return
# Set the window title. Has no effect for widgets
glfw.glfwSetWindowTitle(self._id, title.encode('utf-8'))
def _vispy_set_size(self, w, h):
if self._id is None:
return
# Set size of the widget or window
glfw.glfwSetWindowSize(self._id, w, h)
def _vispy_set_position(self, x, y):
if self._id is None:
return
# Set position of the widget or window. May have no effect for widgets
glfw.glfwSetWindowPos(self._id, x, y)
def _vispy_set_visible(self, visible):
# Show or hide the window or widget
if self._id is None:
return
if visible:
glfw.glfwShowWindow(self._id)
# this ensures that the show takes effect
self._vispy_update()
else:
glfw.glfwHideWindow(self._id)
def _vispy_set_fullscreen(self, fullscreen):
logger.warn('Cannot change fullscreen mode for GLFW backend')
def _vispy_update(self):
# Invoke a redraw, passing it on to the canvas
if self._vispy_canvas is None or self._id is None:
return
# Mark that this window wants to be drawn on the next loop iter
self._needs_draw = True
def _vispy_close(self):
# Force the window or widget to shut down
if self._id is not None:
self._vispy_canvas = None
# glfw.glfwSetWindowShouldClose() # Does not really cause a close
self._vispy_set_visible(False)
self._id, id_ = None, self._id
glfw.glfwDestroyWindow(id_)
gc.collect() # help ensure context gets destroyed
def _vispy_get_size(self):
if self._id is None:
return
w, h = glfw.glfwGetWindowSize(self._id)
return w, h
def _vispy_get_physical_size(self):
if self._id is None:
return
w, h = glfw.glfwGetFramebufferSize(self._id)
return w, h
def _vispy_get_position(self):
if self._id is None:
return
x, y = glfw.glfwGetWindowPos(self._id)
return x, y
def _vispy_get_fullscreen(self):
return self._fullscreen
##########################################
# Notify vispy of events triggered by GLFW
def _on_resize(self, _id, w, h):
if self._vispy_canvas is None:
return
self._vispy_canvas.events.resize(
size=(w, h), physical_size=self._vispy_get_physical_size())
def _on_close(self, _id):
if self._vispy_canvas is None:
return
self._vispy_canvas.close()
def _on_draw(self, _id=None):
if self._vispy_canvas is None or self._id is None:
return
self._vispy_canvas.set_current()
self._vispy_canvas.events.draw(region=None) # (0, 0, w, h))
def _on_mouse_button(self, _id, button, action, mod):
        if self._vispy_canvas is None or self._id is None:
return
pos = glfw.glfwGetCursorPos(self._id)
if button < 3:
# Mouse click event
button = BUTTONMAP.get(button, 0)
if action == glfw.GLFW_PRESS:
fun = self._vispy_mouse_press
elif action == glfw.GLFW_RELEASE:
fun = self._vispy_mouse_release
else:
return
fun(pos=pos, button=button, modifiers=self._mod)
def _on_mouse_scroll(self, _id, x_off, y_off):
        if self._vispy_canvas is None or self._id is None:
return
pos = glfw.glfwGetCursorPos(self._id)
delta = (float(x_off), float(y_off))
self._vispy_canvas.events.mouse_wheel(pos=pos, delta=delta,
modifiers=self._mod)
def _on_mouse_motion(self, _id, x, y):
if self._vispy_canvas is None:
return
self._vispy_mouse_move(pos=(x, y), modifiers=self._mod)
def _on_key_press(self, _id, key, scancode, action, mod):
if self._vispy_canvas is None:
return
key, text = self._process_key(key)
if action == glfw.GLFW_PRESS:
fun = self._vispy_canvas.events.key_press
down = True
elif action == glfw.GLFW_RELEASE:
fun = self._vispy_canvas.events.key_release
down = False
else:
return
self._process_mod(key, down=down)
# NOTE: GLFW only provides localized characters via _on_key_char, so if
# this event contains a character we store all other data and dispatch
# it once the final unicode character is sent shortly after.
if text != '' and action == glfw.GLFW_PRESS:
self._next_key_events.append((fun, key, self._mod))
else:
if key in self._next_key_text:
text = self._next_key_text[key]
del self._next_key_text[key]
fun(key=key, text=text, modifiers=self._mod)
def _on_key_char(self, _id, text):
# Repeat strokes (frequency configured at OS) are sent here only,
# no regular _on_key_press events. Currently ignored!
if len(self._next_key_events) == 0:
return
(fun, key, mod) = self._next_key_events.pop(0)
fun(key=key, text=chr(text), modifiers=mod)
self._next_key_text[key] = text
def _process_key(self, key):
if 32 <= key <= 127:
return keys.Key(chr(key)), chr(key)
elif key in KEYMAP:
return KEYMAP[key], ''
else:
return None, ''
def _process_mod(self, key, down):
"""Process (possible) keyboard modifiers
GLFW provides "mod" with many callbacks, but not (critically) the
scroll callback, so we keep track on our own here.
"""
if key in MOD_KEYS:
if down:
if key not in self._mod:
self._mod.append(key)
elif key in self._mod:
self._mod.pop(self._mod.index(key))
return self._mod
# ------------------------------------------------------------------- timer ---
class TimerBackend(BaseTimerBackend):
def __init__(self, vispy_timer):
BaseTimerBackend.__init__(self, vispy_timer)
vispy_timer._app._backend._add_timer(self)
self._vispy_stop()
def _vispy_start(self, interval):
self._interval = interval
self._next_time = time() + self._interval
def _vispy_stop(self):
self._next_time = float('inf')
def _tick(self):
if time() >= self._next_time:
self._vispy_timer._timeout()
self._next_time = time() + self._interval
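# -------------------------------------------------------------------- note ---
# Illustrative sketch only (hypothetical names, not vispy API): TimerBackend
# above relies on the glfw application backend keeping a list of timers and
# calling _tick() on each of them once per iteration of its event loop. The
# self-contained example below mirrors that polling pattern so the mechanism
# can be seen in isolation.
def _poll_timer_demo(duration=0.1):
    from time import sleep, time
    class PollTimer(object):
        def __init__(self, interval, callback):
            self.interval = interval
            self.callback = callback
            self.next_time = time() + interval
        def tick(self):
            # Fire once the scheduled time has passed, then reschedule,
            # exactly like TimerBackend._tick() above.
            if time() >= self.next_time:
                self.callback()
                self.next_time = time() + self.interval
    fired = []
    timers = [PollTimer(0.02, lambda: fired.append(time()))]
    end = time() + duration
    while time() < end:
        for timer in timers:
            timer.tick()
        sleep(0.001)
    return len(fired)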
|
|
import argparse
from time import sleep
from datetime import datetime
from velox_common import *
# args is argparse.Namespace. returns a Cluster().
def get_cluster(args):
return Cluster(args.region, args.cluster_id, args.num_servers, args.num_clients)
# sub-commands. args is argparse.Namespace.
def command_launch(args):
cluster = get_cluster(args)
kwargs = dict(vars(args))
pprint("Launching velox clusters")
check_for_instances(cluster)
if args.no_spot:
provision_instances(cluster, **kwargs)
else:
provision_spot(cluster, **kwargs)
wait_all_hosts_up(cluster)
command_claim(args)
def command_claim(args):
cluster = get_cluster(args)
pprint("Claiming untagged instances...")
claim_instances(cluster)
def command_rebuild(args):
cluster = get_cluster(args)
kwargs = dict(vars(args))
pprint("Rebuilding velox clusters")
assign_hosts(cluster)
stop_velox_processes()
rebuild_servers(**kwargs)
def command_terminate(args):
cluster = get_cluster(args)
kwargs = dict(vars(args))
terminate_cluster(cluster, **kwargs)
def command_install_ykit(args):
cluster = get_cluster(args)
pprint("Installing Yourkit")
assign_hosts(cluster)
install_ykit(cluster)
def command_client_bench(args):
cluster = get_cluster(args)
kwargs = dict(vars(args))
runid = "THECRANK-%s" % (str(datetime.now()).replace(' ', '_').replace(":", '_'))
pprint("Running THE CRANKSHAW")
assign_hosts(cluster)
start_servers_with_zk(cluster, **kwargs)
sleep(5)
run_velox_client_bench(cluster, **kwargs)
stop_velox_processes()
fetch_logs(cluster, runid, **kwargs)
pprint("THE CRANKSHAW has completed!")
def command_client_bench_local(args):
kwargs = dict(vars(args))
command_deploy_zookeeper_local(args)
pprint("Running THE CRANKSHAW locally! (1 client only)")
start_servers_local(**kwargs)
sleep(5)
client_bench_local_single(**kwargs)
kill_velox_local()
pprint("THE CRANKSHAW has completed!")
def command_ycsb_bench(args):
cluster = get_cluster(args)
kwargs = dict(vars(args))
runid = "YCSB-%s" % (str(datetime.now()).replace(' ', '_').replace(":", '_'))
pprint("Running YCSB")
assign_hosts(cluster)
start_servers(cluster, **kwargs)
sleep(5)
run_ycsb(cluster, **kwargs)
stop_velox_processes()
fetch_logs(cluster, runid, **kwargs)
pprint("YCSB has completed!")
def command_ycsb_bench_local(args):
kwargs = dict(vars(args))
pprint("Running YCSB locally! (1 client only)")
start_servers_local(**kwargs)
sleep(5)
run_ycsb_local(**kwargs)
kill_velox_local()
pprint("YCSB has completed!")
def command_deploy_zookeeper(args):
cluster = get_cluster(args)
pprint("Deploying Zookeeper")
assign_hosts(cluster)
install_zookeeper_cluster(cluster, args.zk_config)
start_zookeeper_cluster(cluster)
def command_deploy_zookeeper_local(args):
pprint("Deploying Zookeeper locally!")
install_zookeeper_cluster_local(args.zk_config)
start_zookeeper_cluster_local()
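# Illustrative sketch only (hypothetical names, not part of the velox tooling):
# the sub-command dispatch pattern used below, in miniature. Every sub-parser
# binds its handler via set_defaults(func=...), so after parsing the chosen
# handler is available as args.func and is simply invoked with the Namespace,
# exactly as done at the bottom of this script.
def _dispatch_demo():
    import argparse as _ap
    demo_parser = _ap.ArgumentParser(description='dispatch demo')
    demo_sub = demo_parser.add_subparsers(dest='subcommand')
    demo_hello = demo_sub.add_parser('hello')
    demo_hello.add_argument('--name', default='velox')
    demo_hello.set_defaults(func=lambda a: 'hello %s' % a.name)
    demo_args = demo_parser.parse_args(['hello', '--name', 'world'])
    return demo_args.func(demo_args)  # -> 'hello world'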
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Setup velox on EC2')
##################################
######### global options #########
##################################
parser.add_argument('--cluster_id', '-c', dest='cluster_id', required=True,
help='Cluster ID (tag) to use for your instances')
parser.add_argument('--num_servers', '-ns', dest='num_servers', type=int, required=True,
help='Number of server machines per cluster.')
parser.add_argument('--num_clients', '-nc', dest='num_clients', type=int, required=True,
help='Number of client machines per cluster.')
subparsers = parser.add_subparsers(title='Sub-Commands',
description='Valid Sub-Commands',
help='Sub-Command Help',
dest='subcommand')
##################################
######### common options #########
##################################
# common cluster config options for ec2
common_cluster_ec2 = argparse.ArgumentParser(add_help=False)
common_cluster_ec2.add_argument('--region', '-r', dest='region', default="us-west-2", type=str,
help="EC2 region. [default: %(default)s]")
# common benchmark options (base)
common_benchmark = argparse.ArgumentParser(add_help=False)
common_benchmark.add_argument('--profile', action='store_true',
help='Run JVM with hprof cpu profiling. [default: %(default)s]')
common_benchmark.add_argument('--profile_depth', dest='profile_depth', default=2, type=int,
help='Stack depth to trace when running profiling. [default: %(default)s]')
common_benchmark.add_argument('--network_service', dest='network_service',
default='array', type=str, choices=['array', 'nio'],
help="Which network service to use. [default: %(default)s]")
common_benchmark.add_argument('--buffer_size', dest='buffer_size',
default=16384*8, type=int,
help='Size (in bytes) to make the network buffer. [default: %(default)s]')
common_benchmark.add_argument('--sweep_time', dest='sweep_time',
default=500, type=int,
help='Time (in ms) the ArrayNetworkService send sweep thread should wait between sweeps. [default: %(default)s]')
common_benchmark.add_argument('--parallelism', dest='parallelism',
default=64, type=int,
help='Number of threads per benchmark client. [default: %(default)s]')
common_benchmark.add_argument('--read_pct', dest='read_pct',
default=0.5, type=float,
help='Percentage of workload operations which are reads. [default: %(default)s]')
common_benchmark.add_argument('--max_time', dest='max_time',
default=60, type=int,
help='Maximum execution time (in seconds) of the benchmark. [default: %(default)s]')
common_benchmark.add_argument('--ops', dest='ops',
default=100000, type=int,
help='Number of operations to perform in the benchmark. [default: %(default)s]')
common_benchmark.add_argument('--heap_size_gb', dest='heap_size',
default=230, type=int,
help='Size (in GB) to make the JVM heap. [default: %(default)s]')
# common benchmark options for ec2 (includes benchmark base)
common_benchmark_ec2 = argparse.ArgumentParser(add_help=False, parents=[common_benchmark])
common_benchmark_ec2.add_argument('--output', dest='output_dir', default="./output", type=str,
help='output directory for runs. [default: %(default)s]')
# common crankshaw options
common_client_bench = argparse.ArgumentParser(add_help=False)
common_client_bench.add_argument('--latency', action='store_true',
help='Compute average latency when running THE CRANKSHAW. [default: %(default)s]')
common_client_bench.add_argument('--test_index', action='store_true',
help='Test index inserts using triggers for THE CRANKSHAW. [default: %(default)s]')
# common ycsb options
common_ycsb_bench = argparse.ArgumentParser(add_help=False)
common_ycsb_bench.add_argument('--skip_rebuild', action='store_true',
help='Skip rebuilding ycsb before running the benchmark. [default: %(default)s]')
##################################
###### sub-command options #######
##################################
# launch
parser_launch = subparsers.add_parser('launch', help='Launch EC2 cluster',
parents=[common_cluster_ec2])
parser_launch.set_defaults(func=command_launch)
parser_launch.add_argument('--no_spot', dest='no_spot', action='store_true',
help='Don\'t use spot instances. [default: %(default)s]')
parser_launch.add_argument('--spot_price', dest="spot_price", type=float, default=1.5,
help="Spot price in $. [default: %(default)s]")
parser_launch.add_argument('--instance_type', dest="instance_type", type=str, default="cr1.8xlarge",
help="EC2 instance type. [default: %(default)s]")
parser_launch.add_argument('--placement_group', dest='placement_group', default="VELOX_CLUSTER",
help="EC2 placement group. [default: %(default)s]")
# claim
parser_claim = subparsers.add_parser('claim', help='Claim non-tagged instances as our own',
parents=[common_cluster_ec2])
parser_claim.set_defaults(func=command_claim)
# terminate
parser_terminate = subparsers.add_parser('terminate', help='Terminate the EC2 cluster and any matching instances',
parents=[common_cluster_ec2])
parser_terminate.set_defaults(func=command_terminate)
# rebuild
parser_rebuild = subparsers.add_parser('rebuild', help='Rebuild velox cluster',
parents=[common_cluster_ec2])
parser_rebuild.set_defaults(func=command_rebuild)
parser_rebuild.add_argument('--branch', '-b', dest="branch", default="master",
help='Branch to rebuild. [default: %(default)s]')
parser_rebuild.add_argument('--git_remote', dest="git_remote", default="git@github.com:amplab/velox.git",
help='Upstream git url. [default: %(default)s]')
parser_rebuild.add_argument('--deploy_key', dest="deploy_key", default=None,
help='Local path to upstream deploy key. [default: %(default)s]')
# install_ykit
parser_install_ykit = subparsers.add_parser('install_ykit', help='Install yourkit',
parents=[common_cluster_ec2])
parser_install_ykit.set_defaults(func=command_install_ykit)
# deploy zookeeper
parser_deploy_zk = subparsers.add_parser('deploy_zookeeper', help='Deploy zookeeper to all backend servers',
parents=[common_cluster_ec2])
parser_deploy_zk.add_argument('--zk_config', dest='zk_config', default="conf/zk_cluster.cfg.template", type=str,
help="Path to Zookeeper config file.")
parser_deploy_zk.set_defaults(func=command_deploy_zookeeper)
# deploy zookeeper locally
parser_deploy_zk_local = subparsers.add_parser('deploy_zookeeper_local', help='Deploy zookeeper to local tmp directory',
parents=[common_cluster_ec2])
parser_deploy_zk_local.add_argument('--zk_config', dest='zk_config', default="conf/zk_cluster.cfg.template", type=str,
help="Path to Zookeeper config file.")
parser_deploy_zk_local.set_defaults(func=command_deploy_zookeeper_local)
##################################
####### benchmark commands #######
##################################
parser_client_bench = subparsers.add_parser('client_bench', help='Run THE CRANKSHAW TEST on EC2',
parents=[common_cluster_ec2, common_benchmark_ec2, common_client_bench])
parser_client_bench.set_defaults(func=command_client_bench)
parser_client_bench_local = subparsers.add_parser('client_bench_local', help='Run THE CRANKSHAW TEST locally',
parents=[common_benchmark, common_client_bench])
parser_client_bench_local.add_argument('--zk_config', dest='zk_config', default="conf/zk_cluster.cfg.template", type=str,
help="Path to Zookeeper config file.")
parser_client_bench_local.set_defaults(func=command_client_bench_local)
parser_ycsb_bench = subparsers.add_parser('ycsb_bench', help='Run YCSB on EC2',
parents=[common_cluster_ec2, common_benchmark_ec2, common_ycsb_bench])
parser_ycsb_bench.set_defaults(func=command_ycsb_bench)
parser_ycsb_bench_local = subparsers.add_parser('ycsb_bench_local', help='Run YCSB locally',
parents=[common_benchmark, common_ycsb_bench])
parser_ycsb_bench_local.set_defaults(func=command_ycsb_bench_local)
# parse the args, and execute the sub-command
args = parser.parse_args()
args.func(args)
|
|
"""Requirements specific to SQLAlchemy's own unit tests.
"""
import sys
from sqlalchemy import exc
from sqlalchemy import util
from sqlalchemy.sql import text
from sqlalchemy.testing import exclusions
from sqlalchemy.testing.exclusions import against
from sqlalchemy.testing.exclusions import fails_if
from sqlalchemy.testing.exclusions import fails_on
from sqlalchemy.testing.exclusions import fails_on_everything_except
from sqlalchemy.testing.exclusions import LambdaPredicate
from sqlalchemy.testing.exclusions import NotPredicate
from sqlalchemy.testing.exclusions import only_if
from sqlalchemy.testing.exclusions import only_on
from sqlalchemy.testing.exclusions import skip_if
from sqlalchemy.testing.exclusions import SpecPredicate
from sqlalchemy.testing.exclusions import succeeds_if
from sqlalchemy.testing.requirements import SuiteRequirements
def no_support(db, reason):
return SpecPredicate(db, description=reason)
def exclude(db, op, spec, description=None):
return SpecPredicate(db, op, spec, description=description)
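# Illustrative only -- this rule is not consulted by the test suite. It shows
# how the two helpers above compose with the exclusions API: rules produced by
# skip_if() / fails_on() can be added together, and a spec may carry an
# operator/version tuple, which is the pattern DefaultRequirements uses
# throughout. The backend choices and reasons here are hypothetical.
_example_composed_rule = skip_if(
    [
        no_support("sqlite", "hypothetical: pretend SQLite lacks the feature"),
        exclude("mysql", "<", (5, 6), "hypothetical: pretend MySQL < 5.6 lacks it"),
    ]
) + fails_on("firebird", "hypothetical known failure")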
class DefaultRequirements(SuiteRequirements):
@property
def deferrable_or_no_constraints(self):
"""Target database must support deferrable constraints."""
return skip_if(
[
no_support("firebird", "not supported by database"),
no_support("mysql", "not supported by database"),
no_support("mssql", "not supported by database"),
]
)
@property
def check_constraints(self):
"""Target database must support check constraints."""
return exclusions.open()
@property
def enforces_check_constraints(self):
"""Target database must also enforce check constraints."""
return self.check_constraints + fails_on(
self._mysql_check_constraints_dont_exist,
"check constraints don't enforce on MySQL, MariaDB<10.2",
)
@property
def named_constraints(self):
"""target database must support names for constraints."""
return exclusions.open()
@property
def implicitly_named_constraints(self):
"""target database must apply names to unnamed constraints."""
return skip_if([no_support("sqlite", "not supported by database")])
@property
def foreign_keys(self):
"""Target database must support foreign keys."""
return skip_if(no_support("sqlite", "not supported by database"))
@property
def on_update_cascade(self):
"""target database must support ON UPDATE..CASCADE behavior in
foreign keys."""
return skip_if(
["sqlite", "oracle"],
"target backend %(doesnt_support)s ON UPDATE CASCADE",
)
@property
def non_updating_cascade(self):
"""target database must *not* support ON UPDATE..CASCADE behavior in
foreign keys."""
return fails_on_everything_except(
"sqlite", "oracle", "+zxjdbc"
) + skip_if("mssql")
@property
def recursive_fk_cascade(self):
"""target database must support ON DELETE CASCADE on a self-referential
foreign key"""
return skip_if(["mssql"])
@property
def deferrable_fks(self):
"""target database must support deferrable fks"""
return only_on(["oracle"])
@property
def foreign_key_constraint_option_reflection_ondelete(self):
return only_on(["postgresql", "mysql", "sqlite", "oracle"])
@property
def foreign_key_constraint_option_reflection_onupdate(self):
return only_on(["postgresql", "mysql", "sqlite"])
@property
def comment_reflection(self):
return only_on(["postgresql", "mysql", "oracle"])
@property
def unbounded_varchar(self):
"""Target database must support VARCHAR with no length"""
return skip_if(
["firebird", "oracle", "mysql"], "not supported by database"
)
@property
def boolean_col_expressions(self):
"""Target database must support boolean expressions as columns"""
return skip_if(
[
no_support("firebird", "not supported by database"),
no_support("oracle", "not supported by database"),
no_support("mssql", "not supported by database"),
no_support("sybase", "not supported by database"),
]
)
@property
def non_native_boolean_unconstrained(self):
"""target database is not native boolean and allows arbitrary integers
        in its "bool" column"""
return skip_if(
[
LambdaPredicate(
lambda config: against(config, "mssql"),
"SQL Server drivers / odbc seem to change "
"their mind on this",
),
LambdaPredicate(
lambda config: config.db.dialect.supports_native_boolean,
"native boolean dialect",
),
]
)
@property
def standalone_binds(self):
"""target database/driver supports bound parameters as column expressions
without being in the context of a typed column.
"""
return skip_if(["firebird", "mssql+mxodbc"], "not supported by driver")
@property
def no_quoting_special_bind_names(self):
"""Target database will quote bound parameter names, doesn't support
EXPANDING"""
return skip_if(["oracle"])
@property
def identity(self):
"""Target database must support GENERATED AS IDENTITY or a facsimile.
Includes GENERATED AS IDENTITY, AUTOINCREMENT, AUTO_INCREMENT, or other
column DDL feature that fills in a DB-generated identifier at
INSERT-time without requiring pre-execution of a SEQUENCE or other
artifact.
"""
return skip_if(
["firebird", "oracle", "postgresql", "sybase"],
"not supported by database",
)
@property
def temporary_tables(self):
"""target database supports temporary tables"""
return skip_if(
["mssql", "firebird", self._sqlite_file_db], "not supported (?)"
)
@property
def temp_table_reflection(self):
return self.temporary_tables
@property
def reflectable_autoincrement(self):
"""Target database must support tables that can automatically generate
PKs assuming they were reflected.
this is essentially all the DBs in "identity" plus PostgreSQL, which
has SERIAL support. FB and Oracle (and sybase?) require the Sequence
to be explicitly added, including if the table was reflected.
"""
return skip_if(
["firebird", "oracle", "sybase"], "not supported by database"
)
@property
def insert_from_select(self):
return skip_if(["firebird"], "crashes for unknown reason")
@property
def fetch_rows_post_commit(self):
return skip_if(["firebird"], "not supported")
@property
def non_broken_binary(self):
"""target DBAPI must work fully with binary values"""
# see https://github.com/pymssql/pymssql/issues/504
return skip_if(["mssql+pymssql"])
@property
def binary_comparisons(self):
"""target database/driver can allow BLOB/BINARY fields to be compared
against a bound parameter value.
"""
return skip_if(["oracle", "mssql"], "not supported by database/driver")
@property
def binary_literals(self):
"""target backend supports simple binary literals, e.g. an
expression like::
SELECT CAST('foo' AS BINARY)
Where ``BINARY`` is the type emitted from :class:`.LargeBinary`,
e.g. it could be ``BLOB`` or similar.
Basically fails on Oracle.
"""
# adding mssql here since it doesn't support comparisons either,
# have observed generally bad behavior with binary / mssql.
return skip_if(["oracle", "mssql"], "not supported by database/driver")
@property
def tuple_in(self):
def _sqlite_tuple_in(config):
return against(
config, "sqlite"
) and config.db.dialect.dbapi.sqlite_version_info >= (3, 15, 0)
return only_on(["mysql", "postgresql", _sqlite_tuple_in])
@property
def independent_cursors(self):
"""Target must support simultaneous, independent database cursors
on a single connection."""
return skip_if(["mssql", "mysql"], "no driver support")
@property
def independent_connections(self):
"""
Target must support simultaneous, independent database connections.
"""
# This is also true of some configurations of UnixODBC and probably
# win32 ODBC as well.
return skip_if(
[
no_support(
"sqlite",
"independent connections disabled "
"when :memory: connections are used",
),
exclude(
"mssql",
"<",
(9, 0, 0),
"SQL Server 2005+ is required for "
"independent connections",
),
]
)
@property
def memory_process_intensive(self):
"""Driver is able to handle the memory tests which run in a subprocess
and iterate through hundreds of connections
"""
return skip_if(
[
no_support("oracle", "Oracle XE usually can't handle these"),
no_support("mssql+pyodbc", "MS ODBC drivers struggle"),
]
)
@property
def updateable_autoincrement_pks(self):
"""Target must support UPDATE on autoincrement/integer primary key."""
return skip_if(
["mssql", "sybase"], "IDENTITY columns can't be updated"
)
@property
def isolation_level(self):
return only_on(
("postgresql", "sqlite", "mysql", "mssql", "oracle"),
"DBAPI has no isolation level support",
) + fails_on(
"postgresql+pypostgresql",
"pypostgresql bombs on multiple isolation level calls",
)
def get_isolation_levels(self, config):
levels = set(config.db.dialect._isolation_lookup)
if against(config, "sqlite"):
default = "SERIALIZABLE"
levels.add("AUTOCOMMIT")
elif against(config, "postgresql"):
default = "READ COMMITTED"
levels.add("AUTOCOMMIT")
elif against(config, "mysql"):
default = "REPEATABLE READ"
levels.add("AUTOCOMMIT")
elif against(config, "mssql"):
default = "READ COMMITTED"
levels.add("AUTOCOMMIT")
elif against(config, "oracle"):
default = "READ COMMITTED"
levels.add("AUTOCOMMIT")
else:
raise NotImplementedError()
return {"default": default, "supported": levels}
@property
def autocommit(self):
"""target dialect supports 'AUTOCOMMIT' as an isolation_level"""
return self.isolation_level + only_if(
lambda config: "AUTOCOMMIT"
in self.get_isolation_levels(config)["supported"]
)
@property
def row_triggers(self):
"""Target must support standard statement-running EACH ROW triggers."""
return skip_if(
[
# no access to same table
no_support("mysql", "requires SUPER priv"),
exclude("mysql", "<", (5, 0, 10), "not supported by database"),
# huh? TODO: implement triggers for PG tests, remove this
no_support(
"postgresql",
"PG triggers need to be implemented for tests",
),
]
)
@property
def sequences_as_server_defaults(self):
"""Target database must support SEQUENCE as a server side default."""
return only_on(
"postgresql", "doesn't support sequences as a server side default."
)
@property
def sql_expressions_inserted_as_primary_key(self):
return only_if([self.returning, self.sqlite])
@property
def computed_columns_on_update_returning(self):
return self.computed_columns + skip_if("oracle")
@property
def correlated_outer_joins(self):
"""Target must support an outer join to a subquery which
correlates to the parent."""
return skip_if(
"oracle",
'Raises "ORA-01799: a column may not be '
'outer-joined to a subquery"',
)
@property
def update_from(self):
"""Target must support UPDATE..FROM syntax"""
return only_on(
["postgresql", "mssql", "mysql"],
"Backend does not support UPDATE..FROM",
)
@property
def delete_from(self):
"""Target must support DELETE FROM..FROM or DELETE..USING syntax"""
return only_on(
["postgresql", "mssql", "mysql", "sybase"],
"Backend does not support DELETE..FROM",
)
@property
def update_where_target_in_subquery(self):
"""Target must support UPDATE (or DELETE) where the same table is
present in a subquery in the WHERE clause.
This is an ANSI-standard syntax that apparently MySQL can't handle,
such as::
UPDATE documents SET flag=1 WHERE documents.title IN
(SELECT max(documents.title) AS title
FROM documents GROUP BY documents.user_id
)
"""
return fails_if(
self._mysql_not_mariadb_103,
'MySQL error 1093 "Cant specify target table '
'for update in FROM clause", resolved by MariaDB 10.3',
)
@property
def savepoints(self):
"""Target database must support savepoints."""
return skip_if(
["sqlite", "sybase", ("mysql", "<", (5, 0, 3))],
"savepoints not supported",
)
@property
def savepoints_w_release(self):
return self.savepoints + skip_if(
["oracle", "mssql"],
"database doesn't support release of savepoint",
)
@property
def schemas(self):
"""Target database must support external schemas, and have one
named 'test_schema'."""
return skip_if(["firebird"], "no schema support")
@property
def cross_schema_fk_reflection(self):
"""target system must support reflection of inter-schema foreign keys
"""
return only_on(["postgresql", "mysql", "mssql"])
@property
def implicit_default_schema(self):
"""target system has a strong concept of 'default' schema that can
be referred to implicitly.
basically, PostgreSQL.
"""
return only_on(["postgresql"])
@property
def unique_constraint_reflection(self):
return fails_on_everything_except(
"postgresql", "mysql", "sqlite", "oracle"
)
@property
def unique_constraint_reflection_no_index_overlap(self):
return (
self.unique_constraint_reflection
+ skip_if("mysql")
+ skip_if("oracle")
)
@property
def check_constraint_reflection(self):
return fails_on_everything_except(
"postgresql",
"sqlite",
"oracle",
self._mysql_and_check_constraints_exist,
)
@property
def indexes_with_expressions(self):
return only_on(["postgresql", "sqlite>=3.9.0"])
@property
def temp_table_names(self):
"""target dialect supports listing of temporary table names"""
return only_on(["sqlite", "oracle"]) + skip_if(self._sqlite_file_db)
@property
def temporary_views(self):
"""target database supports temporary views"""
return only_on(["sqlite", "postgresql"]) + skip_if(
self._sqlite_file_db
)
@property
def update_nowait(self):
"""Target database must support SELECT...FOR UPDATE NOWAIT"""
return skip_if(
["firebird", "mssql", "mysql", "sqlite", "sybase"],
"no FOR UPDATE NOWAIT support",
)
@property
def subqueries(self):
"""Target database must support subqueries."""
return skip_if(exclude("mysql", "<", (4, 1, 1)), "no subquery support")
@property
def ctes(self):
"""Target database supports CTEs"""
return only_on(
[
lambda config: against(config, "mysql")
and (
(
config.db.dialect._is_mariadb
and config.db.dialect._mariadb_normalized_version_info
>= (10, 2)
)
or (
not config.db.dialect._is_mariadb
and config.db.dialect.server_version_info >= (8,)
)
),
"postgresql",
"mssql",
"oracle",
"sqlite>=3.8.3",
]
)
@property
def ctes_with_update_delete(self):
"""target database supports CTES that ride on top of a normal UPDATE
or DELETE statement which refers to the CTE in a correlated subquery.
"""
return only_on(
[
"postgresql",
"mssql",
# "oracle" - oracle can do this but SQLAlchemy doesn't support
# their syntax yet
]
)
@property
def ctes_on_dml(self):
"""target database supports CTES which consist of INSERT, UPDATE
or DELETE *within* the CTE, e.g. WITH x AS (UPDATE....)"""
return only_if(["postgresql"])
@property
def mod_operator_as_percent_sign(self):
"""target database must use a plain percent '%' as the 'modulus'
operator."""
return only_if(["mysql", "sqlite", "postgresql+psycopg2", "mssql"])
@property
def intersect(self):
"""Target database must support INTERSECT or equivalent."""
return fails_if(
["firebird", self._mysql_not_mariadb_103, "sybase"],
"no support for INTERSECT",
)
@property
def except_(self):
"""Target database must support EXCEPT or equivalent (i.e. MINUS)."""
return fails_if(
["firebird", self._mysql_not_mariadb_103, "sybase"],
"no support for EXCEPT",
)
@property
def order_by_col_from_union(self):
"""target database supports ordering by a column from a SELECT
inside of a UNION
E.g. (SELECT id, ...) UNION (SELECT id, ...) ORDER BY id
Fails on SQL Server
"""
return fails_if("mssql")
@property
def parens_in_union_contained_select_w_limit_offset(self):
"""Target database must support parenthesized SELECT in UNION
when LIMIT/OFFSET is specifically present.
E.g. (SELECT ... LIMIT ..) UNION (SELECT .. OFFSET ..)
This is known to fail on SQLite.
"""
return fails_if("sqlite")
@property
def parens_in_union_contained_select_wo_limit_offset(self):
"""Target database must support parenthesized SELECT in UNION
when OFFSET/LIMIT is specifically not present.
E.g. (SELECT ...) UNION (SELECT ..)
This is known to fail on SQLite. It also fails on Oracle
because without LIMIT/OFFSET, there is currently no step that
creates an additional subquery.
"""
return fails_if(["sqlite", "oracle"])
@property
def offset(self):
"""Target database must support some method of adding OFFSET or
equivalent to a result set."""
return fails_if(["sybase"], "no support for OFFSET or equivalent")
@property
def sql_expression_limit_offset(self):
return (
fails_if(
["mysql"],
"Target backend can't accommodate full expressions in "
"OFFSET or LIMIT",
)
+ self.offset
)
@property
def window_functions(self):
return only_if(
["postgresql>=8.4", "mssql", "oracle", "sqlite>=3.25.0"],
"Backend does not support window functions",
)
@property
def two_phase_transactions(self):
"""Target database must support two-phase transactions."""
def pg_prepared_transaction(config):
if not against(config, "postgresql"):
return False
with config.db.connect() as conn:
try:
num = conn.scalar(
text(
"select cast(setting AS integer) from pg_settings "
"where name = 'max_prepared_transactions'"
)
)
except exc.OperationalError:
return False
else:
return num > 0
return skip_if(
[
no_support("firebird", "no SA implementation"),
no_support("mssql", "two-phase xact not supported by drivers"),
no_support(
"oracle", "two-phase xact not implemented in SQLA/oracle"
),
no_support(
"drizzle", "two-phase xact not supported by database"
),
no_support(
"sqlite", "two-phase xact not supported by database"
),
no_support(
"sybase", "two-phase xact not supported by drivers/SQLA"
),
no_support(
"postgresql+zxjdbc",
"FIXME: JDBC driver confuses the transaction state, "
"may need separate XA implementation",
),
no_support(
"mysql",
"recent MySQL communiity editions have too many issues "
"(late 2016), disabling for now",
),
NotPredicate(
LambdaPredicate(
pg_prepared_transaction,
"max_prepared_transactions not available or zero",
)
),
]
)
@property
def two_phase_recovery(self):
return self.two_phase_transactions + (
skip_if("mysql", "crashes on most mariadb and mysql versions")
)
@property
def views(self):
"""Target database must support VIEWs."""
return skip_if("drizzle", "no VIEW support")
@property
def empty_strings_varchar(self):
"""
target database can persist/return an empty string with a varchar.
"""
return fails_if(
["oracle"], "oracle converts empty strings to a blank space"
)
@property
def empty_strings_text(self):
"""target database can persist/return an empty string with an
unbounded text."""
return fails_if(
["oracle"], "oracle converts empty strings to a blank space"
)
@property
def expressions_against_unbounded_text(self):
"""target database supports use of an unbounded textual field in a
WHERE clause."""
return fails_if(
["oracle"],
"ORA-00932: inconsistent datatypes: expected - got CLOB",
)
@property
def unicode_data(self):
"""target drive must support unicode data stored in columns."""
return skip_if([no_support("sybase", "no unicode driver support")])
@property
def unicode_connections(self):
"""
Target driver must support some encoding of Unicode across the wire.
"""
# TODO: expand to exclude MySQLdb versions w/ broken unicode
return skip_if(
[exclude("mysql", "<", (4, 1, 1), "no unicode connection support")]
)
@property
def unicode_ddl(self):
"""Target driver must support some degree of non-ascii symbol names."""
# TODO: expand to exclude MySQLdb versions w/ broken unicode
return skip_if(
[
no_support("oracle", "FIXME: no support in database?"),
no_support("sybase", "FIXME: guessing, needs confirmation"),
no_support("mssql+pymssql", "no FreeTDS support"),
LambdaPredicate(
lambda config: against(config, "mysql+mysqlconnector")
and config.db.dialect._mysqlconnector_version_info > (2, 0)
and util.py2k,
"bug in mysqlconnector 2.0",
),
exclude(
"mysql", "<", (4, 1, 1), "no unicode connection support"
),
]
)
@property
def emulated_lastrowid(self):
""""target dialect retrieves cursor.lastrowid or an equivalent
after an insert() construct executes.
"""
return fails_on_everything_except(
"mysql", "sqlite+pysqlite", "sqlite+pysqlcipher", "sybase", "mssql"
)
@property
def implements_get_lastrowid(self):
return skip_if([no_support("sybase", "not supported by database")])
@property
def dbapi_lastrowid(self):
""""target backend includes a 'lastrowid' accessor on the DBAPI
cursor object.
"""
return skip_if(
"mssql+pymssql", "crashes on pymssql"
) + fails_on_everything_except(
"mysql", "sqlite+pysqlite", "sqlite+pysqlcipher"
)
@property
def nullsordering(self):
"""Target backends that support nulls ordering."""
return fails_on_everything_except(
"postgresql", "oracle", "firebird", "sqlite >= 3.30.0"
)
@property
def reflects_pk_names(self):
"""Target driver reflects the name of primary key constraints."""
return fails_on_everything_except(
"postgresql", "oracle", "mssql", "sybase", "sqlite"
)
@property
def nested_aggregates(self):
"""target database can select an aggregate from a subquery that's
also using an aggregate"""
return skip_if(["mssql", "sqlite"])
@property
def array_type(self):
return only_on(
[
lambda config: against(config, "postgresql")
and not against(config, "+pg8000")
and not against(config, "+zxjdbc")
]
)
@property
def json_type(self):
return only_on(
[
lambda config: against(config, "mysql")
and (
(
not config.db.dialect._is_mariadb
and against(config, "mysql >= 5.7")
)
or (
config.db.dialect._mariadb_normalized_version_info
>= (10, 2, 7)
)
),
"postgresql >= 9.3",
self._sqlite_json,
]
)
@property
def json_index_supplementary_unicode_element(self):
# for sqlite see https://bugs.python.org/issue38749
return skip_if(
[
lambda config: against(config, "mysql")
and config.db.dialect._is_mariadb,
"sqlite",
]
)
def _sqlite_file_db(self, config):
return against(config, "sqlite") and config.db.dialect._is_url_file_db(
config.db.url
)
def _sqlite_memory_db(self, config):
return against(
config, "sqlite"
) and not config.db.dialect._is_url_file_db(config.db.url)
def _sqlite_json(self, config):
if not against(config, "sqlite >= 3.9"):
return False
else:
with config.db.connect() as conn:
try:
return (
conn.scalar(
"""select json_extract('{"foo": "bar"}', """
"""'$."foo"')"""
)
== "bar"
)
except exc.DBAPIError:
return False
@property
def reflects_json_type(self):
return only_on(
[
lambda config: against(config, "mysql >= 5.7")
and not config.db.dialect._is_mariadb,
"postgresql >= 9.3",
"sqlite >= 3.9",
]
)
@property
def json_array_indexes(self):
return self.json_type + fails_if("+pg8000")
@property
def datetime_literals(self):
"""target dialect supports rendering of a date, time, or datetime as a
literal string, e.g. via the TypeEngine.literal_processor() method.
"""
return fails_on_everything_except("sqlite")
@property
def datetime(self):
"""target dialect supports representation of Python
datetime.datetime() objects."""
return exclusions.open()
@property
def datetime_microseconds(self):
"""target dialect supports representation of Python
datetime.datetime() with microsecond objects."""
return skip_if(
["mssql", "mysql", "firebird", "+zxjdbc", "oracle", "sybase"]
)
@property
def timestamp_microseconds(self):
"""target dialect supports representation of Python
datetime.datetime() with microsecond objects but only
if TIMESTAMP is used."""
return only_on(["oracle"])
@property
def datetime_historic(self):
"""target dialect supports representation of Python
datetime.datetime() objects with historic (pre 1900) values."""
return succeeds_if(["sqlite", "postgresql", "firebird"])
@property
def date(self):
"""target dialect supports representation of Python
datetime.date() objects."""
return exclusions.open()
@property
def date_coerces_from_datetime(self):
"""target dialect accepts a datetime object as the target
of a date column."""
# does not work as of pyodbc 4.0.22
return fails_on("mysql+mysqlconnector") + skip_if("mssql+pyodbc")
@property
def date_historic(self):
"""target dialect supports representation of Python
        datetime.date() objects with historic (pre 1900) values."""
return succeeds_if(["sqlite", "postgresql", "firebird"])
@property
def time(self):
"""target dialect supports representation of Python
datetime.time() objects."""
return skip_if(["oracle"])
@property
def time_microseconds(self):
"""target dialect supports representation of Python
datetime.time() with microsecond objects."""
return skip_if(
["mssql", "mysql", "firebird", "+zxjdbc", "oracle", "sybase"]
)
@property
def precision_numerics_general(self):
"""target backend has general support for moderately high-precision
numerics."""
return exclusions.open()
@property
def precision_numerics_enotation_small(self):
"""target backend supports Decimal() objects using E notation
to represent very small values."""
# NOTE: this exclusion isn't used in current tests.
return exclusions.open()
@property
def precision_numerics_enotation_large(self):
"""target backend supports Decimal() objects using E notation
to represent very large values."""
return fails_if(
[
(
"sybase+pyodbc",
None,
None,
"Don't know how do get these values through "
"FreeTDS + Sybase",
),
("firebird", None, None, "Precision must be from 1 to 18"),
]
)
@property
def precision_numerics_many_significant_digits(self):
"""target backend supports values with many digits on both sides,
such as 319438950232418390.273596, 87673.594069654243
"""
def broken_cx_oracle(config):
return (
against(config, "oracle+cx_oracle")
and config.db.dialect.cx_oracle_ver <= (6, 0, 2)
and config.db.dialect.cx_oracle_ver > (6,)
)
return fails_if(
[
("sqlite", None, None, "TODO"),
("firebird", None, None, "Precision must be from 1 to 18"),
("sybase+pysybase", None, None, "TODO"),
]
)
@property
def precision_numerics_retains_significant_digits(self):
"""A precision numeric type will return empty significant digits,
i.e. a value such as 10.000 will come back in Decimal form with
the .000 maintained."""
return fails_if(
[
("oracle", None, None, "driver doesn't do this automatically"),
(
"firebird",
None,
None,
"database and/or driver truncates decimal places.",
),
]
)
@property
def precision_generic_float_type(self):
"""target backend will return native floating point numbers with at
least seven decimal places when using the generic Float type."""
return fails_if(
[
(
"mysql",
None,
None,
"mysql FLOAT type only returns 4 decimals",
),
(
"firebird",
None,
None,
"firebird FLOAT type isn't high precision",
),
]
)
@property
def floats_to_four_decimals(self):
return fails_if(
[
("mysql+oursql", None, None, "Floating point error"),
(
"firebird",
None,
None,
"Firebird still has FP inaccuracy even "
"with only four decimal places",
),
(
"mssql+pyodbc",
None,
None,
"mssql+pyodbc has FP inaccuracy even with "
"only four decimal places ",
),
(
"mssql+pymssql",
None,
None,
"mssql+pymssql has FP inaccuracy even with "
"only four decimal places ",
),
(
"postgresql+pg8000",
None,
None,
"postgresql+pg8000 has FP inaccuracy even with "
"only four decimal places ",
),
(
"postgresql+psycopg2cffi",
None,
None,
"postgresql+psycopg2cffi has FP inaccuracy even with "
"only four decimal places ",
),
]
)
@property
def implicit_decimal_binds(self):
"""target backend will return a selected Decimal as a Decimal, not
a string.
e.g.::
expr = decimal.Decimal("15.7563")
value = e.scalar(
select([literal(expr)])
)
assert value == expr
See :ticket:`4036`
"""
# fixed for mysqlclient in
# https://github.com/PyMySQL/mysqlclient-python/commit/68b9662918577fc05be9610ef4824a00f2b051b0
def check(config):
if against(config, "mysql+mysqldb"):
# can remove once post 1.3.13 is released
try:
from MySQLdb import converters
from decimal import Decimal
return Decimal not in converters.conversions
except:
return True
return against(
config, "mysql+mysqldb"
) and config.db.dialect._mysql_dbapi_version <= (1, 3, 13)
return exclusions.fails_on(check, "fixed for mysqlclient post 1.3.13")
@property
def fetch_null_from_numeric(self):
return skip_if(("mssql+pyodbc", None, None, "crashes due to bug #351"))
@property
def duplicate_key_raises_integrity_error(self):
return fails_on("postgresql+pg8000")
def _has_pg_extension(self, name):
def check(config):
if not against(config, "postgresql"):
return False
count = config.db.scalar(
"SELECT count(*) FROM pg_extension "
"WHERE extname='%s'" % name
)
return bool(count)
return only_if(check, "needs %s extension" % name)
@property
def hstore(self):
return self._has_pg_extension("hstore")
@property
def btree_gist(self):
return self._has_pg_extension("btree_gist")
@property
def range_types(self):
def check_range_types(config):
if not against(
config, ["postgresql+psycopg2", "postgresql+psycopg2cffi"]
):
return False
try:
config.db.scalar("select '[1,2)'::int4range;")
return True
except Exception:
return False
return only_if(check_range_types)
@property
def oracle_test_dblink(self):
return skip_if(
lambda config: not config.file_config.has_option(
"sqla_testing", "oracle_db_link"
),
"oracle_db_link option not specified in config",
)
@property
def postgresql_test_dblink(self):
return skip_if(
lambda config: not config.file_config.has_option(
"sqla_testing", "postgres_test_db_link"
),
"postgres_test_db_link option not specified in config",
)
@property
def postgresql_jsonb(self):
return only_on("postgresql >= 9.4") + skip_if(
lambda config: config.db.dialect.driver == "pg8000"
and config.db.dialect._dbapi_version <= (1, 10, 1)
)
@property
def psycopg2_native_json(self):
return self.psycopg2_compatibility
@property
def psycopg2_native_hstore(self):
return self.psycopg2_compatibility
@property
def psycopg2_compatibility(self):
return only_on(["postgresql+psycopg2", "postgresql+psycopg2cffi"])
@property
def psycopg2_or_pg8000_compatibility(self):
return only_on(
[
"postgresql+psycopg2",
"postgresql+psycopg2cffi",
"postgresql+pg8000",
]
)
@property
def percent_schema_names(self):
return skip_if(
[
(
"+psycopg2",
None,
None,
"psycopg2 2.4 no longer accepts percent "
"sign in bind placeholders",
),
(
"+psycopg2cffi",
None,
None,
"psycopg2cffi does not accept percent signs in "
"bind placeholders",
),
("mysql", None, None, "executemany() doesn't work here"),
]
)
@property
def order_by_label_with_expression(self):
return fails_if(
[
(
"firebird",
None,
None,
"kinterbasdb doesn't send full type information",
),
("postgresql", None, None, "only simple labels allowed"),
("sybase", None, None, "only simple labels allowed"),
("mssql", None, None, "only simple labels allowed"),
]
)
def get_order_by_collation(self, config):
lookup = {
# will raise without quoting
"postgresql": "POSIX",
# note MySQL databases need to be created w/ utf8mb4 charset
# for the test suite
"mysql": "utf8mb4_bin",
"sqlite": "NOCASE",
# will raise *with* quoting
"mssql": "Latin1_General_CI_AS",
}
try:
return lookup[config.db.name]
except KeyError:
raise NotImplementedError()
@property
def skip_mysql_on_windows(self):
"""Catchall for a large variety of MySQL on Windows failures"""
return skip_if(
self._has_mysql_on_windows, "Not supported on MySQL + Windows"
)
@property
def mssql_freetds(self):
return only_on(["mssql+pymssql"])
@property
def ad_hoc_engines(self):
return exclusions.skip_if(
["oracle"],
"works, but Oracle just gets tired with "
"this much connection activity",
)
@property
def no_mssql_freetds(self):
return self.mssql_freetds.not_()
@property
def pyodbc_fast_executemany(self):
def has_fastexecutemany(config):
if not against(config, "mssql+pyodbc"):
return False
if config.db.dialect._dbapi_version() < (4, 0, 19):
return False
with config.db.connect() as conn:
drivername = conn.connection.connection.getinfo(
config.db.dialect.dbapi.SQL_DRIVER_NAME
)
# on linux this is something like 'libmsodbcsql-13.1.so.9.2'.
# on Windows this is something like 'msodbcsql17.dll'.
return "msodbc" in drivername
return only_if(
has_fastexecutemany, "only on pyodbc > 4.0.19 w/ msodbc driver"
)
@property
def python_fixed_issue_8743(self):
return exclusions.skip_if(
lambda: sys.version_info < (2, 7, 8),
"Python issue 8743 fixed in Python 2.7.8",
)
@property
def granular_timezone(self):
"""the datetime.timezone class, or SQLAlchemy's port, supports
seconds and microseconds.
SQLAlchemy ported the Python 3.7 version for Python 2, so
        it passes on that. On Python 3 versions before 3.7, it is not supported.
"""
return exclusions.skip_if(
lambda: sys.version_info >= (3,) and sys.version_info < (3, 7)
)
@property
def selectone(self):
"""target driver must support the literal statement 'select 1'"""
return skip_if(
["oracle", "firebird"], "non-standard SELECT scalar syntax"
)
@property
def mysql_for_update(self):
return skip_if(
"mysql+mysqlconnector",
"lock-sensitive operations crash on mysqlconnector",
)
@property
def mysql_fsp(self):
return only_if("mysql >= 5.6.4")
@property
def mysql_fully_case_sensitive(self):
return only_if(self._has_mysql_fully_case_sensitive)
@property
def mysql_zero_date(self):
def check(config):
if not against(config, "mysql"):
return False
row = config.db.execute("show variables like 'sql_mode'").first()
return not row or "NO_ZERO_DATE" not in row[1]
return only_if(check)
@property
def mysql_non_strict(self):
def check(config):
if not against(config, "mysql"):
return False
row = config.db.execute("show variables like 'sql_mode'").first()
return not row or "STRICT_TRANS_TABLES" not in row[1]
return only_if(check)
@property
def mysql_ngram_fulltext(self):
def check(config):
return (
against(config, "mysql")
and not config.db.dialect._is_mariadb
and config.db.dialect.server_version_info >= (5, 7)
)
return only_if(check)
def _mariadb_102(self, config):
return (
against(config, "mysql")
and config.db.dialect._is_mariadb
and config.db.dialect._mariadb_normalized_version_info > (10, 2)
)
def _mysql_and_check_constraints_exist(self, config):
# 1. we have mysql / mariadb and
# 2. it enforces check constraints
if exclusions.against(config, "mysql"):
if config.db.dialect._is_mariadb:
norm_version_info = (
config.db.dialect._mariadb_normalized_version_info
)
return norm_version_info >= (10, 2)
else:
norm_version_info = config.db.dialect.server_version_info
return norm_version_info >= (8, 0, 16)
else:
return False
def _mysql_check_constraints_exist(self, config):
        # 1. we don't have mysql / mariadb, or
# 2. we have mysql / mariadb that enforces check constraints
return not exclusions.against(
config, "mysql"
) or self._mysql_and_check_constraints_exist(config)
def _mysql_check_constraints_dont_exist(self, config):
# 1. we have mysql / mariadb and
        # 2. they don't enforce check constraints
return not self._mysql_check_constraints_exist(config)
def _mysql_not_mariadb_102(self, config):
return against(config, "mysql") and (
not config.db.dialect._is_mariadb
or config.db.dialect._mariadb_normalized_version_info < (10, 2)
)
def _mysql_not_mariadb_103(self, config):
return against(config, "mysql") and (
not config.db.dialect._is_mariadb
or config.db.dialect._mariadb_normalized_version_info < (10, 3)
)
def _mysql_not_mariadb_104(self, config):
return against(config, "mysql") and (
not config.db.dialect._is_mariadb
or config.db.dialect._mariadb_normalized_version_info < (10, 4)
)
def _has_mysql_on_windows(self, config):
return (
against(config, "mysql")
and config.db.dialect._detect_casing(config.db) == 1
)
def _has_mysql_fully_case_sensitive(self, config):
return (
against(config, "mysql")
and config.db.dialect._detect_casing(config.db) == 0
)
@property
def postgresql_utf8_server_encoding(self):
return only_if(
lambda config: against(config, "postgresql")
and config.db.scalar("show server_encoding").lower() == "utf8"
)
@property
def cxoracle6_or_greater(self):
return only_if(
lambda config: against(config, "oracle+cx_oracle")
and config.db.dialect.cx_oracle_ver >= (6,)
)
@property
def oracle5x(self):
return only_if(
lambda config: against(config, "oracle+cx_oracle")
and config.db.dialect.cx_oracle_ver < (6,)
)
@property
def computed_columns(self):
return skip_if(["postgresql < 12", "sqlite", "mysql < 5.7"])
@property
def python_profiling_backend(self):
return only_on([self._sqlite_memory_db])
@property
def computed_columns_stored(self):
return self.computed_columns + skip_if(["oracle", "firebird"])
@property
def computed_columns_virtual(self):
return self.computed_columns + skip_if(["postgresql", "firebird"])
@property
def computed_columns_default_persisted(self):
return self.computed_columns + only_if("postgresql")
@property
def computed_columns_reflect_persisted(self):
return self.computed_columns + skip_if("oracle")
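    # Usage note (an assumption about the wider test suite, for orientation
    # only): the properties above are typically consumed by tests either as
    # decorators, e.g. @testing.requires.computed_columns on a test method,
    # or via a class-level __requires__ = ("computed_columns",) tuple; the
    # requirement names are simply the property names on this class.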
|
|
"""
A full-blown application demoing a domain-specific use case with Mayavi:
interactive design of coils.
This is an example of electromagnetic coil design: an application is built to
enable a user to interactively position current loops while visualizing the
resulting magnetic field. For this purpose, it is best to use object-oriented
programming. Each current loop is written as an object (the `Loop` class), with
position, radius and direction attributes, and that knows how to calculate the
magnetic field it generates: its `Bnorm` is a property that is recomputed when
the loop characteristics change. These loop objects are available to the main
application class as a list. The total magnetic field created is the sum of
each individual magnetic field. It can be visualized via a Mayavi scene
embedded in the application class. As we use Traited objects for the current
loops, a dialog enabling modification of their attributes can be generated by
Traits and embedded in our application.
The full power of Mayavi is available to the application. Via the pipeline tree
view, the user can modify the visualization. Familiar interaction and movements
are possible in the figure. So is saving the visualization, or loading data. In
addition, as the visualization model, described by the pipeline, is separated
from the data that is visualized, contained in the data source, any
visualization module added by the user will update when coils are added or
changed.
Simpler examples of magnetic field visualization can be found on
:ref:`example_magnetic_field_lines` and :ref:`example_magnetic_field`.
The material required to understand this example is covered in section
:ref:`builing_applications`.
"""
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# Copyright (c) 2009, Enthought, Inc.
# License: BSD Style.
# Major scientific library imports
import numpy as np
from scipy import linalg, special
# Enthought library imports:
from traits.api import HasTraits, Array, CFloat, List, \
Instance, on_trait_change, Property
from traitsui.api import Item, View, ListEditor, \
HSplit, VSplit
from mayavi.core.ui.api import EngineView, MlabSceneModel, \
SceneEditor
##############################################################################
# Module-level variables
# The grid of points on which we want to evaluate the field
X, Y, Z = np.mgrid[-0.15:0.15:20j, -0.15:0.15:20j, -0.15:0.15:20j]
# Avoid rounding issues :
f = 1e4 # this gives the precision we are interested by :
X = np.round(X * f) / f
Y = np.round(Y * f) / f
Z = np.round(Z * f) / f
##############################################################################
# A current loop class
class Loop(HasTraits):
""" A current loop class.
"""
#-------------------------------------------------------------------------
# Public traits
#-------------------------------------------------------------------------
direction = Array(float, value=(0, 0, 1), cols=3,
shape=(3,), desc='directing vector of the loop',
enter_set=True, auto_set=False)
radius = CFloat(0.1, desc='radius of the loop',
enter_set=True, auto_set=False)
position = Array(float, value=(0, 0, 0), cols=3,
shape=(3,), desc='position of the center of the loop',
enter_set=True, auto_set=False)
_plot = None
Bnorm = Property(depends_on='direction,position,radius')
view = View('position', 'direction', 'radius', '_')
#-------------------------------------------------------------------------
# Loop interface
#-------------------------------------------------------------------------
def base_vectors(self):
""" Returns 3 orthognal base vectors, the first one colinear to
the axis of the loop.
"""
# normalize n
        n = self.direction / np.sqrt((self.direction**2).sum(axis=-1))
# choose two vectors perpendicular to n
        # choice is arbitrary since the coil is symmetric about n
if np.abs(n[0])==1 :
l = np.r_[n[2], 0, -n[0]]
else:
l = np.r_[0, n[2], -n[1]]
        l /= np.sqrt((l**2).sum(axis=-1))
m = np.cross(n, l)
return n, l, m
@on_trait_change('Bnorm')
def redraw(self):
if hasattr(self, 'app') and self.app.scene._renderer is not None:
self.display()
self.app.visualize_field()
def display(self):
"""
Display the coil in the 3D view.
"""
n, l, m = self.base_vectors()
theta = np.linspace(0, 2*np.pi, 30)[..., np.newaxis]
coil = self.radius*(np.sin(theta)*l + np.cos(theta)*m)
coil += self.position
coil_x, coil_y, coil_z = coil.T
if self._plot is None:
self._plot = self.app.scene.mlab.plot3d(coil_x, coil_y, coil_z,
tube_radius=0.007, color=(0, 0, 1),
name='Coil')
else:
self._plot.mlab_source.set(x=coil_x, y=coil_y, z=coil_z)
def _get_Bnorm(self):
"""
returns the magnetic field for the current loop calculated
from eqns (1) and (2) in Phys Rev A Vol. 35, N 4, pp. 1535-1546; 1987.
"""
### Translate the coordinates in the coil's frame
n, l, m = self.base_vectors()
R = self.radius
r0 = self.position
r = np.c_[np.ravel(X), np.ravel(Y), np.ravel(Z)]
# transformation matrix coil frame to lab frame
trans = np.vstack((l, m, n))
r -= r0 #point location from center of coil
r = np.dot(r, linalg.inv(trans) ) #transform vector to coil frame
#### calculate field
# express the coordinates in polar form
x = r[:, 0]
y = r[:, 1]
z = r[:, 2]
rho = np.sqrt(x**2 + y**2)
theta = np.arctan2(x, y)
E = special.ellipe((4 * R * rho)/( (R + rho)**2 + z**2))
K = special.ellipk((4 * R * rho)/( (R + rho)**2 + z**2))
Bz = 1/np.sqrt((R + rho)**2 + z**2) * (
K
+ E * (R**2 - rho**2 - z**2)/((R - rho)**2 + z**2)
)
Brho = z/(rho*np.sqrt((R + rho)**2 + z**2)) * (
-K
+ E * (R**2 + rho**2 + z**2)/((R - rho)**2 + z**2)
)
        # On the axis of the coil we get a division by zero here. This returns
        # a NaN where the field is actually zero:
Brho[np.isnan(Brho)] = 0
B = np.c_[np.cos(theta)*Brho, np.sin(theta)*Brho, Bz ]
# Rotate the field back in the lab's frame
B = np.dot(B, trans)
Bx, By, Bz = B.T
Bx = np.reshape(Bx, X.shape)
By = np.reshape(By, X.shape)
Bz = np.reshape(Bz, X.shape)
Bnorm = np.sqrt(Bx**2 + By**2 + Bz**2)
# We need to threshold ourselves, rather than with VTK, to be able
# to use an ImageData
Bmax = 10 * np.median(Bnorm)
Bx[Bnorm > Bmax] = np.NAN
By[Bnorm > Bmax] = np.NAN
Bz[Bnorm > Bmax] = np.NAN
Bnorm[Bnorm > Bmax] = np.NAN
self.Bx = Bx
self.By = By
self.Bz = Bz
return Bnorm
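##############################################################################
# Illustrative sketch (hypothetical class, unused by the application): the
# Traits pattern described in the module docstring, in miniature. A Property
# declared with depends_on is recomputed through its _get_<name> method and
# fires change notifications when any dependency changes, exactly as
# Loop.Bnorm does above.
class _ExampleSquare(HasTraits):
    side = CFloat(1.0)
    area = Property(depends_on='side')
    def _get_area(self):
        # Recomputed whenever 'side' changes.
        return self.side ** 2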
##############################################################################
# The application object
class Application(HasTraits):
scene = Instance(MlabSceneModel, (), editor=SceneEditor())
# The mayavi engine view.
engine_view = Instance(EngineView)
coils = List(Instance(Loop, (), allow_none=False),
editor=ListEditor(style='custom'),
value=[ Loop(position=(0, 0, -0.05), ),
Loop(position=(0, 0, 0.05), ), ])
Bx = Array(value=np.zeros_like(X))
By = Array(value=np.zeros_like(X))
Bz = Array(value=np.zeros_like(X))
Bnorm = Array(value=np.zeros_like(X))
vector_field = None
def __init__(self, **traits):
HasTraits.__init__(self, **traits)
self.engine_view = EngineView(engine=self.scene.engine)
@on_trait_change('scene.activated,coils')
def init_view(self):
if self.scene._renderer is not None:
self.scene.scene_editor.background = (0, 0, 0)
for coil in self.coils:
coil.app = self
coil.display()
self.visualize_field()
def visualize_field(self):
self.Bx = np.zeros_like(X)
self.By = np.zeros_like(X)
self.Bz = np.zeros_like(X)
self.Bnorm = np.zeros_like(X)
self.scene.scene.disable_render = True
for coil in self.coils:
self.Bx += coil.Bx
self.By += coil.By
self.Bz += coil.Bz
self.Bnorm = np.sqrt(self.Bx**2 + self.By**2 + self.Bz**2)
if self.vector_field is None:
self.vector_field = self.scene.mlab.pipeline.vector_field(
X, Y, Z, self.Bx, self.By, self.Bz,
scalars=self.Bnorm,
name='B field')
vectors = self.scene.mlab.pipeline.vectors(self.vector_field,
mode='arrow', resolution=10,
mask_points=6, colormap='YlOrRd',
scale_factor=2*np.abs(X[0,0,0]
-X[1,1,1]) )
vectors.module_manager.vector_lut_manager.reverse_lut = True
vectors.glyph.mask_points.random_mode = False
self.scene.mlab.axes()
self.scp = self.scene.mlab.pipeline.scalar_cut_plane(
self.vector_field,
colormap='hot')
else:
# Modify in place the data source. The visualization will
            # update automatically
self.vector_field.mlab_source.set(u=self.Bx, v=self.By, w=self.Bz,
scalars=self.Bnorm)
self.scene.scene.disable_render = False
view = View(HSplit(
VSplit(Item(name='engine_view',
style='custom',
resizable=True),
Item('coils', springy=True),
show_labels=False),
'scene',
show_labels=False),
resizable=True,
title='Coils...',
height=0.8,
width=0.8,
)
##############################################################################
if __name__ == '__main__':
app = Application()
app.configure_traits()
|
|
#!/usr/bin/env python
from pyomxplayer import OMXPlayer
import RPi.GPIO as GPIO
import pprint
import random
import socket
import struct
import sys
import time
import traceback
DRAWERS = 9
MCAST_GRP = '224.19.79.1'
MCAST_PORT = 9999
MOVIE_PATH = '/usr/share/lumiere/media'
MOVIE_SUFFIX = 'mp4'
MOVIE_LIST = [ '%s/%d.%s' % (MOVIE_PATH, n, MOVIE_SUFFIX) for n in range(1, 10) ]
PROJECTOR_SUPPLY_PIN = 26 #BOARD P1 pin number corresponds with GPIO7 on Rev2 RPi
PROJECTOR_ON = True
PROJECTOR_OFF = False
STATE_OPEN = 'o'
STATE_CLOSED = 'c'
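# Datagram format (inferred from the parsing in main() below): each multicast
# message appears to be a 19-byte ASCII string of the form 'cmd:args', e.g.
# 's:0,1,0,0,0,0,0,0,0' -- the 's' (state) command followed by one 0/1 flag
# per drawer, where a flag of 0 seems to mean "drawer open" (those drawers are
# added to the playlist) and 1 means "closed".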
_now_playing = -1
_omxplayer = None
def stop_movie():
global _omxplayer
global _now_playing
if _omxplayer != None:
print 'Stopping movie %d:%s' % (_now_playing+1, MOVIE_LIST[_now_playing])
if _omxplayer.isAlive():
_omxplayer.stop()
while _omxplayer.isAlive():
print '- Waiting for player to stop'
time.sleep(0.1)
_omxplayer.close()
_omxplayer = None
        _now_playing = -1
def start_movie(index):
global _omxplayer
global _now_playing
if index >= len(MOVIE_LIST):
return -1
stop_movie()
print 'Starting movie %d:%s' % (index+1, MOVIE_LIST[index])
_omxplayer = OMXPlayer(MOVIE_LIST[index], args='-b', start_playback=True)
_now_playing = index
GPIO.output(PROJECTOR_SUPPLY_PIN, PROJECTOR_ON)
return index
def start_random_movie_from_list(l=[]):
try:
return start_movie(random.choice(l))
except IndexError:
pass
return -1
def is_movie_playing():
global _omxplayer
if _omxplayer != None:
return _omxplayer.isAlive()
return False
def current_movie_playing():
global _now_playing
return _now_playing
def main():
previous_state = [True] * DRAWERS
playlist = set()
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
try:
host = '0.0.0.0'
timeval=struct.pack("2I", 0, 500000) # timeout 0.5s
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVTIMEO, timeval)
sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 32)
sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_LOOP, 1)
sock.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_IF, socket.inet_aton(host))
sock.setsockopt(socket.SOL_IP, socket.IP_ADD_MEMBERSHIP,
socket.inet_aton(MCAST_GRP) + socket.inet_aton(host))
sock.bind((MCAST_GRP, MCAST_PORT))
except AttributeError:
pass
random.seed()
while 1:
drawer = -1
data = ''
try:
data, addr = sock.recvfrom(512)
#print '[%s] | received from %s: %s' % (time.ctime(), addr, data)
except socket.error, e:
if e.errno == 11:
if not is_movie_playing():
start_random_movie_from_list(list(playlist))
else:
print 'Exception: %s' % str(e)
continue
try:
if len(data) != 19:
print 'expected 19 bytes got %d' % len(data)
continue
cmd,args = data.split(':')
except ValueError, e:
print 'wrong data format: %s (%s)' % (data, str(e))
continue
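# Expected datagram format (inferred from the 19-byte check above): 's:' followed
# by nine comma-separated 0/1 flags, one per drawer, e.g. 's:1,1,0,1,1,1,1,1,1'.
# A 0 means the drawer is open, a 1 means closed, matching previous_state's
# initial list of True values.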
if cmd == 's':
new_state = [bool(int(i)) for i in args.split(',')]
opened = {i for i in range(0, DRAWERS)
if new_state[i] != previous_state[i] and
not new_state[i]}
closed = {i for i in range(0, DRAWERS)
if new_state[i] != previous_state[i] and
new_state[i]}
start_random = False
start_new = False
if len(opened) > 0:
print 'New opened: %s' % (str(opened))
if len(closed) > 0:
print 'New closed: %s' % (str(closed))
try:
for i in closed:
if i in playlist:
playlist.remove(i)
if i == current_movie_playing():
stop_movie()
start_random = True
if len(playlist) == 0:
GPIO.output(PROJECTOR_SUPPLY_PIN, PROJECTOR_OFF)
if len(closed) > 0:
print 'playlist after closing: %s' % (list(playlist))
except IndexError:
pass
try:
for i in opened:
if i not in playlist:
playlist.add(i)
start_new = True
if len(opened) > 0:
print 'playlist after opening: %s' % (list(playlist))
except IndexError:
pass
try:
if start_new:
print 'starting new movie from opened list'
start_random_movie_from_list(list(opened))
elif start_random:
print 'starting random movie'
start_random_movie_from_list(list(playlist))
elif not is_movie_playing():
start_movie(random.choice(list(playlist)))
except IndexError:
pass
previous_state = list(new_state)
if __name__ == '__main__':
try:
GPIO.setmode(GPIO.BOARD)
GPIO.setup(PROJECTOR_SUPPLY_PIN, GPIO.OUT)
GPIO.output(PROJECTOR_SUPPLY_PIN, PROJECTOR_OFF)
main()
except KeyboardInterrupt:
print 'Exiting'
except Exception:
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_traceback, limit=2, file=sys.stdout)
finally:
print 'Cleaning up GPIO settings'
stop_movie()
GPIO.output(PROJECTOR_SUPPLY_PIN, PROJECTOR_OFF)
#GPIO.cleanup()
|
|
"""Unit tests for the subject routes."""
import unittest
from unittest.mock import Mock, patch
from shared.model.report import Report
from shared.utils.type import SubjectId
from external.routes import (
delete_subject,
get_subject_measurements,
post_move_subject,
post_new_subject,
post_subject_attribute,
post_subject_copy,
)
from ...fixtures import METRIC_ID, REPORT_ID, REPORT_ID2, SUBJECT_ID, SUBJECT_ID2, create_report
class GetSubjectTest(unittest.TestCase):
"""Unit tests for the get subject measurements endpoint."""
def setUp(self):
"""Override to create a mock database fixture."""
self.database = Mock()
def test_get_subject_measurements(self):
"""Tests that the measurements for the requested metric are returned."""
# Mock reports collection
self.database.reports.find_one.return_value = {"subjects": {SUBJECT_ID: {"metrics": {METRIC_ID: {}}}}}
# Mock measurements collection
self.database.measurements.find_one.return_value = dict(start="1")
self.database.measurements.find.return_value = [dict(start="0"), dict(start="1")]
self.assertEqual(
dict(measurements=[dict(start="0"), dict(start="1")]), get_subject_measurements(SUBJECT_ID, self.database)
)
@patch("bottle.request")
class PostSubjectAttributeTest(unittest.TestCase):
"""Unit tests for the post subject report attribute route."""
def setUp(self):
"""Override to create a mock database fixture."""
self.database = Mock()
self.report = Report(
None,
dict(
_id="id",
report_uuid=REPORT_ID,
title="Report",
subjects={SUBJECT_ID: dict(name="subject1"), SUBJECT_ID2: dict(type="subject_type")},
),
)
self.database.reports.find.return_value = [self.report]
self.database.measurements.find.return_value = []
self.database.datamodels.find_one.return_value = dict(
_id="id", subjects=dict(subject_type=dict(name="subject2"))
)
self.email = "john@example.org"
self.database.sessions.find_one.return_value = dict(user="John", email=self.email)
def assert_delta(self, delta: str, subject_id: SubjectId, report: dict) -> None:
"""Check that the delta is correct."""
self.assertEqual(
dict(uuids=[REPORT_ID, subject_id], email=self.email, description=f"John changed the {delta}."),
report["delta"],
)
def test_post_subject_name(self, request):
"""Test that the subject name can be changed."""
request.json = dict(name="new name")
self.assertEqual(dict(ok=True), post_subject_attribute(SUBJECT_ID, "name", self.database))
self.database.reports.insert_one.assert_called_once_with(self.report)
updated_report = self.database.reports.insert_one.call_args[0][0]
self.assert_delta(
"name of subject 'subject1' in report 'Report' from 'subject1' to 'new name'", SUBJECT_ID, updated_report
)
def test_post_position_first(self, request):
"""Test that a subject can be moved to the top."""
request.json = dict(position="first")
self.assertEqual(dict(ok=True), post_subject_attribute(SUBJECT_ID2, "position", self.database))
self.database.reports.insert_one.assert_called_once_with(self.report)
updated_report = self.database.reports.insert_one.call_args[0][0]
self.assertEqual([SUBJECT_ID2, SUBJECT_ID], list(updated_report["subjects"].keys()))
self.assert_delta(
"position of subject 'subject2' in report 'Report' from '1' to '0'", SUBJECT_ID2, updated_report
)
def test_post_position_last(self, request):
"""Test that a subject can be moved to the bottom."""
request.json = dict(position="last")
self.assertEqual(dict(ok=True), post_subject_attribute(SUBJECT_ID, "position", self.database))
self.database.reports.insert_one.assert_called_once_with(self.report)
updated_report = self.database.reports.insert_one.call_args[0][0]
self.assertEqual([SUBJECT_ID2, SUBJECT_ID], list(updated_report["subjects"].keys()))
self.assert_delta(
"position of subject 'subject1' in report 'Report' from '0' to '1'", SUBJECT_ID, updated_report
)
def test_post_position_previous(self, request):
"""Test that a subject can be moved up."""
request.json = dict(position="previous")
self.assertEqual(dict(ok=True), post_subject_attribute(SUBJECT_ID2, "position", self.database))
self.database.reports.insert_one.assert_called_once_with(self.report)
updated_report = self.database.reports.insert_one.call_args[0][0]
self.assertEqual([SUBJECT_ID2, SUBJECT_ID], list(updated_report["subjects"].keys()))
self.assert_delta(
"position of subject 'subject2' in report 'Report' from '1' to '0'", SUBJECT_ID2, updated_report
)
def test_post_position_next(self, request):
"""Test that a subject can be moved down."""
request.json = dict(position="next")
self.assertEqual(dict(ok=True), post_subject_attribute(SUBJECT_ID, "position", self.database))
self.database.reports.insert_one.assert_called_once_with(self.report)
updated_report = self.database.reports.insert_one.call_args[0][0]
self.assertEqual([SUBJECT_ID2, SUBJECT_ID], list(updated_report["subjects"].keys()))
self.assert_delta(
"position of subject 'subject1' in report 'Report' from '0' to '1'", SUBJECT_ID, updated_report
)
def test_post_position_first_previous(self, request):
"""Test that moving the first subject up does nothing."""
request.json = dict(position="previous")
self.assertEqual(dict(ok=True), post_subject_attribute(SUBJECT_ID, "position", self.database))
self.database.reports.insert_one.assert_not_called()
def test_post_position_last_next(self, request):
"""Test that moving the last subject down does nothing."""
request.json = dict(position="next")
self.assertEqual(dict(ok=True), post_subject_attribute(SUBJECT_ID2, "position", self.database))
self.database.reports.insert_one.assert_not_called()
def test_post_unsafe_comment(self, request):
"""Test that comments are sanitized, since they are displayed as inner HTML in the frontend."""
request.json = dict(comment='Comment with script<script type="text/javascript">alert("Danger")</script>')
self.assertEqual(dict(ok=True), post_subject_attribute(SUBJECT_ID, "comment", self.database))
self.database.reports.insert_one.assert_called_once_with(self.report)
updated_report = self.database.reports.insert_one.call_args[0][0]
self.assert_delta(
"comment of subject 'subject1' in report 'Report' from '' to 'Comment with script'",
SUBJECT_ID,
updated_report,
)
class SubjectTest(unittest.TestCase):
"""Unit tests for adding and deleting subjects."""
def setUp(self):
"""Override to create a mock database fixture."""
self.database = Mock()
self.email = "jenny@example.org"
self.database.sessions.find_one.return_value = dict(user="Jenny", email=self.email)
self.report = create_report()
self.database.reports.find.return_value = [self.report]
self.database.measurements.find.return_value = []
self.database.datamodels.find_one.return_value = dict(
_id="id",
metrics=dict(metric_type=dict(name="Metric type")),
subjects=dict(subject_type=dict(name="Subject", description="")),
sources=dict(source_type=dict(name="Source type")),
)
def assert_delta(self, delta: str, uuids, report=None) -> None:
"""Check that the delta is correct."""
report = report or self.report
self.assertEqual(
dict(uuids=sorted(uuids), email=self.email, description=f"Jenny {delta}."),
report["delta"],
)
def test_add_subject(self):
"""Test that a subject can be added."""
result = post_new_subject(REPORT_ID, self.database)
self.assertTrue(result["ok"])
self.assertIn("new_subject_uuid", result)
subject_uuid = result["new_subject_uuid"]
updated_report = self.database.reports.insert_one.call_args[0][0]
self.assert_delta("created a new subject in report 'Report'", [REPORT_ID, subject_uuid], report=updated_report)
def test_copy_subject(self):
"""Test that a subject can be copied."""
result = post_subject_copy(SUBJECT_ID, REPORT_ID, self.database)
self.assertTrue(result["ok"])
self.database.reports.insert_one.assert_called_once()
updated_report = self.database.reports.insert_one.call_args[0][0]
inserted_subjects = updated_report["subjects"]
self.assertEqual(2, len(inserted_subjects))
subject_copy_uuid = list(self.report["subjects"].keys())[1]
self.assert_delta(
"copied the subject 'Subject' from report 'Report' to report 'Report'",
[REPORT_ID, subject_copy_uuid],
report=updated_report,
)
def test_delete_subject(self):
"""Test that a subject can be deleted."""
self.assertEqual(dict(ok=True), delete_subject(SUBJECT_ID, self.database))
updated_report = self.database.reports.insert_one.call_args[0][0]
self.assert_delta("deleted the subject 'Subject' from report 'Report'", [REPORT_ID, SUBJECT_ID], updated_report)
def test_move_subject(self):
"""Test that a subject can be moved to another report."""
subject = self.report["subjects"][SUBJECT_ID]
target_report = dict(_id="target_report", title="Target", report_uuid=REPORT_ID2, subjects={})
self.database.reports.find.return_value = [self.report, target_report]
self.assertEqual(dict(ok=True), post_move_subject(SUBJECT_ID, REPORT_ID2, self.database))
self.assertEqual({}, self.report["subjects"])
self.assertEqual((SUBJECT_ID, subject), next(iter(target_report["subjects"].items())))
expected_description = "moved the subject 'Subject' from report 'Report' to report 'Target'"
expected_uuids = [REPORT_ID, REPORT_ID2, SUBJECT_ID]
updated_reports = self.database.reports.insert_many.call_args[0][0]
for updated_report in updated_reports:
self.assert_delta(expected_description, expected_uuids, updated_report)
|
|
# -*- coding: utf-8 -*-
"""
Sample controller with all its actions protected.
"""
import tg
from tg import expose, flash, redirect, url, lurl, request
from tg.i18n import lazy_ugettext as l_
from molgears import model
from molgears.model import DBSession, PCompound, PHistory, PStatus, Tags, SCompound, SStatus, SFiles, SHistory, SPurity, LCompound
from molgears.model import Compound, Names, History, Efforts, User, Group, Projects, ResultsFP
from molgears.lib.base import BaseController
import transaction, os
from pkg_resources import resource_filename
from sqlalchemy import desc
from rdkit import Chem
from molgears.widgets.structure import checksmi
from datetime import datetime
from webhelpers import paginate
from tg.predicates import has_permission
from tg import cache
__all__ = ['SelectController']
#public_dirname = os.path.join(os.path.abspath(resource_filename('molgears', 'public')))
#img_dir = os.path.join(public_dirname, 'img')
#files_dir = os.path.join(public_dirname, 'files')
class SamplesController(BaseController):
"""Sample controller method"""
@expose('molgears.templates.users.samples.index')
def index(self, page=1, *args, **kw):
"""
Index controller for molecules
"""
pname = request.environ['PATH_INFO'].split('/')[1]
project = DBSession.query(Projects).filter(Projects.name==pname).first()
alltags =[tag for tag in DBSession.query(Tags).order_by('name').all() ]
from sqlalchemy import or_
compound = DBSession.query(Compound).filter(Compound.project.any(Projects.name==pname)).filter(or_(Compound.pcompounds != None, Compound.lcompounds != None))
dsc = True
tmpl = ''
selection = None
similarity = None
userid = request.identity['repoze.who.userid']
user = DBSession.query(User).filter_by(user_name=userid).first()
threshold = float(user.threshold)/100.0
items = user.items_per_page
order = "gid"
try:
if kw['search'] != u'':
search_clicked = kw['search']
else:
search_clicked = None
except Exception:
search_clicked = None
if kw:
delkw = []
for k, v in kw.iteritems():
if str(k) == 'desc' and str(v) != '1':
dsc = None
elif str(k) == 'order_by':
order = v
if str(k) != 'select' and str(k) != 'remove' and str(v) != u'':
tmpl += str(k) + '=' + str(v) + '&'
elif str(k) == 'select':
try:
if isinstance(kw['select'], basestring):
selection = [kw['select']]
else:
selection = [id for id in kw['select']]
except Exception:
selection = None
elif str(v) == u'':
delkw.append(k)
for k in delkw:
del kw[k]
if search_clicked:
try:
smiles = str(kw['smiles'])
except Exception:
smiles = None
pass
try:
method = str(kw['method'])
except Exception:
method = None
pass
if smiles:
if checksmi(smiles):
from razi.functions import functions
from razi.expression import TxtMoleculeElement
if method == 'similarity':
from razi.postgresql_rdkit import tanimoto_threshold
DBSession.execute(tanimoto_threshold.set(threshold))
query_bfp = functions.morgan_b(TxtMoleculeElement(smiles), 2)
constraint = Compound.morgan.tanimoto_similar(query_bfp)
tanimoto_sml = Compound.morgan.tanimoto_similarity(query_bfp).label('tanimoto')
limit = user.limit_sim
search = DBSession.query(Compound, tanimoto_sml).filter(constraint).filter(Compound.project.any(Projects.name==pname)).order_by(desc(tanimoto_sml)).limit(limit).all()
compound = ()
similarity = ()
for row in search:
compound += (row[0], )
similarity += (row[1], )
page_url = paginate.PageURL_WebOb(request)
currentPage = paginate.Page(compound, page, url=page_url, items_per_page=items)
return dict(length=len(compound), compound=currentPage.items, currentPage=currentPage, tmpl=tmpl, page='samples', pname=pname, alltags=alltags, similarity=similarity)
elif method == 'substructure':
constraint = Compound.structure.contains(smiles)
compound = DBSession.query(Compound).filter(constraint).filter(Compound.project.any(Projects.name==pname))
elif method == 'identity':
compound = DBSession.query(Compound).filter(Compound.structure.equals(smiles)).filter(Compound.project.any(Projects.name==pname))
else:
flash(l_(u'Smiles error'), 'warning')
redirect(request.headers['Referer'])
if kw.has_key('text_GID') and kw['text_GID'] !=u'':
try:
gid = int(kw['text_GID'])
compound = compound.filter_by(gid = gid )
except Exception as msg:
flash(l_(u'GID should be a number: %s' % msg), 'error')
redirect(request.headers['Referer'])
if kw.has_key('text_name') and kw['text_name'] !=u'':
compound = compound.filter(Compound.names.any(Names.name.like(kw['text_name'].strip().replace('*', '%'))))
if kw.has_key('text_creator') and kw['text_creator'] !=u'':
compound = compound.filter(Compound.creator.like(kw['text_creator'].replace('*', '%')))
if kw.has_key('text_notes') and kw['text_notes'] !=u'':
compound = compound.filter(Compound.notes.like(kw['text_notes'].replace('*', '%')))
if kw.has_key('date_from') and kw['date_from'] !=u'':
date_from = datetime.strptime(str(kw['date_from']), '%Y-%m-%d')
compound = compound.filter(Compound.create_date > date_from)
else:
date_from = None
if kw.has_key('date_to') and kw['date_to'] !=u'':
date_to = datetime.strptime(str(kw['date_to']), '%Y-%m-%d')
if date_from:
if date_to>date_from:
compound = compound.filter(Compound.create_date < date_to)
else:
flash(l_(u'The end date must be later than the start date'), 'error')
redirect(request.headers['Referer'])
else:
compound = compound.filter(Compound.create_date < date_to)
try:
tags = kw['text_tags']
except Exception:
tags = None
pass
if tags:
if isinstance(tags, basestring):
tagi = eval(tags)
if type(tagi) != type([]):
tagi = [int(tags)]
else:
tagi = [int(id) for id in tags]
# import sqlalchemy
compound = compound.filter(Compound.tags.any(Tags.id.in_(tagi)))
if selection and not search_clicked:
argv =''
for arg in selection:
argv += '/' + arg
if kw['akcja'] == u'edit':
if len(selection) == 1:
redirect('/%s/samples/edit%s' % (pname, argv))
else:
redirect('/%s/samples/multiedit/index%s' % (pname, argv))
elif kw['akcja'] == u'accept':
if len(selection) == 1:
redirect('/%s/samples/accept%s' % (pname, argv))
else:
redirect('/%s/samples/multiaccept/index%s' % (pname, argv))
elif kw['akcja'] == u'library':
if len(selection) == 1:
redirect('/%s/samples/library%s' % (pname, argv))
else:
redirect('/%s/samples/multilibrary/index%s' % (pname, argv))
elif kw['akcja'] == u'pdf':
redirect('/%s/samples/index/download%s/pdf/samples_selected.pdf' % (pname, argv))
elif kw['akcja'] == u'xls':
redirect('/%s/samples/download%s/xls/samples_selected.xls' % (pname, argv))
elif kw['akcja'] == u'txt':
redirect('/%s/samples/download%s/txt/samples_selected.txt' % (pname, argv))
elif kw['akcja'] == u'delete':
redirect('/%s/samples/remove%s' % (pname, argv))
else:
flash(l_(u'Action error'), 'error')
redirect(request.headers['Referer'])
else:
try:
akcja = kw['akcja']
except Exception:
akcja = None
if akcja:
if akcja == u'pdf':
redirect('/%s/samples/index/download/pdf/samples_all.pdf' % pname)
elif akcja == u'xls':
redirect('/%s/samples/download/xls/samples_all.xls' % pname)
elif akcja == u'txt':
redirect('/%s/samples/download/txt/samples_all.txt' % pname)
if dsc:
compound = compound.order_by(desc(order).nullslast())
else:
compound = compound.order_by((order))
page_url = paginate.PageURL_WebOb(request)
currentPage = paginate.Page(compound, page, url=page_url, items_per_page=items)
return dict(compound=currentPage.items, currentPage=currentPage, tmpl=tmpl, page='samples', pname=pname, alltags=alltags, similarity=similarity)
|
|
#!/usr/bin/env python
#
# Copyright (c) Greenplum Inc 2008. All Rights Reserved.
#
'''
base.py
common base for the commands execution framework. Units of work are defined as Operations
as found in other modules like unix.py. These units of work are then packaged up and executed
within a GpCommand. A GpCommand is just a common infrastructure for executing an Operation.
The general idea is that the application developer breaks the problem down into a set of
GpCommands that need to be executed. This class also provides a queue and set of workers
for executing this set of commands.
'''
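# A minimal usage sketch (illustrative only, using the classes defined below;
# 'sdw1' is a hypothetical remote host):
#
#   pool = WorkerPool(numWorkers=4)
#   pool.addCommand(Command('list tmp', 'ls /tmp'))
#   pool.addCommand(Command('remote date', 'date', ctxt=REMOTE, remoteHost='sdw1'))
#   pool.join()                # wait for the work queue to drain
#   pool.check_results()       # raises ExecutionError on the first failed command
#   pool.haltWork()
#   pool.joinWorkers()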
from Queue import Queue,Empty
from threading import Thread
import os
import signal
import subprocess
import sys
import time
from gppylib import gplog
from gppylib import gpsubprocess
from pygresql.pg import DB
# paramiko prints deprecation warnings which are ugly to the end-user
import warnings
warnings.simplefilter('ignore', DeprecationWarning)
import paramiko, getpass
logger=gplog.get_default_logger()
GPHOME=os.environ.get('GPHOME')
SRC_GPPATH=". %s/greenplum_path.sh;" % GPHOME
# Maximum retries if sshd rejects the connection due to too many
# unauthenticated connections.
SSH_MAX_RETRY=10
# Delay before retrying ssh connection, in seconds
SSH_RETRY_DELAY=.5
class WorkerPool(object):
"""TODO:"""
def __init__(self,numWorkers=16,items=None):
self.workers=[]
self.work_queue=Queue()
self.completed_queue=Queue()
self.num_assigned=0
if items is not None:
for item in items:
self.work_queue.put(item)
self.num_assigned += 1
for i in range(0,numWorkers):
w = Worker("worker%d" % i,self)
self.workers.append(w)
w.start()
self.numWorkers = numWorkers
self.logger = logger
###
def getNumWorkers(self):
return self.numWorkers
def getNextWorkItem(self,timeout=None):
return self.work_queue.get(block=True,timeout=timeout)
def addFinishedWorkItem(self,command):
self.completed_queue.put(command)
self.work_queue.task_done()
def addCommand(self,cmd):
self.logger.debug("Adding cmd to work_queue: %s" % cmd.cmdStr)
self.work_queue.put(cmd)
self.num_assigned += 1
def wait_and_printdots(self,command_count,quiet=True):
while self.completed_queue.qsize() < command_count:
time.sleep(1)
if not quiet:
sys.stdout.write(".")
sys.stdout.flush()
if not quiet:
print " "
self.join()
def print_progress(self, command_count):
while True:
num_completed = self.completed_queue.qsize()
num_completed_percentage = 0
if command_count:
num_completed_percentage = float(num_completed) / command_count
logger.info('%0.2f%% of jobs completed' % (num_completed_percentage * 100))
if num_completed >= command_count:
return
time.sleep(10)
def join(self):
self.work_queue.join()
return True
def joinWorkers(self):
for w in self.workers:
w.join()
def getCompletedItems(self):
completedList=[]
try:
while True:
item=self.completed_queue.get(False)
if item is not None:
completedList.append(item)
except Empty:
return completedList
return completedList #just to be sure
def check_results(self):
""" goes through all items in the completed_queue and throws an exception at the
first one that didn't execute successfully
throws ExecutionError
"""
try:
while True:
item=self.completed_queue.get(False)
if not item.get_results().wasSuccessful():
raise ExecutionError("Error Executing Command: ",item)
except Empty:
return
def empty_completed_items(self):
while not self.completed_queue.empty():
self.completed_queue.get(False)
def isDone(self):
#TODO: not sure that qsize() is safe
return (self.num_assigned == self.completed_queue.qsize())
def haltWork(self):
self.logger.debug("WorkerPool haltWork()")
for w in self.workers:
w.haltWork()
for i in range(0,self.numWorkers):
self.work_queue.put('dummy command')
class OperationWorkerPool(WorkerPool):
""" TODO: This is a hack! In reality, the WorkerPool should work with Operations, and
Command should be a subclass of Operation. Till then, we'll spoof the necessary Command
functionality within Operation. """
def __init__(self, numWorkers=16, operations=None):
if operations is not None:
for operation in operations:
self._spoof_operation(operation)
super(OperationWorkerPool, self).__init__(numWorkers, operations)
def check_results(self):
raise NotImplementedError("OperationWorkerPool has no means of verifying success.")
def _spoof_operation(self, operation):
operation.cmdStr = str(operation)
class Worker(Thread):
"""TODO:"""
pool=None
shouldStop=False
cmd=None
name=None
logger=None
def __init__(self,name,pool,timeout=5):
self.name=name
self.pool=pool
self.timeout=timeout
self.logger=logger
Thread.__init__(self)
def run(self):
try_count = 0
while True:
try:
if try_count == 5:
self.logger.debug("[%s] try and get work from queue..." % self.name)
try_count = 0
if self.shouldStop:
self.logger.debug('[%s] stopping' % self.name)
return
try:
self.cmd = self.pool.getNextWorkItem(timeout=self.timeout)
except TypeError:
# misleading exception raised during interpreter shutdown
return
if self.cmd is not None and not self.shouldStop:
self.logger.debug("[%s] got cmd: %s" % (self.name,self.cmd.cmdStr))
self.cmd.run()
self.logger.debug("[%s] finished cmd: %s" % (self.name, self.cmd))
self.pool.addFinishedWorkItem(self.cmd)
self.cmd=None
try_count = 0
else:
try_count += 1
if self.shouldStop:
self.logger.debug("[%s] stopping" % self.name)
return
except Empty:
if self.shouldStop:
self.logger.debug("[%s] stopping" % self.name)
return
except Exception,e:
self.logger.exception(e)
if self.cmd:
self.logger.debug("[%s] finished cmd with exception: %s" % (self.name, self.cmd))
self.pool.addFinishedWorkItem(self.cmd)
self.cmd=None
try_count = 0
def haltWork(self):
self.logger.debug("[%s] haltWork" % self.name)
self.shouldStop=True
# this was originally coded as
#
# if self.cmd is not None:
# self.cmd.interrupt()
# self.cmd.cancel()
#
# but as observed in MPP-13808, the worker thread's run() loop may set self.cmd to None
# past the point where the calling thread checks self.cmd for None, leading to a curious
# "'NoneType' object has no attribute 'cancel' exception" which may prevent the worker pool's
# haltWorkers() from actually halting all the workers.
#
c = self.cmd
if c is not None and isinstance(c, Command):
c.interrupt()
c.cancel()
def signalPassiveStop(self):
self.shouldStop=True
"""
TODO: consider just having a single interface that needs to be implemented for
describing work to allow the Workers to use it. This would allow the user
to better provide the necessary logic. i.e. even though the user wants to
execute a unix command... how the results are interpreted is highly
application specific. So we should have a separate level of abstraction
for executing UnixCommands and DatabaseCommands from this one.
other things to think about:
-- how to support cancel
-- how to support progress
-- undo?
-- blocking vs. unblocking
"""
#--------------------------------NEW WORLD-----------------------------------
class CommandResult():
""" Used as a way to package up the results from a GpCommand
"""
#rc,stdout,stderr,completed,halt
def __init__(self,rc,stdout,stderr,completed,halt):
self.rc=rc
self.stdout=stdout
self.stderr=stderr
self.completed=completed
self.halt=halt
pass
def printResult(self):
res = "cmd had rc=%d completed=%s halted=%s\n stdout='%s'\n " \
"stderr='%s'" % (self.rc,str(self.completed), str(self.halt), self.stdout, self.stderr)
return res
def wasSuccessful(self):
if self.halt:
return False
if not self.completed:
return False
if self.rc != 0:
return False
return True
def __str__(self):
return self.printResult()
def split_stdout(self, how=':'):
"""
TODO: AK: This doesn't belong here if it pertains only to pg_controldata.
MPP-16318: Skip over discrepancies in the pg_controldata stdout, as it's
not this code's responsibility to judge the pg_controldata stdout. This is
especially true for 'immediate' shutdown, in which case, we won't even
care for WARNINGs or other pg_controldata discrepancies.
"""
for line in self.stdout.split('\n'):
ret = line.split(how, 1)
if len(ret) == 2:
yield ret
class ExecutionError(Exception):
def __init__(self,summary,cmd):
self.summary=summary
self.cmd=cmd
def __str__(self):
#TODO: improve dumping of self.cmd
return "ExecutionError: '%s' occured. Details: '%s' %s" %\
(self.summary,self.cmd.cmdStr,self.cmd.get_results().printResult())
#specify types of execution contexts.
LOCAL=1
REMOTE=2
RMI=3
NAKED=4
gExecutionContextFactory = None
#
# @param factory needs to have a createExecutionContext(self, execution_context_id, remoteHost, stdin, nakedExecutionInfo) function
#
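# Illustrative sketch of a conforming factory (hypothetical class name):
#
#   class MyExecutionContextFactory:
#       def createExecutionContext(self, execution_context_id, remoteHost,
#                                  stdin, nakedExecutionInfo=None):
#           if execution_context_id == REMOTE:
#               return RemoteExecutionContext(remoteHost, stdin)
#           return LocalExecutionContext(stdin)
#
#   setExecutionContextFactory(MyExecutionContextFactory())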
def setExecutionContextFactory(factory):
global gExecutionContextFactory
gExecutionContextFactory = factory
def createExecutionContext(execution_context_id,remoteHost,stdin, nakedExecutionInfo=None):
if gExecutionContextFactory is not None:
return gExecutionContextFactory.createExecutionContext(execution_context_id, remoteHost, stdin)
elif execution_context_id == LOCAL:
return LocalExecutionContext(stdin)
elif execution_context_id == REMOTE:
if remoteHost is None:
raise Exception("Programmer Error. Specified REMOTE execution context but didn't provide a remoteHost")
return RemoteExecutionContext(remoteHost,stdin)
elif execution_context_id == RMI:
return RMIExecutionContext()
elif execution_context_id == NAKED:
if remoteHost is None:
raise Exception("Programmer Error. Specified NAKED execution context but didn't provide a remoteHost")
if nakedExecutionInfo is None:
raise Exception("Programmer Error. Specified NAKED execution context but didn't provide a NakedExecutionInfo")
return NakedExecutionContext(remoteHost, stdin, nakedExecutionInfo)
class ExecutionContext():
""" An ExecutionContext defines where and how to execute the Command and how to
gather up information that are the results of the command.
"""
propagate_env_map = {}
"""
Dict. mapping environment variables to their values. See gpcoverage.py for example usage.
"""
def __init__(self):
pass
def execute(self,cmd):
pass
def interrupt(self,cmd):
pass
def cancel(self,cmd):
pass
class LocalExecutionContext(ExecutionContext):
proc=None
halt=False
completed=False
def __init__(self,stdin):
ExecutionContext.__init__(self)
self.stdin = stdin
pass
def execute(self, cmd, wait=True):
# prepend env. variables from ExecutionContext.propagate_env_map
# e.g. Given {'FOO': 1, 'BAR': 2}, we'll produce "FOO=1 BAR=2 ..."
for k, v in self.__class__.propagate_env_map.iteritems():
cmd.cmdStr = "%s=%s %s" % (k, v, cmd.cmdStr)
# also propagate env from command instance specific map
for k, v in cmd.propagate_env_map.iteritems():
cmd.cmdStr = "%s=%s %s" % (k, v, cmd.cmdStr)
# executable='/bin/bash' is to ensure the shell is bash. bash isn't the
# actual command executed, but the shell that command string runs under.
self.proc = gpsubprocess.Popen(cmd.cmdStr, env=None, shell=True,
executable='/bin/bash',
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE, close_fds=True)
if wait:
(rc,stdout_value,stderr_value)=self.proc.communicate2(input=self.stdin)
self.completed=True
cmd.set_results(CommandResult(
rc,"".join(stdout_value),"".join(stderr_value),self.completed,self.halt))
def cancel(self,cmd):
if self.proc:
try:
os.kill(self.proc.pid, signal.SIGTERM)
except OSError:
pass
def interrupt(self,cmd):
self.halt=True
if self.proc:
self.proc.cancel()
##########################################################################
# Naked Execution is used to run commands where ssh keys are not exchanged
class NakedExecutionInfo:
SFTP_NONE = 0
SFTP_PUT = 1
SFTP_GET = 2
def __init__(self, passwordMap, sftp_operation = SFTP_NONE, sftp_remote = None, sftp_local = None):
self.passwordMap = passwordMap
self.sftp_operation = sftp_operation
self.sftp_remote = sftp_remote
self.sftp_local = sftp_local
class NakedExecutionPasswordMap:
def __init__(self, hostlist):
self.hostlist = hostlist
self.mapping = dict()
self.unique_passwords = set()
self.complete = False
# this method throws exceptions on error to create a valid list
def discover(self):
for host in self.hostlist:
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
# TRY NO PASSWORD
try:
client.connect(host)
self.mapping[host] = None
client.close()
continue # next host
except Exception, e:
pass
try:
client.close()
except Exception, e:
pass
# TRY EXISTING PASSWORDS
foundit = False
for passwd in self.unique_passwords:
try:
client.connect(host, password=passwd)
foundit = True
self.mapping[host] = passwd
break
except Exception, e:
pass
if foundit:
continue
# ASK USER
foundit = False
for attempt in range(5):
try:
passwd = getpass.getpass(' *** Enter password for %s: ' % (host), sys.stderr)
client.connect(host, password=passwd)
foundit = True
self.mapping[host] = passwd
if passwd not in self.unique_passwords:
self.unique_passwords.add(passwd)
break
except Exception, e:
pass
try:
client.close()
except Exception, e:
pass
if not foundit:
raise Exception("Did not get a valid password for host " + host)
if len(self.mapping.keys()) == len(self.hostlist) and len(self.hostlist) > 0:
self.complete = True
class NakedExecutionContext(LocalExecutionContext):
def __init__(self,targetHost,stdin, nakedCommandInfo):
LocalExecutionContext.__init__(self, stdin)
self.targetHost=targetHost
self.passwordMap = nakedCommandInfo.passwordMap
self.sftp_operation = nakedCommandInfo.sftp_operation
self.sftp_remote = nakedCommandInfo.sftp_remote
self.sftp_local = nakedCommandInfo.sftp_local
self.client = None
def execute(self,cmd):
self.client = paramiko.SSHClient()
self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
try:
self.client.connect(self.targetHost, password=self.passwordMap.mapping[self.targetHost])
except paramiko.AuthenticationException:
self.client.close()
cmd.set_results(CommandResult(1,"","password validation on %s failed" % self.targetHost,False, False))
return
except Exception, e:
cmd.set_results(CommandResult(1,"","conection to host " + self.targetHost + " failed: " + e.__str__(),False, False))
return
if self.sftp_operation == NakedExecutionInfo.SFTP_NONE:
self.execute_ssh(cmd)
elif self.sftp_operation == NakedExecutionInfo.SFTP_PUT:
self.execute_sftp_put(cmd)
elif self.sftp_operation == NakedExecutionInfo.SFTP_GET:
self.execute_sftp_get(cmd)
else:
raise Exception("bad NakedExecutionInfo.sftp_operation")
def execute_ssh(self,cmd):
try:
stdin, stdout, stderr = self.client.exec_command(cmd.cmdStr)
rc = stdout.channel.recv_exit_status()
self.completed=True
cmd.set_results(CommandResult(rc,stdout.readlines(),stderr.readlines(),self.completed, self.halt))
stdin.close()
stdout.close()
stderr.close()
except Exception, e:
cmd.set_results(CommandResult(1,"",e.__str__(),False, False))
finally:
self.client.close()
def execute_sftp_put(self, cmd):
ftp = None
try:
ftp = self.client.open_sftp()
ftp.put(self.sftp_local, self.sftp_remote)
self.completed=True
cmd.set_results(CommandResult(0,"","",self.completed, self.halt))
except Exception, e:
cmd.set_results(CommandResult(1,"",e.__str__(),False, False))
finally:
if ftp: ftp.close()
self.client.close()
def execute_sftp_get(self, cmd):
ftp = None
try:
ftp = self.client.open_sftp()
ftp.get(self.sftp_remote, self.sftp_local)
self.completed=True
cmd.set_results(CommandResult(0,"","",self.completed, self.halt))
except Exception, e:
cmd.set_results(CommandResult(1,"",e.__str__(),False, False))
finally:
if ftp: ftp.close()
self.client.close()
def interrupt(self, cmd):
self.halt=True
self.client.close()
cmd.set_results(CommandResult(1,"","command on host " + self.targetHost + " interrupted ", False, False))
def cancel(self, cmd):
self.client.close()
cmd.set_results(CommandResult(1,"","command on host " + self.targetHost + " canceled ", False, False))
class RemoteExecutionContext(LocalExecutionContext):
trail = set()
"""
Leaves a trail of hosts to which we've ssh'ed, during the life of a particular interpreter.
"""
def __init__(self,targetHost,stdin):
LocalExecutionContext.__init__(self, stdin)
self.targetHost=targetHost
pass
def execute(self,cmd):
# prepend env. variables from ExecutionContext.propagate_env_map
# e.g. Given {'FOO': 1, 'BAR': 2}, we'll produce "FOO=1 BAR=2 ..."
for k, v in self.__class__.propagate_env_map.iteritems():
cmd.cmdStr = "%s=%s %s" % (k, v, cmd.cmdStr)
self.__class__.trail.add(self.targetHost)
# also propagate env from command instance specific map
for k, v in cmd.propagate_env_map.iteritems():
cmd.cmdStr = "%s=%s %s" % (k, v, cmd.cmdStr)
# Escape " for remote execution otherwise it interferes with ssh
cmd.cmdStr = cmd.cmdStr.replace('"', '\\"')
cmd.cmdStr="ssh -o 'StrictHostKeyChecking no' %s \"%s %s\"" % (self.targetHost,SRC_GPPATH,cmd.cmdStr)
LocalExecutionContext.execute(self,cmd)
if (cmd.get_results().stderr.startswith('ssh_exchange_identification: Connection closed by remote host')):
self.__retry(cmd)
pass
def __retry(self, cmd, count=0):
if count == SSH_MAX_RETRY:
return
time.sleep(SSH_RETRY_DELAY)
LocalExecutionContext.execute(self, cmd)
if (cmd.get_results().stderr.startswith('ssh_exchange_identification: Connection closed by remote host')):
self.__retry(cmd, count + 1)
class RMIExecutionContext(ExecutionContext):
""" Leave this as a big old TODO: for now. see agent.py for some more details"""
def __init__(self):
ExecutionContext.__init__(self)
raise Exception("RMIExecutionContext - Not implemented")
pass
class Command:
""" TODO:
"""
name=None
cmdStr=None
results=None
exec_context=None
propagate_env_map={} # specific environment variables for this command instance
def __init__(self,name,cmdStr,ctxt=LOCAL,remoteHost=None,stdin=None,nakedExecutionInfo=None):
self.name=name
self.cmdStr=cmdStr
self.exec_context=createExecutionContext(ctxt,remoteHost,stdin=stdin,nakedExecutionInfo=nakedExecutionInfo)
self.remoteHost=remoteHost
def __str__(self):
if self.results:
return "%s cmdStr='%s' had result: %s" % (self.name,self.cmdStr,self.results)
else:
return "%s cmdStr='%s'" % (self.name,self.cmdStr)
# Start a process that will execute the command but don't wait for
# it to complete. Return the Popen object instead.
def runNoWait(self):
faultPoint = os.getenv('GP_COMMAND_FAULT_POINT')
if not faultPoint or (self.name and not self.name.startswith(faultPoint)):
self.exec_context.execute(self, wait=False)
return self.exec_context.proc
def run(self,validateAfter=False):
faultPoint = os.getenv('GP_COMMAND_FAULT_POINT')
if not faultPoint or (self.name and not self.name.startswith(faultPoint)):
self.exec_context.execute(self)
else:
# simulate error
self.results = CommandResult(1,'Fault Injection','Fault Injection' ,False,True)
if validateAfter:
self.validate()
pass
def set_results(self,results):
self.results=results
def get_results(self):
return self.results
def get_stdout_lines(self):
return self.results.stdout.splitlines()
def get_stderr_lines(self):
return self.results.stderr.splitlines()
def cancel(self):
if self.exec_context and isinstance(self.exec_context, ExecutionContext):
self.exec_context.cancel(self)
def interrupt(self):
if self.exec_context and isinstance(self.exec_context, ExecutionContext):
self.exec_context.interrupt(self)
def was_successful(self):
if self.results is None:
return False
else:
return self.results.wasSuccessful()
def validate(self,expected_rc=0):
"""Plain vanilla validation which expects a 0 return code."""
if self.results.rc != expected_rc:
raise ExecutionError("non-zero rc: %d" % self.results.rc, self)
class SQLCommand(Command):
"""Base class for commands that execute SQL statements. Classes
that inherit from SQLCommand should set cancel_conn to the pygresql
connection they wish to cancel and check self.cancel_flag."""
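# Illustrative subclass sketch (hypothetical; assumes a reachable pygresql database):
#
#   class MyQuery(SQLCommand):
#       def run(self, validateAfter=False):
#           conn = DB(dbname='template1')
#           self.cancel_conn = conn    # lets cancel() abort the running query
#           if not self.cancel_flag:
#               conn.query('SELECT 1')
#           self.set_results(CommandResult(0, '', '', True, False))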
def __init__(self,name):
Command.__init__(self, name, cmdStr=None)
self.cancel_flag = False
self.cancel_conn = None
def run(self,validateAfter=False):
raise ExecutionError("programmer error. implementors of SQLCommand must implement run()", self)
def interrupt(self):
# No execution context for SQLCommands
pass
def cancel(self):
# assignment is an atomic operation in python
self.cancel_flag = True
# if self.conn is not set we cannot cancel.
if self.cancel_conn:
DB(self.cancel_conn).cancel()
def run_remote_commands(name, commands):
"""
"""
cmds = {}
pool = WorkerPool()
for host, cmdStr in commands.items():
cmd = Command(name=name, cmdStr=cmdStr, ctxt=REMOTE, remoteHost=host)
pool.addCommand(cmd)
cmds[host] = cmd
pool.join()
pool.check_results()
return cmds
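# Illustrative usage (hypothetical hosts):
#
#   cmds = run_remote_commands('collect uptime', {'sdw1': 'uptime', 'sdw2': 'uptime'})
#   for host, cmd in cmds.items():
#       print host, cmd.get_results().stdout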
|
|
#!/usr/bin/env python
#-------------------------------------------------------------------------------
# Name: Spondulas
# Purpose: This tool is used to retrieve malicious web pages for analysis
#
# Author: Bart Hopper
#
# Created: 03/02/2012
# Licence: FreeBSD
# Copyright (c) 2012, Bart Hopper
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are those
# of the authors and should not be interpreted as representing official policies,
# either expressed or implied, of the FreeBSD Project.
#-------------------------------------------------------------------------------
################################################################################
# Todo/Possible features:
################################################################################
# Add support for additional useragents
# Allow random selection of user agents?
# Allow an external file list of user agents?
# Automated WHOIS lookup added to -links file
# hash of payloads
# PDF reports
# Add shellcode detection
################################################################################
#####################
# Imports #
#####################
import argparse
import gzip
import hashlib
import os
import re
import socket
import ssl
import string
import sys
import threading
import time
#########################
# Global Variables #
#########################
decoded = 0 # Flag to indicate either a chunked or gzipped file
results = '' # Results of query or parse. May be repurposed
version_id = '1.0.1'
#-------------------------------#
# Output related variables #
#-------------------------------#
address_links = '' # Stores the address links retrieved from the target web page
autolog = 0 # Automatically generates filenames and creates an investigation file
css_links = '' # Stores the Cascading Style Sheets discovered when parsing
forms = '' # Stores Form information discovered when parsing
image_links = '' # Stores image links discovered when parsing
inputfile = '' # A local file to parse for links
ip_address = '' # IP address resolved from query
linksfile = '' # A file to list all links found when parsing
outputfile = '' # The file that stores the data retrieved from a remote host. This is repurposed for input_mode
nextfile = '' # Next filename for autolog
script_links = '' # Collection of all scripts found while parsing
was_redirected = '' # Flag to indicate if a redirection was detected
#-------------------------------#
# Request related variables #
#-------------------------------#
ajax = [] # Array to store AJAX queries
cookies = '' # Stores the cookies discovered when parsing
cookie_array = [] # Stores Cookies for investigation report
persistent = 0 # Boolean to keep the connection open for AJAX/websockets
port = 0
referrer = ''
request = ''
socksport = 0
SSL = ''
target_url = ''
timeout = 30
webrequest = {};
#-------------------------------#
# Timer related variables #
#-------------------------------#
days = 0
hours = 0
minutes = 0
monitormode = 0
seconds = 0
time_calculated = 0
total_seconds = 0
#################################
# Begin Function Definitions #
#################################
def create_investigation_filename():
'''Creates the investigation filename'''
a = time.localtime()
month = '{:02d}'.format(a.tm_mon)
mday = '{:02d}'.format(a.tm_mday)
filename = str(a.tm_year) + '-' + month + '-' + mday + '.txt'
return filename
def create_report():
'''Creates the report for the screen and links file.'''
outfile = open(linksfile,'w')
if(len(inputfile) > 0):
print('\nFile Processed: '+inputfile +'\n\n')
else:
print('\nTarget URL: '+ webrequest['host'] + webrequest['resource'])
outfile.write('\nTarget URL: '+ webrequest['host'] + webrequest['resource'] + '\n')
print('IP address: '+ip_address)
outfile.write('IP address: '+ip_address+'\n')
if(len(was_redirected)):
outfile.write('Redirected: '+was_redirected+'\n')
print('Referrer: '+ webrequest['referrer'])
outfile.write('Referrer: '+ webrequest['referrer'] + '\n')
# Create a string with the current date
a = time.localtime()
a2 = str(a[0]) + '-' + str('%02d' % a[1]) + '-' + str('%02d' % a[2]) + ' '
a2 += str(a[3]) + ':' + str('%02d' % a[4]) + ':' + str('%02d' % a[5])
print('Date/Time: ' + a2)
outfile.write('Date/Time: ' + a2 + '\n')
print('Output File: ' + outputfile)
outfile.write('Output File: ' + outputfile + '\n')
print('Links File: ' + linksfile)
outfile.write('Links File: ' + linksfile + '\n')
print('\n')
outfile.write('\n')
if (len(address_links)):
print('\nAddress Links')
outfile.write('\nAddress Links\n')
print('-'*20)
outfile.write('-'*20 + '\n')
print(address_links)
outfile.write(address_links + '\n')
if (len(cookies)):
print('\nCookies')
outfile.write('\nCookies\n')
print('-'*20)
outfile.write('-'*20 + '\n')
print(cookies)
outfile.write(cookies + '\n')
if (len(css_links)):
print('\nCascading Style Sheets')
outfile.write('\nCascading Style Sheets\n')
print('-'*20)
outfile.write('-'*20 + '\n')
print(css_links)
outfile.write(css_links + '\n')
if (len(forms)):
print('\nForms')
outfile.write('\nForms\n')
print('-'*20)
outfile.write('-'*20 + '\n')
print(forms)
outfile.write(forms + '\n')
if (len(image_links)):
print('\nImage Links')
outfile.write('\nImage Links\n')
print('-'*20)
outfile.write('-'*20 + '\n')
print(image_links)
outfile.write(image_links + '\n')
if (len(script_links)):
print('\nScript Links')
outfile.write('\nScript Links\n')
print('-'*20)
outfile.write('-'*20 + '\n')
print(script_links)
outfile.write(script_links + '\n')
sys.stdout.flush()
outfile.flush()
outfile.close()
sys.exit()
def create_request():
'''Creates the http request to send to the target machine'''
request =[]
request.append(webrequest['request_type']+
' '+ webrequest['resource']+ ' HTTP/1.1')
request.append('Host: '+webrequest['host'])
request.append('User-Agent: '+webrequest['user_agent'])
request.append('Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8')
request.append('Accept-Language: en-us,en;q=0.5')
request.append('Accept-Encoding: gzip, deflate')
request.append('Connection: keep-alive')
if(webrequest['referrer']):
request.append('Referer: '+webrequest['referrer'])
if(webrequest['cookies']):
request.append(webrequest['cookies'])
if(webrequest.get('content_length')):
request.append('Content-Length: '+str(webrequest['content_length']))
# Add more stuff here
if(webrequest.get('postvars')):
request.append('\n'+webrequest['postvars'])
print('\n')
r = ''
for i in request:
r += (i + '\r\n')
r += '\r\n'
return r
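# For reference, a GET request assembled above looks roughly like (illustrative):
#   GET /index.html HTTP/1.1\r\n
#   Host: www.example.com\r\n
#   User-Agent: Mozilla/5.0 (...)\r\n
#   Accept: text/html,application/xhtml+xml,...\r\n
#   Connection: keep-alive\r\n
#   \r\n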
def dechunk(a):
'''This function removes the segment lengths and fixes "Chunked" files'''
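# Chunked transfer encoding interleaves hex chunk sizes with data; e.g. the body
#   b'4\r\nWiki\r\n5\r\npedia\r\n0\r\n\r\n'  reassembles to  b'Wikipedia'
# (illustrative example of the format this function strips).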
global decoded
dechunked = ''
chunked = a.find(bytes('Transfer-Encoding: chunked','latin'))
if(chunked > 0):
decoded = 1
first = a.find(bytes('\x0D\x0A\x0D\x0A','latin'))
first += 4
dechunked += a[:first].decode()
dechunked = dechunked.encode()
end = a.find(bytes('\x0D\x0A','latin'),first)
chunklength = a[first:end]
end += 2
chunklength = int(chunklength,16)
while chunklength:
dechunked += a[end:end+chunklength]
first = end+chunklength+2
end = a.find(bytes('\x0D\x0A','latin'),first)
chunklength = a[first:end]
end += 2
chunklength = int(chunklength,16)
else:
dechunked = a
return dechunked
def demangle(response):
'''Fix the escaping present in a bytes object'''
if(response[0:2] == "b'"):
response = response[2:]
response = response[:-1]
endline = bytes('\x5C\x72\x5C\x6E','latin')
newline = bytes('\x0D\x0A','latin')
response = response.replace(endline,newline)
oldtab = bytes('\x5C\x74','latin')
newtab = bytes('\x09','latin')
response = response.replace(oldtab,newtab)
oldapos = bytes('\x5C\x27','latin')
newapos = bytes('\x27','latin')
response = response.replace(oldapos,newapos)
oldcr = bytes('\x5C\x0A','latin')
newcr = bytes('\x0A','latin')
response = response.replace(oldcr,newcr)
return response
def generate_firefox_ua_string():
'''Creates a Firefox Useragent string based on OS and Firefox version.'''
print('\nLet\'s generate a Firefox user agent string....\n')
ua_components = ['Mozilla/5.0 (',
['Windows NT 5.1;',
'Windows NT 5.2;',
'Windows NT 6.0;',
'Windows NT 6.1;',
'Macintosh; Intel Mac OS X 10.6;',
'X11; Linux i686;',
'X11; Linux x86_64;',
'X11; Linux i686 on x86_64;',
'Android; Mobile;',
'Android; Tablet;'
],
[') Gecko/20100101 Firefox/5.0',
') Gecko/20110524 Firefox/5.0a2',
') Gecko/20100101 Firefox/6.0',
') Gecko/20110612 Firefox/6.0a2',
') Gecko/20100101 Firefox/9.0',
') Gecko/20100101 Firefox/9.0.1',
') Gecko/2012010317 Firefox/10.0a4',
') Gecko/20120421 Firefox/11.0',
') Gecko/20120403211507 Firefox/12.0',
') Gecko/20120405 Firefox/14.0a1',
') Gecko/20120427 Firefox/15.0a1'
]
]
print('\nSelect OS version:')
print('------------------')
os_version = -1
while((int(os_version) < 0) or (int(os_version) > len(ua_components[1])-1)):
for i in range(0,len(ua_components[1])):
print(str(i)+': '+ua_components[1][i])
os_version = input('\nPlease select: ')
if(os_version == ''):
os_version = -1
print('Select Firefox version:')
print('------------------')
firefox_version = -1
while((int(firefox_version) < 0) or(int(firefox_version) > len(ua_components[2])-1)):
for i in range(0,len(ua_components[2])):
print(str(i)+': '+ua_components[2][i])
firefox_version = input('\nPlease select: ')
if(firefox_version == ''):
firefox_version = -1
ua = ua_components[0] + \
ua_components[1][int(os_version)] + \
ua_components[2][int(firefox_version)]
return(ua)
def generate_ie_ua_string():
'''Creates an Internet Explorer Useragent string based on OS and IE version.'''
print('\nLet\'s generate an Internet Explorer user agent string....\n')
ua_components = ['Mozilla/',
['4.0 (compatible; MSIE 6.0;',
'4.0 (compatible; MSIE 7.0;',
'5.0 (compatible; MSIE 8.0;',
'5.0 (compatible; MSIE 9.0;',
'5.0 (compatible; MSIE 10.0;',
'5.0 (compatible; MSIE 10.6;'],
[['Windows XP','Windows NT 5.1)'],
['Windows Server 2003/XP 64-bit','Windows NT 5.2)'],
['Windows Vista','Windows NT 6.0)'],
['Windows 7','Windows NT 6.1)']
]
]
print('Select IE version:')
print('------------------')
ie_version = -1
while((int(ie_version) < 0) or(int(ie_version) > len(ua_components[1])-1)):
for i in range(0,len(ua_components[1])):
print(str(i)+': '+ua_components[1][i])
ie_version = input('\nPlease select: ')
if(ie_version == ''):
ie_version = -1
print('\nSelect OS version:')
print('------------------')
os_version = -1
while((int(os_version) < 0) or (int(os_version) > len(ua_components[2])-1)):
for i in range(0,len(ua_components[2])):
print(str(i)+': '+ua_components[2][i][0])
os_version = input('\nPlease select: ')
if(os_version == ''):
os_version = -1
ua = ua_components[0] + \
ua_components[1][int(ie_version)] + \
ua_components[2][int(os_version)][1]
return(ua)
def get_choices(choices,label):
'''Creates menus for the program'''
print('\n'+label)
print('-'*len(label))
for i in range(0,len(choices)):
print(str(i) + '. ' + choices[i][0])
choice = -1
while((choice < 0) or (choice > (len(choices)-1))):
temp = input('\nSelect: ')
if(temp == ''):
choice = -1
continue
if(temp.isnumeric()):
choice = int(temp)
else:
choice = -1
choices[int(choice)][1]()
def get_cookies():
'''Allows input of cookies to submit to the target web site.'''
print('\nCookies are used to track state on the same web site.')
print('Enter any cookies that were set for this web site...\n')
print('Cookies should be in the format: cookie1=value1; cookie2=value2\n')
print('Enter each line separately. Press enter on a blank line to finish entering\n')
answer = ''
response = ''
while(len(answer) == 0):
answer = input('Cookies: ')
if(len(answer)==0): break
response += "Cookie: " + answer +'\n'
answer = ''
webrequest['cookies'] = response[:-1]
def get_default_user_agent():
'''Selects the default browser Useragent string.'''
# Todo: retrieve default user agent from a configuration file
return 'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP)'
def get_input_user_agent():
'''Allow the user to input a User agent'''
a = input('User Agent: ')
return a
def get_link_file(outfile):
'''Creates a filename for links file based on the outputfile filename'''
global linksfile
if(len(linksfile)>0):
return
else:
a = ''
fileparts = outfile.rsplit('.',2)
fileparts.insert(1,'.')
fileparts[0] = fileparts[0] + '-links'
a = a.join(fileparts)
linksfile = a
def get_next_file():
'''Finds the highest-numbered autolog file in the current directory and returns the next filename'''
a = os.listdir()
b = []
for i in a:
if re.findall('^\d\d\d\.txt',i):
b.append(i)
if(len(b)==0):
return '{:03d}'.format(1)+'.txt'
b.sort(reverse=1)
nextfile = b[0]
nextfile = nextfile.replace('.txt','')
nextfile = int(nextfile)
nextfile += 1
return '{:03d}'.format(nextfile)+'.txt'
def get_options():
'''Processes the commandline options'''
global autolog
global inputfile
global linksfile
global monitormode
global outputfile
global persistent
global referrer
global request
global socksport
global target_url
global timeout
global total_seconds
parser = argparse.ArgumentParser(prog='spondulas',prefix_chars='-/',description='A program to retrieve web pages and parse the links')
parser.add_argument('--version', action='version', version='Beta '+version_id)
parser.add_argument('-hh', help='Verbose Help',dest='verbosehelp',action='store_true',default=False)
parser.add_argument('-a', '--autolog', help='Enable autogeneration of outputfiles and create an investigation file',dest='autolog',action='store_const',const=1)
parser.add_argument('-i', '--input', help='The source file',dest='infile',metavar='InputFile',default='')
parser.add_argument('-l', '--link', help='The file used to store the links retrieved',dest='linksfile',metavar='LinkFile',default='')
parser.add_argument('-m', '--monitor', help='Enables site monitor mode. This polls a site for changes. Use cautiously!',dest='monitormode',action='store_const',const=1)
parser.add_argument('-ms', '--monitor-seconds', help='Sleep seconds for monitor mode',dest='total_seconds',metavar='seconds',type=int,default='0')
parser.add_argument('-o', '--output', help='The file used to store the page retrieved',dest='outputfile',metavar='OutputFile',default='')
parser.add_argument('-p', '--persistent', help='Hold a persistent connection for websockets/AJAX',dest='persistent',action='store_const',const=1)
parser.add_argument('-r', '--request', help='The request type either GET or POST',choices=('GET','POST'), dest='request',metavar='request',default='GET')
parser.add_argument('-ref', '--referrer', help='The URL referring you to the new URL',dest='referrer',metavar='URL',default='')
parser.add_argument('-s', '--socksport', help='The port number for a SOCKS5 proxy',dest='socksport',metavar='Port#',default='')
parser.add_argument('-t', '--timeout', help='The Number of seconds to hold a keep-alive session open',dest='timeout',metavar='Seconds',default='30')
parser.add_argument('-u', '--url', help='The URL to retrieve',dest='target_url',metavar='URL',default='')
# parser.print_help()
args = parser.parse_args()
autolog = args.autolog
verbosehelp = args.verbosehelp
inputfile = args.infile
linksfile = args.linksfile
outputfile = args.outputfile
monitormode = args.monitormode
persistent = args.persistent
referrer = args.referrer
request = args.request
timeout = int(args.timeout)
if(len(args.socksport)>0):
socksport = int(args.socksport)
target_url = args.target_url
total_seconds = args.total_seconds
if(verbosehelp == True):
help()
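# Example invocations (illustrative; example.com is a placeholder):
#   spondulas.py -u http://www.example.com/index.html -o page.txt -l page-links.txt
#   spondulas.py -i saved_page.txt -l page-links.txt        # parse a local file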
def get_output_file():
'''Allows input of a filename to store web request results.'''
global autolog
global outputfile
if(len(outputfile) > 0):
return
elif autolog == 1:
outputfile = get_next_file()
return
else:
print('\nPlease enter the output filename....')
response = ''
while(response == ''):
response = input('Output File: ')
if(os.path.exists(response)):
print('\nFile already exists...')
response = ''
outputfile = response
def get_post_vars():
'''Allows the input of the variables for a POST request.'''
print('POST requests must have variables.')
print('Please enter POST variables......\n')
print('Format: parameter1=value&parameter2=value&parameter3=value....\n')
response = ''
while(response == ''):
response = input('Post vars: ')
webrequest['postvars'] = response
webrequest['content_length'] = len(webrequest['postvars'])
def get_referrer():
'''Allows entry of a referrer web page.'''
global referrer
if(len(referrer)>0):
if(referrer[0]=="'") or (referrer[0]=='"'):
referrer = referrer[1:]
if(referrer[-1]=="'") or (referrer[-1]=='"'):
referrer = referrer[:-1]
webrequest['referrer'] = referrer
else:
print('\nEnter a referrer if you were redirected from another site.')
print('If there is no referrer, you can leave this blank.\n')
print('Referrer should be in the format: http://www.example.com/somepath/file.html\n')
referrer = input("Referrer: ")
webrequest['referrer'] = referrer
def get_request_type():
'''Allows selection of GET or POST request type.'''
if(len(request)>0):
webrequest['request_type'] = request
if(request == 'GET'):
response = 0
else:
response = 1
else:
request_type = ['GET','POST']
response = -1
while ((int(response) < 0) or (int(response) > (len(request_type)-1))):
print("\nRequest Type")
print("--------------")
for i in range(0,len(request_type)):
print(str(i)+". "+request_type[i])
response = input('\nSelect: ')
if(response==''):
response = -1
if(int(response)!= 0):
webrequest['content_length'] = 0
print('\n')
webrequest['request_type'] = request_type[int(response)]
return int(response)
def get_response_address(response):
'''Parses the web server response for any address links.'''
global address_links
temp = set ()
addresses = re.findall(bytes('<a .*?href=[\'|\"](.*?)[\"|\']',encoding='latin1'),response,re.IGNORECASE)
if(len(addresses)):
for i in addresses:
temp.add(str(i.decode()))
addresses = ''
for i in sorted(temp):
address_links += i + '\n'
def get_response_cookies(response):
'''Parses the retrieved web page for cookies.'''
global cookies
global cookie_array
endline = bytes('\x5C\x72\x5C\x6E','latin')
newline = bytes('\x0D\x0A','latin')
response = response.replace(endline,newline)
setcookie = re.findall(bytes('Set-Cookie: (.*)\n',encoding='latin1'),response,re.IGNORECASE)
if(len(setcookie)):
for i in setcookie:
cookies += i.decode() +'\n'
cookie_array.append(i.decode())
setcookie = ''
def get_response_forms(response):
'''Parses the retrieved web page for HTML forms.'''
global forms
form = re.findall(bytes('<form(.*?)</form>',encoding='latin1'),response,re.IGNORECASE)
if(len(form)):
for i in form:
actionfield = re.search(bytes('action=[\'\"](.*?)[\'\"]',encoding='latin1'),i,re.IGNORECASE)
methodfield = re.search(bytes('method=[\'\"](.*?)[\'\"]',encoding='latin1'),i,re.IGNORECASE)
forms += actionfield.group(0).decode()
forms += methodfield.group(0).decode() + '\n'
actionfield = ''
inputfields = re.findall(bytes('<input(.*?)/>',encoding='latin1'),i,re.IGNORECASE)
for field in inputfields:
typename = re.search(bytes('type=[\'|\"](.*?)[\'|\"]',encoding='latin1'),field,re.IGNORECASE)
fieldname = re.search(bytes('name=[\'|\"](.*?)[\'|\"]',encoding='latin1'),field,re.IGNORECASE)
fieldvalue = re.search(bytes('value=[\'|\"](.*?)[\'|\"]',encoding='latin1'),field,re.IGNORECASE)
if (fieldname is not None):
forms += '\t'+typename.group(0).decode() + ' '+ fieldname.group(0).decode() +' '
if (fieldvalue is not None):
forms += "'"+fieldvalue.group(0).decode()+"'"+'\n'
else:
forms += "''" +'\n'
inputfields = ''
##forms += i.decode() +'\n'
form = ''
def get_response_images(response):
'''Parses the web server response for a listing of image links.'''
global image_links
addresses = re.findall(bytes('<img .*?src=[\'|\"](.*?)[\"|\']',encoding='latin1'),response,re.IGNORECASE)
if(len(addresses)):
for i in addresses:
image_links += str(i.decode()) + '\n'
addresses = ''
def get_response_redirects(response):
'''Searches the response for HTTP 3xx status codes and JavaScript redirects that indicate a redirection'''
global was_redirected
redirected = re.match(bytes('HTTP/1.1 3\d\d',encoding='latin1'),response,re.IGNORECASE)
if(redirected):
was_redirected = 'Yes'
if(was_redirected):
gohere = re.findall(bytes('[Ll]ocation: (.*)',encoding='latin1'),response,re.IGNORECASE)
if(gohere):
for i in gohere:
print('\n\n[*] Redirect: '+ i.decode())
was_redirected = i.decode()
b = re.search(bytes('window.location *= *[\'|\"](.*?)[\'|\"]',encoding='latin1'),response,re.IGNORECASE)
if(b):
redirect_message = '[*] Redirect: '+str(b.group().decode())+' *'
print('\n\n\n')
print('*'*(len(redirect_message)))
print(redirect_message)
print('*'*(len(redirect_message)))
was_redirected = b.group().decode()
def get_response_external_scripts(response):
'''Parses the web server response for a listing of external script links.'''
global script_links
addresses = re.findall(bytes('<script .*?src=[\'|\"](.*?)[\"|\'].*?<\/script>',encoding='latin1'),response,re.IGNORECASE)
temp = set ()
if(len(addresses)):
for i in addresses:
temp.add(str(i.decode()))
addresses = ''
for i in sorted(temp):
script_links += i + '\n'
def get_response_stylesheets(response):
'''Parse the web server response for a listing of style sheets'''
global css_links
addresses = re.findall(bytes('<link .*?href=[\'|\"](.*?)[\"|\']',encoding='latin1'),response,re.IGNORECASE)
if(len(addresses)):
for i in addresses:
css_links += str(i.decode()) + '\n'
addresses = ''
def get_target_url():
'''Get the target URL to retrieve.'''
global port
global SSL
global target_url
if(len(target_url)>0):
if(target_url[-1] == "'"):
target_url = target_url[1:-1]
webrequest['URL'] = target_url
print(webrequest['URL'])
else:
webrequest['URL'] = input('\nTarget URL: ')
if(webrequest['URL'][0:5] == 'https'):
port = 443
SSL = True
webrequest['protocol'] = 'https'
host = re.sub('^https://','',webrequest['URL'],flags=re.IGNORECASE)
else:
port = 80
SSL = False
webrequest['protocol'] = 'http'
host = re.sub('^http://','',webrequest['URL'],flags=re.IGNORECASE)
host = re.sub('/.*','',host)
colon = host.find(':')
if(colon>0):
port = int(host[int(colon)+1:])
host = host[:colon]
webrequest['host'] = host
resource = webrequest['URL']
resource = re.sub('(https*://)*'+host,'',resource)
if(resource==""):
resource = "/"
if(resource[0]==':'):
resource = re.sub(':\d{1,5}','',resource)
webrequest['resource'] = resource
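# Worked example (illustrative): for a target URL of 'https://example.com:8443/path?x=1'
# the code above ends up with port=8443, SSL=True, webrequest['protocol']='https',
# webrequest['host']='example.com' and webrequest['resource']='/path?x=1'.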
def get_user_agent():
'''Allows selection of the browser Useragent string.'''
if(len(sys.argv)>1):
webrequest['user_agent'] = get_default_user_agent()
else:
dispatch = [['Use default user agent',get_default_user_agent],
['Generate Internet Explorer user agent',generate_ie_ua_string],
['Generate Firefox user agent',generate_firefox_ua_string],
['Input Custom User Agent',get_input_user_agent]
]
print('\nSelect useragent')
print('----------------\n')
selection = -1
while((int(selection) < 0) or(int(selection) > len(dispatch)-1)):
for i in range(0,len(dispatch)):
print(str(i)+': '+dispatch[i][0])
selection = input('\nPlease select: ')
if(selection == ''):
selection = -1
#webrequest['user_agent'] = 'Mozilla/4.0(compatible; MSIE 7.0b; Windows NT 6.0)'
ua_function = dispatch[int(selection)][1]
webrequest['user_agent'] = ua_function()
def help():
'''Main Help Function'''
a = '''
Spondulas Help
'''
print(a)
while 1:
dispatch = [['About',help_about],
['Features',help_features],
['File Transfers',help_file_transfers],
['Processing HTML files',help_inputfiles],
['Using TOR',help_tor],
['Monitor Mode',help_monitor_mode],
['Exit',sys.exit]
]
get_choices(dispatch,'Select: ')
def help_about():
'''Main help screen'''
a = '''
Title: Spondulas
Purpose: A program to retrieve and parse web pages
Author: Bart Hopper (@d4ncingd4n)
'''
print(a)
def help_features():
'''Help Screen that lists significant features'''
a = '''
Features
-----------
* Support for GET and POST methods
* Support for HTTP and HTTPS methods
* Support for the submission of cookies
* Support for SOCKS5 proxy using TOR
* Support for pipelining (AJAX)
* Monitor mode to poll a website looking for changes in DNS or body content
* Input mode to parse local HTML files, e.g., e-mailed forms
* Automatic conversion of GZIP and Chunked encoding
* Automatic IP address Lookup
* Selection or generation of User Agent Strings
'''
print(a)
def help_file_transfers():
'''Help screen that explains http chunked encoding'''
a = '''
Spondulas automatically decodes gzip and chunked files in a
"-decoded" file. The original file is also preserved.
Binary files are often transferred using 'chunked' encoding. When
looking at the response from the server, you can identify a
chunked file transfer by the 'Transfer-Encoding' header in the
server response. Here is an example:
HTTP/1.1 200 OK
Date: Sat, 18 Feb 2012 05:37:47 GMT
Server: Apache
X-Powered-By: PHP/4.4.9
Transfer-Encoding: chunked
Content-Type: text/html; charset=utf-8
9a2
[Actual File data]
Notice the "9a2" on the line above. This indicates this chunk is
0x9a2 hexadecimal bytes long (2466 in decimal). If you open the file
in a hex editor, you'll see 0x0D 0x0A after the chunk length
number. Starting from the Carriage Return/Linefeed (0x0D 0x0A) sequence,
go to the point 0x9a2 bytes further into the file.'''
b = '''
When you get to the next location, you'll see the 0x0D 0x0A sequence
followed by a new chunk length and another 0x0D 0x0A sequence.
Using your hex editor, you should delete this sequence of bytes.
In this instance, you would remove 0x0D 0x0A 0x39 0x61 0x32 0x0D 0x0A.
(0x39 0x61 0x32 are the character codes for '9a2'). Continue until all chunks
have been properly joined.
Files may be compressed with gzip encoding to reduce file transfer time.
GZIP compression can be recognised by the following header:
HTTP/1.1 200 OK
Date: Tue, 26 Jun 2012 23:36:39 GMT
Server: Apache/2.2.14 (Ubuntu)
Accept-Ranges: bytes
Vary: Accept-Encoding
Content-Encoding: gzip
Content-Length: 439
Content-Type: text/html
'''
c = '''
The HTTP header is terminated by a blank line. The body of
the HTTP response follows the header. Following the header will
be either the gzipped body or the first block for chunked encoding.
Gzip encoding begins with a 0x1F 0x8B sequence. '''
print(a)
response = input('\nPress <Enter> for More: ')
print(b)
response = input('\nPress <Enter> for More: ')
print(c)
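# Illustrative sketch (not part of the original tool): the dechunk() called from
# parse_results() lives elsewhere in this file; the helper below only demonstrates
# the chunk framing described in help_file_transfers(). It assumes `body` starts at
# the first chunk-size line, i.e. just past the blank line that ends the HTTP headers.
def _sketch_dechunk(body):
    '''Join the chunks of an HTTP/1.1 chunked-encoded body into one byte string.'''
    out = b''
    while body:
        size_line, _, body = body.partition(b'\r\n')
        try:
            size = int(size_line.split(b';')[0], 16)  # chunk length is hexadecimal
        except ValueError:
            break
        if size == 0:  # the zero-length chunk marks the end of the body
            break
        out += body[:size]
        body = body[size + 2:]  # skip the chunk data plus its trailing CRLF
    return out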
def help_inputfiles():
'''Describe using Spondulas to parse standalone HTML files.'''
a = '''
Spondulas can be used to parse standalone HTML files. You may encounter
this if someone e-mails an HTML page with an embedded form as an
attachment or the input of monitor mode. Simply start Spondulas and supply
the -i inputfile argument.
The presence of an -i argument disables the page retrieval functions.
'''
print(a)
def help_monitor_mode():
'''Describe using monitor_mode to monitor websites.'''
a = '''
***********
* CAUTION *
***********
Use caution when using monitor_mode. Using short time values for extended
periods of time could be construed as a hostile action.
Monitor mode is used to detect changes in DNS or HTML body content over
time. The first request is stored in a timestamped output file and the
body of the HTTP response is hashed with SHA1. The SHA1 hash is retained
for comparison with the response to the next request. If the next
response is identical to the last response, the time of request is printed
to screen. If the response is different, the time stamp and hash are printed
to screen and the output is saved to a timestamped file. If you wish
to process the changed file for links, you can use the input file mode.
'''
print(a)
response = input('\nPress <Enter> for More: ')
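# Illustrative sketch (not part of the original tool): the change detection described
# above mirrors what monitor_mode() does further down in this file -- hash everything
# after the first blank line (0x0D 0x0A 0x0D 0x0A) of the raw response.
def _sketch_body_hash(raw_response):
    '''Return the SHA1 hex digest of the body of a raw HTTP response.'''
    body = raw_response[raw_response.find(b'\r\n\r\n'):]
    return hashlib.sha1(body).hexdigest()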
def help_tor():
'''Help for using TOR'''
a = '''
Spondulas supports using the TOR proxy to anonymize your web requests.
It is advisable to hide your source IP when investigating malicious web
pages since attackers will often review their system logs to identify
visitors.
Tor can be obtained: https://www.torproject.org/
By default, TOR is configured to accept SOCKS5 proxy requests on
TCP Port 9050. Spondulas can connect as a SOCKS5 proxy client.
Some malicious websites block TOR exit nodes.
'''
print(a)
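# Illustrative sketch (not part of the original tool, mirrors the inline handshake in
# ThreadClass.run() below): a minimal SOCKS5 CONNECT through a local TOR proxy, using
# the domain-name address type so that DNS resolution happens on the TOR side.
def _sketch_socks5_connect(host, port, proxy_port=9050):
    '''Return a socket tunnelled through a SOCKS5 proxy listening on localhost.'''
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect(('127.0.0.1', proxy_port))
    s.send(b'\x05\x01\x00')                      # greeting: SOCKS5, one method, no auth
    s.recv(2)                                    # server's method selection
    req = b'\x05\x01\x00\x03' + bytes([len(host)]) + host.encode('ascii')
    req += port.to_bytes(2, 'big')               # CONNECT request, domain-name address type
    s.send(req)
    s.recv(10)                                   # reply: VER REP RSV ATYP BND.ADDR BND.PORT
    return s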
def open_investigation_file(filename):
'''Opens the investigation file and records the current entry (URL, IP address and any cookies)'''
global outputfile
global cookie_array
# If file doesn't exist, just add the first entries
if (os.path.exists(filename) != 1):
z = open(filename,'w')
a = str(int(outputfile.partition('.')[0])) + '. ' +target_url + '\t(' + ip_address + ')\n'
z.write(a)
# If we have cookies, add them to the file
if(len(cookie_array)):
for i in range(0,len(cookie_array)):
z.write('Cookie: '+cookie_array[i])
else:
z = open(filename,'r+')
oldfile = []
counter = 0
# Load the file into an array so it can be accessed by index
for i in z.readlines():
oldfile.append(i)
# If there is a second file without a referrer due to a user error
if(len(referrer) == 0):
counter = 0
a = str(int(outputfile.partition('.')[0])) + '. ' + target_url + '\t(' + ip_address + ')\n'
oldfile.insert(counter,a)
if(len(cookie_array)):
for n in range(0,len(cookie_array)):
counter += 1
oldfile.insert(counter,'Cookie: '+cookie_array[n])
z.seek(0)
for i in oldfile:
z.write(i)
z.close()
return
for i in range(0,len(oldfile)):
# Counter should be current line + 1
counter += 1
# Capture information on Referrer
if ((re.search(re.escape(referrer),oldfile[i])) or (counter == len(oldfile))):
# Capture the indent level so we can add one
tabs = re.match('\t*',oldfile[i])
if(tabs is not None):
tabs = tabs.group(0)
else:
tabs = ''
# Prepare current line to insert *somewhere*
a = tabs+'\t'+str(int(outputfile.partition('.')[0])) + '. ' +target_url + '\t(' + ip_address + ')\n'
# Increment counter to skip over cookies
if ((counter < len(oldfile)) and (re.search('Cookie',oldfile[counter]))):
# The entry may have multiple cookies
while((counter < len(oldfile)) and (re.search('Cookie',oldfile[counter]))):
counter += 1
# Insert line after the referrer + cookies
oldfile.insert(counter,a)
# Insert any cookies
if(len(cookie_array)):
for n in range(0,len(cookie_array)):
counter += 1
oldfile.insert(counter,tabs+'\tCookie: '+cookie_array[n])
# Write changes to the file
z.seek(0)
for i in oldfile:
z.write(i)
z.close()
def parse_results():
'''Parse the output file for cookies, forms, etc.'''
global autolog
global decoded
global results
a = open(outputfile,'rb')
b = a.read()
a.close()
b = dechunk(b)
b = ungzip(b)
b = demangle(b)
if(decoded >0):
offset = outputfile.find('.')
decodedfilename = outputfile[0:offset]+'-decoded'+outputfile[offset:]
decoded = open(decodedfilename,'wb')
decoded.write(b)
decoded.close()
a = open(decodedfilename,'rb')
else:
a = open(outputfile,'rb')
for line in a.readlines():
get_response_redirects(line)
get_response_cookies(line) # This works
get_response_forms(line) # This works
get_response_address(line) # This works
get_response_images(line) # This works
get_response_stylesheets(line) # This works
get_response_external_scripts(line)
a.close()
# If autolog is selected, create an investigation file
if(autolog):
open_investigation_file(create_investigation_filename())
def post_processing():
'''Processes HTML files for links'''
if(os.stat(outputfile)[6] != 0):
parse_results()
create_report()
else:
print('\nNo data returned')
os.remove(outputfile)
def retrieve_page():
'''Sends the query to the target URL.'''
global monitormode
if(monitormode != 1):
print('Query being sent')
print('----------------')
print(webrequest['query'])
print('\nDo not be alarmed if the program appears to "hang."')
print('This is caused by keep-alive packets. A timeout exception')
print('will be raised after '+str(timeout)+' seconds.')
print('\nBirds away.....')
r = ThreadClass()
r.start()
for i in range(1,timeout+15):
if(monitormode != 1):
print('.',end='')
sys.stdout.flush()
time.sleep(1)
if(threading.active_count()==1):
break
def main():
'''Master program function'''
get_options()
if(len(inputfile)>0):
inputfile_processing_mode() # input_processing_mode: Process a local html file
elif(persistent):
persistent_mode()
elif(monitormode):
monitor_mode()
else:
normal_mode() # Normal mode: download a network resource and process
#####################
# Processing Modes #
#####################
def inputfile_processing_mode():
'''Processes local HTML files, e.g. pages that have been e-mailed or saved by monitor mode'''
global outputfile
outputfile = inputfile
get_link_file(outputfile)
post_processing()
def monitor_mode():
'''Monitor mode retrieves pages at intervals and monitors them for changes'''
global outputfile
global total_seconds
get_user_agent()
get_target_url()
if(get_request_type()):
get_post_vars()
get_referrer()
get_cookies()
webrequest['query'] = create_request()
if(total_seconds == 0):
timer_get_sleep_time()
last_hash = ''
last_ip = ''
while 1:
timestamp = timer_build_timestamp()
outputfile = timestamp + '.txt'
outputfile = outputfile.replace(':','-')
retrieve_page()
a = open(outputfile,'rb')
b = a.read()
a.close()
first = b.find(bytes('\x0D\x0A\x0D\x0A','latin'))
result = b[first:]
new_hash = timer_get_sha(result)
new_ip = hashlib.sha1(bytes(ip_address,'latin')).hexdigest()
if((new_hash != last_hash)or(new_ip != last_ip)):
print('\n'+timestamp+'\t'+new_hash)
last_hash = new_hash
last_ip = new_ip
else:
print(timestamp)
os.unlink(outputfile)
time.sleep(total_seconds)
def normal_mode():
'''Normal mode retrieves and parses the file'''
get_user_agent()
get_target_url()
if(get_request_type()):
get_post_vars()
get_referrer()
get_cookies()
get_output_file()
get_link_file(outputfile)
webrequest['query'] = create_request()
retrieve_page()
post_processing()
def persistent_mode():
'''Persistent Mode allows keepalive connection/ajax'''
global request
get_user_agent()
get_target_url()
if(get_request_type()):
get_post_vars()
get_referrer()
get_cookies()
webrequest['query'] = create_request()
# Copy the target_url into the referrer field
webrequest['referrer'] = webrequest['protocol'] + \
'://' + webrequest['host'] + webrequest['resource']
old_host = webrequest['host']
request = ''
more_choices = 1
while(more_choices):
print('\nPlease enter the AJAX request:')
webrequest['postvars'] = ''
webrequest['content_length'] = ''
if(get_request_type()):
get_post_vars()
get_target_url()
webrequest['host'] = old_host
temp_request = create_request()
ajax.append(temp_request)
answer = ''
while(len(answer) != 1):
answer = input('More [Y/N]? ')
if(answer.lower() == 'n'):
more_choices = 0
break
get_output_file()
get_link_file(outputfile)
retrieve_page()
post_processing()
class ThreadClass(threading.Thread):
'''Class used to make the actual webrequest. This allows threading'''
def run(self):
global ip_address
o = open(outputfile,'wb')
try:
if (socksport):
s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.connect(('localhost',socksport))
s.settimeout(timeout)
r = s.send(bytes('\x05\x01\x00',encoding='ascii'))
b = s.recv(2)
host = webrequest['host']
if(SSL):
temp = '\x05\x01\x00\x03'+ chr(len(host)) + host +'\x01\xBB'
s.send(bytes(temp,'ascii'))
b = s.recv(14)
else:
temp = '\x05\x01\x00\x03'+ chr(len(host)) + host +'\x00\x50'
s.send(bytes(temp,'ascii'))
b = s.recv(14)
elif(SSL):
a = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
a.connect((webrequest['host'],port))
a.settimeout(timeout)
s = ssl.wrap_socket(a)
else:
s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
# If the connection times out too quickly to retrieve all data
# increase the timeout
s.settimeout(timeout)
s.connect((webrequest['host'],port))
ip_address = socket.gethostbyname_ex(webrequest['host'])[2][0]
print('IP address: '+ip_address)
s.send(bytes(webrequest['query'],'ascii'))
for i in ajax:
s.send(bytes('\n'+i,'ascii'))
while 1:
server_response = s.recv(4096)
if(len(server_response) == 0): raise socket.timeout
o.write(server_response)
except socket.gaierror:
print('\nUnable to find target server')
s.close()
o.flush()
o.close()
except socket.herror:
print('Unable to find target server')
s.close()
o.close()
except ssl.SSLError:
pass
#print('Timeout reached')
except socket.timeout:
s.close()
o.close()
else:
s.close()
o.flush()
o.close()
def timer_build_timestamp():
'''Creates Timestamp for request in monitor mode. Also used to create outputfilename.'''
a = time.localtime()
b = str(a.tm_year)+'-'+'{:02d}'.format(a.tm_mon)+'-'
b += '{:02d}'.format(a.tm_mday)+'_'+'{:02d}'.format(a.tm_hour)
b += ':'+'{:02d}'.format(a.tm_min)+':'+'{:02d}'.format(a.tm_sec)
return b
def timer_calculate():
'''Calculates sleep time for monitor mode.'''
global total_seconds
global time_calculated
total_seconds = (days * 86400) + (hours * 3600) + (minutes * 60) + seconds
print('Time: '+str(total_seconds)+' seconds')
time_calculated = 1
def timer_days():
'''Allows entry of days to sleep in monitor mode'''
global days
print('\n\nMaximum Days: 25\n')
days = input('Days to sleep: ')
days = int(days)
if(days > 25):
days = 25
def timer_get_sha(a):
'''Returns the SHA1 hex digest of the supplied bytes; used in monitor mode to fingerprint the page body and IP address.'''
return hashlib.sha1(a).hexdigest()
def timer_get_sleep_time():
'''Allows input of sleep time for monitor mode.'''
timer_menu_get_sleep_time()
def timer_hours():
'''Allows entry of hours to sleep in monitor mode'''
global hours
hours = input('Hours to sleep: ')
hours = int(hours)
def timer_menu_get_sleep_time():
'''Builds the menu to calculate sleep time for monitor mode.'''
while (time_calculated < 1):
print('Monitor Mode timer settings')
print('---------------------------\n\n')
print('************')
print('* CAUTION! *')
print('************')
print('Use caution when setting delays between checks.')
print('You don\'t want to be accused of attacking the website.')
print('\nDays should be less than 25')
dispatch = [['Days\t\t\t'+str(days),timer_days],
['Hours\t\t'+str(hours),timer_hours],
['Minutes\t\t'+str(minutes),timer_minutes],
['Seconds\t\t'+str(seconds),timer_seconds],
['Calculate Sleep and Continue',timer_calculate]]
get_choices(dispatch,'Select: ')
def timer_minutes():
'''Allows entry of minutes to sleep in monitor mode'''
global minutes
minutes = input('Minutes to sleep: ')
minutes = int(minutes)
def timer_seconds():
'''Allows entry of seconds to sleep in monitor mode'''
global seconds
seconds = input('Seconds to sleep: ')
seconds = int(seconds)
def ungzip(a):
'''This function decodes gzipped pages'''
global decoded
data = bytes()
gzipped = a.find(bytes('Content-Encoding: gzip','latin'))
if(gzipped > 0):
decoded = 1
first = a.find(bytes('\x1f\x8b','latin'))
data += a[:first]
b = a[first:]
data += gzip.decompress(b)
else:
data = a
return data
#########################
# Program Entry Point #
#########################
if __name__ == '__main__':
main()
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for fused_batch_norm related functionality in tensorflow.ops.nn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_grad
from tensorflow.python.ops import nn_impl
from tensorflow.python.platform import test
class BatchNormalizationTest(test.TestCase):
def _batch_norm(self, x, mean, var, offset, scale, epsilon):
# We compute the batch norm manually in this function because
# nn_impl.batch_normalization does not support float16 yet.
# TODO(reedwm): Add float16 support to nn_impl.batch_normalization.
inv = math_ops.rsqrt(var + epsilon) * scale
y = math_ops.cast(x, scale.dtype) * inv + (offset - mean * inv)
return math_ops.cast(y, x.dtype)
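# Note (illustrative): the two lines above are an algebraic regrouping of the usual
#   y = scale * (x - mean) / sqrt(var + epsilon) + offset
# with inv = scale * rsqrt(var + epsilon), so that y = x * inv + (offset - mean * inv).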
def _inference_ref(self, x, scale, offset, mean, var, epsilon, data_format):
if data_format not in ['NHWC', 'NCHW']:
raise ValueError('data_format must be NCHW or NHWC, '
'got %s.' % data_format)
if data_format == 'NCHW':
x = array_ops.transpose(x, [0, 2, 3, 1])
y = self._batch_norm(x, mean, var, offset, scale, epsilon)
if data_format == 'NCHW':
y = array_ops.transpose(y, [0, 3, 1, 2])
return y.eval()
def _test_inference(self,
x_shape,
x_dtype,
scale_shape,
scale_dtype,
use_gpu=True,
data_format='NHWC'):
np.random.seed(1)
x_val = np.random.random_sample(x_shape).astype(x_dtype)
scale_val = np.random.random_sample(scale_shape).astype(scale_dtype)
offset_val = np.random.random_sample(scale_shape).astype(scale_dtype)
mean_val = np.random.random_sample(scale_shape).astype(scale_dtype)
var_val = np.random.random_sample(scale_shape).astype(scale_dtype)
with self.test_session(use_gpu=use_gpu) as sess:
x = constant_op.constant(x_val, name='x')
scale = constant_op.constant(scale_val, name='scale')
offset = constant_op.constant(offset_val, name='offset')
mean = constant_op.constant(mean_val, name='mean')
var = constant_op.constant(var_val, name='variance')
epsilon = 0.001
y, _, _ = nn_impl.fused_batch_norm(
x,
scale,
offset,
mean=mean,
variance=var,
epsilon=epsilon,
data_format=data_format,
is_training=False)
y_val = sess.run(y)
y_ref = self._inference_ref(x, scale, offset, mean, var, epsilon,
data_format)
# An atol value of 1e-3 is too small for float16's, because some adjacent
# float16 values that y_val can take are greater than 1e-3 apart, e.g.
# 2.16602 and 2.16797.
atol = 2e-3 if x_dtype == np.float16 else 1e-3
self.assertAllClose(y_ref, y_val, atol=atol)
def _training_ref(self, x, scale, offset, epsilon, data_format):
if data_format not in ['NHWC', 'NCHW']:
raise ValueError('data_format must be NCHW or NHWC, '
'got %s.' % data_format)
if data_format == 'NCHW':
x = array_ops.transpose(x, [0, 2, 3, 1])
mean, var = nn_impl.moments(
math_ops.cast(x, scale.dtype), [0, 1, 2], keep_dims=False)
y = self._batch_norm(x, mean, var, offset, scale, epsilon)
if data_format == 'NCHW':
y = array_ops.transpose(y, [0, 3, 1, 2])
return y.eval(), mean.eval(), var.eval()
def _test_training(self,
x_shape,
x_dtype,
scale_shape,
scale_dtype,
use_gpu=True,
data_format='NHWC'):
np.random.seed(1)
x_val = np.random.random_sample(x_shape).astype(x_dtype)
scale_val = np.random.random_sample(scale_shape).astype(scale_dtype)
offset_val = np.random.random_sample(scale_shape).astype(scale_dtype)
with self.test_session(use_gpu=use_gpu) as sess:
x = constant_op.constant(x_val, name='x')
scale = constant_op.constant(scale_val, name='scale')
offset = constant_op.constant(offset_val, name='offset')
epsilon = 0.001
y, mean, var = nn_impl.fused_batch_norm(
x,
scale,
offset,
epsilon=epsilon,
data_format=data_format,
is_training=True)
y_val, mean_val, var_val = sess.run([y, mean, var])
y_ref, mean_ref, var_ref = self._training_ref(x, scale, offset, epsilon,
data_format)
y_atol = 2e-3 if x_dtype == np.float16 else 1e-3
self.assertAllClose(y_ref, y_val, atol=y_atol)
self.assertAllClose(mean_ref, mean_val, atol=1e-3)
# This is for Bessel's correction. tf.nn.moments uses n, instead of n-1, as
# the denominator in the formula to calculate variance, while
# tf.nn.fused_batch_norm has Bessel's correction built in.
sample_size = x_val.size / scale_val.size
var_ref = var_ref * sample_size / (max(sample_size - 1.0, 1.0))
self.assertAllClose(var_ref, var_val, atol=1e-3)
def _compute_gradient_error_float16(self, x, x32, x_shape, y, y32, y_shape):
"""Computes the gradient error for float16 inputs and/or outputs.
This returns the same value as gradient_checker.compute_gradient_error. The
difference is that gradient_checker.compute_gradient_error does not
numerically compute the gradients in a numerically stable way for float16
tensors. To fix this, this function requires float32 versions of x and y to
numerically compute the gradients, to compare with the float16 symbolically
computed gradients.
Args:
x: The input tensor.
x32: A float32 version of x.
x_shape: The shape of x.
y: The output tensor.
y32: A float32 version of y. Must be calculated based on x32, not x.
y_shape: The shape of y.
Returns:
The maximum error in between the two Jacobians, as in
gradient_checker.compute_gradient_error.
"""
x_init_val = np.random.random_sample(x_shape).astype(np.float16)
x32_init_val = x_init_val.astype(np.float32)
# TODO(reedwm): Do not perform the unnecessary computations in
# compute_gradient, since they double the computation time of this function.
theoretical_grad, _ = gradient_checker.compute_gradient(
x, x_shape, y, y_shape, delta=1e-3, x_init_value=x_init_val)
_, numerical_grad = gradient_checker.compute_gradient(
x32, x_shape, y32, y_shape, delta=1e-3, x_init_value=x32_init_val)
return np.fabs(theoretical_grad - numerical_grad).max()
def _test_gradient(self,
x_shape,
x_dtype,
scale_shape,
scale_dtype,
use_gpu=True,
data_format='NHWC',
is_training=True):
np.random.seed(1)
x_val = np.random.random_sample(x_shape).astype(x_dtype)
scale_val = np.random.random_sample(scale_shape).astype(scale_dtype)
offset_val = np.random.random_sample(scale_shape).astype(scale_dtype)
with self.test_session(use_gpu=use_gpu):
x = constant_op.constant(x_val, name='x')
scale = constant_op.constant(scale_val, name='scale')
offset = constant_op.constant(offset_val, name='offset')
if is_training:
pop_mean = None
pop_var = None
else:
pop_mean = np.random.random_sample(scale_shape).astype(scale_dtype)
pop_var = np.random.random_sample(scale_shape).astype(scale_dtype)
y, _, _ = nn_impl.fused_batch_norm(
x,
scale,
offset,
mean=pop_mean,
variance=pop_var,
data_format=data_format,
is_training=is_training)
if x_dtype != np.float16:
err_x = gradient_checker.compute_gradient_error(x, x_shape, y, x_shape)
err_scale = gradient_checker.compute_gradient_error(
scale, scale_shape, y, x_shape)
err_offset = gradient_checker.compute_gradient_error(
offset, scale_shape, y, x_shape)
else:
x32 = constant_op.constant(x_val, name='x32', dtype=dtypes.float32)
y32, _, _ = nn_impl.fused_batch_norm(
x32,
scale,
offset,
mean=pop_mean,
variance=pop_var,
data_format=data_format,
is_training=is_training)
err_x = self._compute_gradient_error_float16(x, x32, x_shape, y, y32,
x_shape)
err_scale = self._compute_gradient_error_float16(
scale, scale, scale_shape, y, y32, x_shape)
err_offset = self._compute_gradient_error_float16(
offset, offset, scale_shape, y, y32, x_shape)
x_err_tolerance = 2e-3 if x_dtype == np.float16 else 1e-3
scale_err_tolerance = 1e-3
self.assertLess(err_x, x_err_tolerance)
self.assertLess(err_scale, scale_err_tolerance)
self.assertLess(err_offset, scale_err_tolerance)
def _test_grad_grad(self,
x_shape,
x_dtype,
scale_shape,
scale_dtype,
use_gpu=True,
data_format='NHWC',
is_training=True,
err_tolerance=1e-3):
np.random.seed(1)
x_val = np.random.random_sample(x_shape).astype(x_dtype)
grad_y_val = np.random.random_sample(x_shape).astype(x_dtype)
scale_val = np.random.random_sample(scale_shape).astype(scale_dtype)
offset_val = np.random.random_sample(scale_shape).astype(scale_dtype)
with self.test_session(use_gpu=use_gpu) as sess:
x = constant_op.constant(x_val, name='x')
grad_y = constant_op.constant(grad_y_val, name='grad_y')
scale = constant_op.constant(scale_val, name='scale')
offset = constant_op.constant(offset_val, name='offset')
if is_training:
pop_mean = None
pop_var = None
else:
pop_mean = np.random.random_sample(scale_shape).astype(scale_dtype)
pop_var = np.random.random_sample(scale_shape).astype(scale_dtype)
y, _, _ = nn_impl.fused_batch_norm(
x,
scale,
offset,
mean=pop_mean,
variance=pop_var,
data_format=data_format,
is_training=is_training)
grad_x, grad_scale, grad_offset = gradients_impl.gradients(
y, [x, scale, offset], grad_y)
if is_training:
epsilon = y.op.get_attr('epsilon')
data_format = y.op.get_attr('data_format')
grad_vals = sess.run([grad_x, grad_scale, grad_offset])
grad_internal = nn_grad._BatchNormGrad(grad_y, x, scale, pop_mean, pop_var, epsilon, data_format)
grad_internal_vals = sess.run(list(grad_internal))
for grad_val, grad_internal_val in zip(grad_vals, grad_internal_vals):
self.assertAllClose(grad_val, grad_internal_val, atol=err_tolerance)
if x_dtype != np.float16:
err_grad_grad_y_1 = gradient_checker.compute_gradient_error(
grad_y, x_shape, grad_x, x_shape)
err_grad_grad_y_2 = gradient_checker.compute_gradient_error(
grad_y, x_shape, grad_scale, scale_shape)
err_grad_grad_y_3 = gradient_checker.compute_gradient_error(
grad_y, x_shape, grad_offset, scale_shape)
# In freeze mode, grad_x is not a function of x.
if is_training:
err_grad_x_1 = gradient_checker.compute_gradient_error(
x, x_shape, grad_x, x_shape)
err_grad_x_2 = gradient_checker.compute_gradient_error(
x, x_shape, grad_scale, scale_shape)
err_grad_scale = gradient_checker.compute_gradient_error(
scale, scale_shape, grad_x, x_shape)
else:
x32 = constant_op.constant(x_val, dtype=dtypes.float32, name='x32')
grad_y32 = constant_op.constant(
grad_y_val, dtype=dtypes.float32, name='grad_y32')
y32, _, _ = nn_impl.fused_batch_norm(
x32,
scale,
offset,
mean=pop_mean,
variance=pop_var,
data_format=data_format,
is_training=is_training)
grad_x32, grad_scale32, grad_offset32 = gradients_impl.gradients(
y32, [x32, scale, offset], grad_y32)
err_grad_grad_y_1 = self._compute_gradient_error_float16(
grad_y, grad_y32, x_shape, grad_x, grad_x32, x_shape)
err_grad_grad_y_2 = self._compute_gradient_error_float16(
grad_y, grad_y32, x_shape, grad_scale, grad_scale32, scale_shape)
err_grad_grad_y_3 = self._compute_gradient_error_float16(
grad_y, grad_y32, x_shape, grad_offset, grad_offset32, scale_shape)
# In freeze mode, grad_x is not a function of x.
if is_training:
err_grad_x_1 = self._compute_gradient_error_float16(
x, x32, x_shape, grad_x, grad_x32, x_shape)
err_grad_x_2 = self._compute_gradient_error_float16(
x, x32, x_shape, grad_scale, grad_scale32, scale_shape)
err_grad_scale = self._compute_gradient_error_float16(
scale, scale, scale_shape, grad_x, grad_x32, x_shape)
self.assertLess(err_grad_grad_y_1, err_tolerance)
self.assertLess(err_grad_grad_y_2, err_tolerance)
self.assertLess(err_grad_grad_y_3, err_tolerance)
if is_training:
self.assertLess(err_grad_x_1, err_tolerance)
self.assertLess(err_grad_x_2, err_tolerance)
self.assertLess(err_grad_scale, err_tolerance)
def testInference(self):
x_shape = [1, 1, 6, 1]
if test.is_gpu_available(cuda_only=True):
for dtype in [np.float16, np.float32]:
self._test_inference(
x_shape, dtype, [1], np.float32, use_gpu=True, data_format='NHWC')
self._test_inference(
x_shape, dtype, [1], np.float32, use_gpu=True, data_format='NCHW')
self._test_inference(
x_shape, np.float32, [1], np.float32, use_gpu=False, data_format='NHWC')
x_shape = [1, 1, 6, 2]
if test.is_gpu_available(cuda_only=True):
for dtype in [np.float16, np.float32]:
self._test_inference(
x_shape, dtype, [2], np.float32, use_gpu=True, data_format='NHWC')
self._test_inference(
x_shape, np.float32, [2], np.float32, use_gpu=False, data_format='NHWC')
x_shape = [1, 2, 1, 6]
if test.is_gpu_available(cuda_only=True):
for dtype in [np.float16, np.float32]:
self._test_inference(
x_shape, dtype, [2], np.float32, use_gpu=True, data_format='NCHW')
x_shape = [27, 131, 127, 6]
if test.is_gpu_available(cuda_only=True):
for dtype in [np.float16, np.float32]:
self._test_inference(
x_shape, dtype, [131], np.float32, use_gpu=True, data_format='NCHW')
self._test_inference(
x_shape, dtype, [6], np.float32, use_gpu=True, data_format='NHWC')
self._test_inference(
x_shape, np.float32, [6], np.float32, use_gpu=False, data_format='NHWC')
def testTraining(self):
x_shape = [1, 1, 6, 1]
if test.is_gpu_available(cuda_only=True):
for dtype in [np.float16, np.float32]:
self._test_training(
x_shape, dtype, [1], np.float32, use_gpu=True, data_format='NHWC')
self._test_training(
x_shape, dtype, [1], np.float32, use_gpu=True, data_format='NCHW')
self._test_training(
x_shape, np.float32, [1], np.float32, use_gpu=False, data_format='NHWC')
x_shape = [1, 1, 6, 2]
if test.is_gpu_available(cuda_only=True):
for dtype in [np.float16, np.float32]:
self._test_training(
x_shape, dtype, [2], np.float32, use_gpu=True, data_format='NHWC')
self._test_training(
x_shape, np.float32, [2], np.float32, use_gpu=False, data_format='NHWC')
x_shape = [1, 2, 1, 6]
if test.is_gpu_available(cuda_only=True):
for dtype in [np.float16, np.float32]:
self._test_training(
x_shape, dtype, [2], np.float32, use_gpu=True, data_format='NCHW')
x_shape = [27, 131, 127, 6]
if test.is_gpu_available(cuda_only=True):
for dtype in [np.float16, np.float32]:
self._test_training(
x_shape, dtype, [131], np.float32, use_gpu=True, data_format='NCHW')
self._test_training(
x_shape, dtype, [6], np.float32, use_gpu=True, data_format='NHWC')
self._test_training(
x_shape, np.float32, [6], np.float32, use_gpu=False, data_format='NHWC')
def testBatchNormGrad(self):
for is_training in [True, False]:
x_shape = [1, 1, 6, 1]
if test.is_gpu_available(cuda_only=True):
for dtype in [np.float16, np.float32]:
self._test_gradient(
x_shape,
dtype, [1],
np.float32,
use_gpu=True,
data_format='NHWC',
is_training=is_training)
self._test_gradient(
x_shape,
dtype, [1],
np.float32,
use_gpu=True,
data_format='NCHW',
is_training=is_training)
self._test_gradient(
x_shape,
np.float32, [1],
np.float32,
use_gpu=False,
data_format='NHWC',
is_training=is_training)
x_shape = [1, 1, 6, 2]
if test.is_gpu_available(cuda_only=True):
for dtype in [np.float16, np.float32]:
self._test_gradient(
x_shape,
dtype, [2],
np.float32,
use_gpu=True,
data_format='NHWC',
is_training=is_training)
self._test_gradient(
x_shape,
np.float32, [2],
np.float32,
use_gpu=False,
data_format='NHWC',
is_training=is_training)
x_shape = [1, 2, 1, 6]
if test.is_gpu_available(cuda_only=True):
for dtype in [np.float16, np.float32]:
self._test_gradient(
x_shape,
dtype, [2],
np.float32,
use_gpu=True,
data_format='NCHW',
is_training=is_training)
x_shape = [5, 7, 11, 4]
if test.is_gpu_available(cuda_only=True):
for dtype in [np.float16, np.float32]:
self._test_gradient(
x_shape,
dtype, [7],
np.float32,
use_gpu=True,
data_format='NCHW',
is_training=is_training)
self._test_gradient(
x_shape,
dtype, [4],
np.float32,
use_gpu=True,
data_format='NHWC',
is_training=is_training)
self._test_gradient(
x_shape,
np.float32, [4],
np.float32,
use_gpu=False,
data_format='NHWC',
is_training=is_training)
def _testBatchNormGradGrad(self, config):
shape = config['shape']
err_tolerance = config['err_tolerance']
dtype = config['dtype']
for is_training in [True, False]:
if test.is_gpu_available(cuda_only=True):
self._test_grad_grad(
shape,
dtype, [shape[3]],
np.float32,
use_gpu=True,
data_format='NHWC',
is_training=is_training,
err_tolerance=err_tolerance)
self._test_grad_grad(
shape,
dtype, [shape[1]],
np.float32,
use_gpu=True,
data_format='NCHW',
is_training=is_training,
err_tolerance=err_tolerance)
if dtype != np.float16:
self._test_grad_grad(
shape,
np.float32, [shape[3]],
np.float32,
use_gpu=False,
data_format='NHWC',
is_training=is_training,
err_tolerance=err_tolerance)
def testBatchNormGradGrad(self):
configs = [{
'shape': [2, 3, 4, 5],
'err_tolerance': 1e-2,
'dtype': np.float32,
}, {
'shape': [2, 3, 2, 2],
'err_tolerance': 1e-3,
'dtype': np.float32,
}, {
'shape': [2, 3, 2, 2],
'err_tolerance': 2e-3,
'dtype': np.float16,
}]
for config in configs:
self._testBatchNormGradGrad(config)
if __name__ == '__main__':
test.main()
|
|
"""Async gunicorn worker for aiohttp.web"""
import asyncio
import os
import re
import signal
import socket
import ssl
import sys
from gunicorn.config import AccessLogFormat as GunicornAccessLogFormat
from gunicorn.workers import base
from .helpers import AccessLogger, create_future, ensure_future
__all__ = ('GunicornWebWorker',
'GunicornUVLoopWebWorker',
'GunicornTokioWebWorker')
class GunicornWebWorker(base.Worker):
DEFAULT_AIOHTTP_LOG_FORMAT = AccessLogger.LOG_FORMAT
DEFAULT_GUNICORN_LOG_FORMAT = GunicornAccessLogFormat.default
def __init__(self, *args, **kw): # pragma: no cover
super().__init__(*args, **kw)
self.servers = {}
self.exit_code = 0
self._notify_waiter = None
def init_process(self):
# create new event_loop after fork
asyncio.get_event_loop().close()
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
super().init_process()
def run(self):
if hasattr(self.wsgi, 'startup'):
self.loop.run_until_complete(self.wsgi.startup())
self._runner = ensure_future(self._run(), loop=self.loop)
try:
self.loop.run_until_complete(self._runner)
finally:
self.loop.close()
sys.exit(self.exit_code)
def make_handler(self, app):
if hasattr(self.wsgi, 'make_handler'):
access_log = self.log.access_log if self.cfg.accesslog else None
return app.make_handler(
loop=self.loop,
logger=self.log,
slow_request_timeout=self.cfg.timeout,
keepalive_timeout=self.cfg.keepalive,
access_log=access_log,
access_log_format=self._get_valid_log_format(
self.cfg.access_log_format))
else:
raise RuntimeError(
"aiohttp.wsgi is not supported anymore, "
"consider to switch to aiohttp.web.Application")
@asyncio.coroutine
def close(self):
if self.servers:
servers = self.servers
self.servers = None
# stop accepting connections
for server, handler in servers.items():
self.log.info("Stopping server: %s, connections: %s",
self.pid, len(handler.connections))
server.close()
yield from server.wait_closed()
# send on_shutdown event
if hasattr(self.wsgi, 'shutdown'):
yield from self.wsgi.shutdown()
# stop alive connections
tasks = [
handler.shutdown(
timeout=self.cfg.graceful_timeout / 100 * 95)
for handler in servers.values()]
yield from asyncio.gather(*tasks, loop=self.loop)
# cleanup application
if hasattr(self.wsgi, 'cleanup'):
yield from self.wsgi.cleanup()
@asyncio.coroutine
def _run(self):
ctx = self._create_ssl_context(self.cfg) if self.cfg.is_ssl else None
for sock in self.sockets:
handler = self.make_handler(self.wsgi)
if hasattr(socket, 'AF_UNIX') and sock.family == socket.AF_UNIX:
srv = yield from self.loop.create_unix_server(
handler, sock=sock.sock, ssl=ctx)
else:
srv = yield from self.loop.create_server(
handler, sock=sock.sock, ssl=ctx)
self.servers[srv] = handler
# If our parent changed then we shut down.
pid = os.getpid()
try:
while self.alive:
self.notify()
cnt = sum(handler.requests_count
for handler in self.servers.values())
if self.cfg.max_requests and cnt > self.cfg.max_requests:
self.alive = False
self.log.info("Max requests, shutting down: %s", self)
elif pid == os.getpid() and self.ppid != os.getppid():
self.alive = False
self.log.info("Parent changed, shutting down: %s", self)
else:
yield from self._wait_next_notify()
except BaseException:
pass
yield from self.close()
def _wait_next_notify(self):
self._notify_waiter_done()
self._notify_waiter = waiter = create_future(self.loop)
self.loop.call_later(1.0, self._notify_waiter_done)
return waiter
def _notify_waiter_done(self):
waiter = self._notify_waiter
if waiter is not None and not waiter.done():
waiter.set_result(True)
self._notify_waiter = None
def init_signals(self):
# Set up signals through the event loop API.
self.loop.add_signal_handler(signal.SIGQUIT, self.handle_quit,
signal.SIGQUIT, None)
self.loop.add_signal_handler(signal.SIGTERM, self.handle_exit,
signal.SIGTERM, None)
self.loop.add_signal_handler(signal.SIGINT, self.handle_quit,
signal.SIGINT, None)
self.loop.add_signal_handler(signal.SIGWINCH, self.handle_winch,
signal.SIGWINCH, None)
self.loop.add_signal_handler(signal.SIGUSR1, self.handle_usr1,
signal.SIGUSR1, None)
self.loop.add_signal_handler(signal.SIGABRT, self.handle_abort,
signal.SIGABRT, None)
# Don't let SIGTERM and SIGUSR1 disturb active requests
# by interrupting system calls
signal.siginterrupt(signal.SIGTERM, False)
signal.siginterrupt(signal.SIGUSR1, False)
def handle_quit(self, sig, frame):
self.alive = False
# worker_int callback
self.cfg.worker_int(self)
# init closing process
self._closing = ensure_future(self.close(), loop=self.loop)
# close loop
self.loop.call_later(0.1, self._notify_waiter_done)
def handle_abort(self, sig, frame):
self.alive = False
self.exit_code = 1
self.cfg.worker_abort(self)
sys.exit(1)
@staticmethod
def _create_ssl_context(cfg):
""" Creates SSLContext instance for usage in asyncio.create_server.
See ssl.SSLSocket.__init__ for more details.
"""
ctx = ssl.SSLContext(cfg.ssl_version)
ctx.load_cert_chain(cfg.certfile, cfg.keyfile)
ctx.verify_mode = cfg.cert_reqs
if cfg.ca_certs:
ctx.load_verify_locations(cfg.ca_certs)
if cfg.ciphers:
ctx.set_ciphers(cfg.ciphers)
return ctx
def _get_valid_log_format(self, source_format):
if source_format == self.DEFAULT_GUNICORN_LOG_FORMAT:
return self.DEFAULT_AIOHTTP_LOG_FORMAT
elif re.search(r'%\([^\)]+\)', source_format):
raise ValueError(
"Gunicorn's style options in form of `%(name)s` are not "
"supported for the log formatting. Please use aiohttp's "
"format specification to configure access log formatting: "
"http://aiohttp.readthedocs.io/en/stable/logging.html"
"#format-specification"
)
else:
return source_format
class GunicornUVLoopWebWorker(GunicornWebWorker):
def init_process(self):
import uvloop
# Close any existing event loop before setting a
# new policy.
asyncio.get_event_loop().close()
# Setup uvloop policy, so that every
# asyncio.get_event_loop() will create an instance
# of uvloop event loop.
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
super().init_process()
class GunicornTokioWebWorker(GunicornWebWorker):
def init_process(self):
import tokio
# Close any existing event loop before setting a
# new policy.
asyncio.get_event_loop().close()
# Setup tokio policy, so that every
# asyncio.get_event_loop() will create an instance
# of tokio event loop.
asyncio.set_event_loop_policy(tokio.EventLoopPolicy())
super().init_process()
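# Example usage (assumed invocation, not part of this module): pick one of the worker
# classes on the gunicorn command line, e.g.
#
#   gunicorn my_package.my_module:app --bind 0.0.0.0:8080 \
#       --worker-class aiohttp.worker.GunicornWebWorker
#
# where `app` is an aiohttp.web.Application. GunicornUVLoopWebWorker additionally
# requires the `uvloop` package and GunicornTokioWebWorker the `tokio` package.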
|
|
r"""
Util functions (:mod:`qiita_pet.util`)
=======================================
.. currentmodule:: qiita_pet.util
This module provides different util functions for qiita_pet.
Methods
-------
.. autosummary::
:toctree: generated/
clean_str
"""
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from future.utils import viewitems
from tornado.escape import linkify as tornado_linkify, xhtml_unescape
from qiita_core.util import execute_as_transaction
from qiita_db.reference import Reference
STATUS_STYLER = {
'sandbox':
('glyphicon glyphicon-eye-close', 'glyphicon glyphicon-lock', 'gray'),
'awaiting_approval':
('glyphicon glyphicon-eye-open', 'glyphicon glyphicon-lock', 'peru'),
'private':
('glyphicon glyphicon-eye-open', 'glyphicon glyphicon-lock',
'#3599FD'),
'public':
('glyphicon glyphicon-eye-open', 'glyphicon glyphicon-globe', 'green')}
EBI_LINKIFIER = ('<a href="http://www.ebi.ac.uk/ena/data/view/{0}" '
'target="_blank">{0}</a>')
def linkify(link_template, item):
"""Formats a strings into a URL using string replacement
Paramters
---------
link_template : str
The template for the URL.
item : list or tuple of str
The strings that will be inserted into the template
"""
return link_template.format(*item)
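# Example (illustrative accession): EBI_LINKIFIER reuses the same positional
# placeholder for the href and the link text, so
#     linkify(EBI_LINKIFIER, ('ERP001736',))
# returns
#     '<a href="http://www.ebi.ac.uk/ena/data/view/ERP001736" target="_blank">ERP001736</a>'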
def clean_str(item):
"""Converts input to string and replaces spaces with underscores
Parameters
----------
item : anything convertible to string
item to convert and clean
Returns
-------
str
cleaned string
"""
return str(item).replace(" ", "_").replace(":", "")
def convert_text_html(message):
"""Linkify URLs and turn newlines into <br/> for HTML"""
html = xhtml_unescape(tornado_linkify(message))
return html.replace('\n', '<br/>')
@execute_as_transaction
def generate_param_str(param):
"""Generate an html string with the parameter values
Parameters
----------
param : BaseParameters
The parameter to generate the str
Returns
-------
str
The html string with the parameter set values
"""
values = param.values
ref = Reference(values['reference'])
result = ["<b>Reference:</b> %s %s" % (ref.name, ref.version)]
result.extend("<b>%s:</b> %s" % (name, value)
for name, value in viewitems(values)
if name != 'reference')
return "<br/>".join(result)
def is_localhost(host):
"""Verifies if the connection is local
Parameters
----------
host : str
The requesting host, in general self.request.headers['host']
Returns
-------
bool
True if local request
"""
localhost = ('localhost', '127.0.0.1')
return host.startswith(localhost)
def get_artifact_processing_status(artifact):
"""Gets the processing status of the artifact
Parameters
----------
artifact : qiita_db.artifact.Artifact
The artifact to get the processing status
Returns
-------
str, str
The processing status {'processing', 'failed', 'success',
'Not processed'}
A summary of the jobs attached to the artifact
"""
preprocessing_status = 'Not processed'
preprocessing_status_msg = []
for job in artifact.jobs():
job_status = job.status
if job_status == 'error':
if preprocessing_status != 'success':
preprocessing_status = 'failed'
preprocessing_status_msg.append(
"<b>Job %s</b>: failed - %s"
% (job.id, job.log.msg))
elif job_status == 'success':
preprocessing_status = 'success'
else:
if preprocessing_status != 'success':
preprocessing_status = 'processing'
preprocessing_status_msg.append(
"<b>Job %s</b>: %s" % (job.id, job_status))
if not preprocessing_status_msg:
preprocessing_status_msg = 'Not processed'
else:
preprocessing_status_msg = convert_text_html(
'<br/>'.join(preprocessing_status_msg))
return preprocessing_status, preprocessing_status_msg
def get_network_nodes_edges(graph, full_access, nodes=None, edges=None):
"""Returns the JavaScript friendly representation of the graph
Parameters
----------
graph : networkx.DiGraph
The artifact/jobs graph
full_access : bool
Whether the user has full access to the graph or not
nodes : list, optional
A pre-populated list of nodes. Useful for the analysis pipeline
edges : list, optional
A pre-populated list of edges. Useful for the analysis pipeline
Returns
-------
(list, list, int)
The list of nodes, the list of edges, and the workflow id if there is
any job on construction
"""
nodes = nodes if nodes is not None else []
edges = edges if edges is not None else []
workflow_id = None
# n[0] is the data type: job/artifact/type
# n[1] is the object
for n in graph.nodes():
if n[0] == 'job':
atype = 'job'
name = n[1].command.name
status = n[1].status
if status == 'in_construction':
workflow_id = n[1].processing_job_workflow.id
elif n[0] == 'artifact':
atype = n[1].artifact_type
status = 'artifact'
if full_access or n[1].visibility == 'public':
name = '%s\n(%s)' % (n[1].name, n[1].artifact_type)
else:
continue
elif n[0] == 'type':
atype = n[1].type
name = '%s\n(%s)' % (n[1].name, n[1].type)
status = 'type'
else:
# this should never happen but let's add it just in case
raise ValueError('not valid node type: %s' % n[0])
nodes.append((n[0], atype, n[1].id, name, status))
edges.extend([(n[1].id, m[1].id) for n, m in graph.edges()])
return nodes, edges, workflow_id
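# Illustrative shape of the return value (values made up): each node is a
# (node_type, artifact_or_job_type, object_id, label, status) tuple, e.g.
#     nodes -> [('artifact', 'BIOM', 3, 'my-table\n(BIOM)', 'artifact'),
#               ('job', 'job', 7, 'Pick closed-reference OTUs', 'success')]
#     edges -> [(3, 7)]
#     workflow_id -> None unless some job is still 'in_construction'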
|
|
# Copyright 2013 NetApp
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The share snapshots api."""
from oslo_log import log
import webob
from webob import exc
from manila.api import common
from manila.api.openstack import wsgi
from manila.api.views import share_snapshots as snapshot_views
from manila import db
from manila import exception
from manila.i18n import _, _LI
from manila import share
LOG = log.getLogger(__name__)
class ShareSnapshotsController(wsgi.Controller, wsgi.AdminActionsMixin):
"""The Share Snapshots API controller for the OpenStack API."""
resource_name = 'share_snapshot'
_view_builder_class = snapshot_views.ViewBuilder
def __init__(self):
super(ShareSnapshotsController, self).__init__()
self.share_api = share.API()
def _update(self, *args, **kwargs):
db.share_snapshot_update(*args, **kwargs)
def _get(self, *args, **kwargs):
return self.share_api.get_snapshot(*args, **kwargs)
def _delete(self, *args, **kwargs):
return self.share_api.delete_snapshot(*args, **kwargs)
@wsgi.Controller.api_version('1.0', '2.6')
@wsgi.action('os-reset_status')
def snapshot_reset_status_legacy(self, req, id, body):
return self._reset_status(req, id, body)
@wsgi.Controller.api_version('2.7')
@wsgi.action('reset_status')
def snapshot_reset_status(self, req, id, body):
return self._reset_status(req, id, body)
@wsgi.Controller.api_version('1.0', '2.6')
@wsgi.action('os-force_delete')
def snapshot_force_delete_legacy(self, req, id, body):
return self._force_delete(req, id, body)
@wsgi.Controller.api_version('2.7')
@wsgi.action('force_delete')
def snapshot_force_delete(self, req, id, body):
return self._force_delete(req, id, body)
def show(self, req, id):
"""Return data about the given snapshot."""
context = req.environ['manila.context']
try:
snapshot = self.share_api.get_snapshot(context, id)
# Snapshot with no instances is filtered out.
if(snapshot.get('status') is None):
raise exc.HTTPNotFound()
except exception.NotFound:
raise exc.HTTPNotFound()
return self._view_builder.detail(req, snapshot)
def delete(self, req, id):
"""Delete a snapshot."""
context = req.environ['manila.context']
LOG.info(_LI("Delete snapshot with id: %s"), id, context=context)
try:
snapshot = self.share_api.get_snapshot(context, id)
self.share_api.delete_snapshot(context, snapshot)
except exception.NotFound:
raise exc.HTTPNotFound()
return webob.Response(status_int=202)
def index(self, req):
"""Returns a summary list of snapshots."""
return self._get_snapshots(req, is_detail=False)
def detail(self, req):
"""Returns a detailed list of snapshots."""
return self._get_snapshots(req, is_detail=True)
def _get_snapshots(self, req, is_detail):
"""Returns a list of snapshots."""
context = req.environ['manila.context']
search_opts = {}
search_opts.update(req.GET)
# Remove keys that are not related to share attrs
search_opts.pop('limit', None)
search_opts.pop('offset', None)
sort_key = search_opts.pop('sort_key', 'created_at')
sort_dir = search_opts.pop('sort_dir', 'desc')
# NOTE(vponomaryov): Manila stores the 'display_name' key in the DB, but
# allows the use of both 'name' and 'display_name'. This is a leftover
# from Cinder v1 and v2 APIs.
if 'name' in search_opts:
search_opts['display_name'] = search_opts.pop('name')
common.remove_invalid_options(context, search_opts,
self._get_snapshots_search_options())
snapshots = self.share_api.get_all_snapshots(
context,
search_opts=search_opts,
sort_key=sort_key,
sort_dir=sort_dir,
)
# Snapshots with no instances are filtered out.
snapshots = list(filter(lambda x: x.get('status') is not None,
snapshots))
limited_list = common.limited(snapshots, req)
if is_detail:
snapshots = self._view_builder.detail_list(req, limited_list)
else:
snapshots = self._view_builder.summary_list(req, limited_list)
return snapshots
def _get_snapshots_search_options(self):
"""Return share search options allowed by non-admin."""
return ('display_name', 'name', 'status', 'share_id', 'size')
def update(self, req, id, body):
"""Update a snapshot."""
context = req.environ['manila.context']
if not body or 'snapshot' not in body:
raise exc.HTTPUnprocessableEntity()
snapshot_data = body['snapshot']
valid_update_keys = (
'display_name',
'display_description',
)
update_dict = {key: snapshot_data[key]
for key in valid_update_keys
if key in snapshot_data}
try:
snapshot = self.share_api.get_snapshot(context, id)
except exception.NotFound:
raise exc.HTTPNotFound()
snapshot = self.share_api.snapshot_update(context, snapshot,
update_dict)
snapshot.update(update_dict)
return self._view_builder.detail(req, snapshot)
@wsgi.response(202)
def create(self, req, body):
"""Creates a new snapshot."""
context = req.environ['manila.context']
if not self.is_valid_body(body, 'snapshot'):
raise exc.HTTPUnprocessableEntity()
snapshot = body['snapshot']
share_id = snapshot['share_id']
share = self.share_api.get(context, share_id)
# Verify that share can be snapshotted
if not share['snapshot_support']:
msg = _("Snapshot cannot be created from share '%s', because "
"share back end does not support it.") % share_id
LOG.error(msg)
raise exc.HTTPUnprocessableEntity(explanation=msg)
LOG.info(_LI("Create snapshot from share %s"),
share_id, context=context)
# NOTE(rushiagr): v2 API allows name instead of display_name
if 'name' in snapshot:
snapshot['display_name'] = snapshot.get('name')
del snapshot['name']
# NOTE(rushiagr): v2 API allows description instead of
# display_description
if 'description' in snapshot:
snapshot['display_description'] = snapshot.get('description')
del snapshot['description']
new_snapshot = self.share_api.create_snapshot(
context,
share,
snapshot.get('display_name'),
snapshot.get('display_description'))
return self._view_builder.detail(
req, dict(new_snapshot.items()))
def create_resource():
return wsgi.Resource(ShareSnapshotsController())
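# Illustrative sketch (not part of the original module): the create() handler
# above remaps 'name'/'description' to 'display_name'/'display_description'
# before calling share_api.create_snapshot, so a request body of roughly the
# following shape would be accepted. The share_id value is a hypothetical
# placeholder.
EXAMPLE_SNAPSHOT_CREATE_BODY = {
    'snapshot': {
        'share_id': '00000000-0000-0000-0000-000000000000',  # hypothetical
        'name': 'daily-backup',          # stored as 'display_name'
        'description': 'nightly copy',   # stored as 'display_description'
    }
}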
|
|
# Copyright 2014 CloudFounders NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Native Injector module
"""
import os
from ConfigParser import RawConfigParser
from subprocess import check_output, CalledProcessError
class Injector(object):
"""
Injector class; provides all logic to inject. The unittest injector, by
contrast, only provides the functionality required by the unit tests.
"""
def __init__(self):
"""
This class should be fully static
"""
raise RuntimeError('This class should not be instantiated.')
@staticmethod
def inject_configuration(provider):
""" Injects the Config module """
def _get(key):
filename, section, item = key.split('.', 2)
config = RawConfigParser()
config.read('/opt/OpenvStorage/config/{0}.cfg'.format(filename))
return config.get(section, item)
def _set(key, value):
filename, section, item = key.split('.', 2)
config = RawConfigParser()
config.read('/opt/OpenvStorage/config/{0}.cfg'.format(filename))
config.set(section, item, value)
with open('/opt/OpenvStorage/config/{0}.cfg'.format(filename), 'w') as config_file:
config.write(config_file)
def _get_int(key):
return int(_get(key))
provider.get = staticmethod(_get)
provider.getInt = staticmethod(_get_int)
provider.set = staticmethod(_set)
return provider
@staticmethod
def inject_remote(provider):
""" Injects the remote module """
class Cuisine:
import cuisine
import fabric
api = cuisine
fabric = fabric.api
provider.fabric = Cuisine().fabric
provider.cuisine = Cuisine()
return provider
@staticmethod
def inject_service(provider):
""" Injects the Service module """
def _service_exists(name, path=None):
if path is None:
path = '/etc/init/'
return os.path.exists('{0}{1}.conf'.format(path, name))
def _get_name(name, path=None):
if _service_exists(name, path):
return name
name = 'ovs-{0}'.format(name)
if _service_exists(name, path):
return name
raise ValueError('Service {0} could not be found.'.format(name))
def add_service(package, name, command, stop_command, params=None):
_ = package, command, stop_command
if params is None:
params = {}
name = _get_name(name, '/opt/OpenvStorage/config/templates/upstart/')
template_dir = '/opt/OpenvStorage/config/templates/upstart/{0}'
upstart_dir = '/etc/init/{0}'
upstart_conf = '{0}.conf'.format(name)
with open(template_dir.format(upstart_conf), 'r') as template_file:
template_conf = template_file.read()
for key, value in params.iteritems():
print 'replacing {0} by {1}'.format(key, value)
template_conf = template_conf.replace(key, value)
print '\n\n\n service {0} configfile \n {1}'.format(name, template_conf)
with open(upstart_dir.format(upstart_conf), 'wb') as upstart_file:
upstart_file.write(template_conf)
def get_service_status(name):
try:
name = _get_name(name)
output = check_output('status {0}'.format(name), shell=True)
if 'start' in output:
return True
if 'stop' in output:
return False
except CalledProcessError:
pass
return None
def remove_service(domain, name):
_ = domain
# remove upstart.conf file
name = _get_name(name)
check_output('rm -rf /etc/init/{0}.conf'.format(name), shell=True)
check_output('rm -rf /etc/init/{0}.override'.format(name), shell=True)
def disable_service(name):
name = _get_name(name)
check_output('echo "manual" > /etc/init/{0}.override'.format(name), shell=True)
def enable_service(name):
name = _get_name(name)
check_output('rm -f /etc/init/{0}.override'.format(name), shell=True)
def start_service(name):
try:
name = _get_name(name)
output = check_output('start {0}'.format(name), shell=True)
except CalledProcessError as cpe:
output = cpe.output
return output
def stop_service(name):
try:
name = _get_name(name)
output = check_output('stop {0}'.format(name), shell=True)
except CalledProcessError as cpe:
output = cpe.output
return output
def restart_service(name):
try:
name = _get_name(name)
output = check_output('restart {0}'.format(name), shell=True)
except CalledProcessError as cpe:
output = cpe.output
return output
def has_service(name):
try:
_get_name(name)
return True
except ValueError:
return False
provider.add_service = staticmethod(add_service)
provider.remove_service = staticmethod(remove_service)
provider.get_service_status = staticmethod(get_service_status)
provider.disable_service = staticmethod(disable_service)
provider.enable_service = staticmethod(enable_service)
provider.start_service = staticmethod(start_service)
provider.stop_service = staticmethod(stop_service)
provider.restart_service = staticmethod(restart_service)
provider.has_service = staticmethod(has_service)
return provider
@staticmethod
def inject_process(provider):
""" Injects the Process module """
def check_process(name):
output = check_output('ps aux | grep -v grep | grep {0} || true'.format(name), shell=True)
# It returns 1 if the process is not running, else it returns 0. Don't ask questions...
return 1 if name not in output else 0
provider.checkProcess = staticmethod(check_process)
return provider
@staticmethod
def inject_package(provider):
""" Injects the Package module """
def _get_version(package):
return check_output("dpkg -s {0} | grep Version | cut -d ' ' -f 2".format(package), shell=True).strip()
def get_versions():
versions = {}
for package in ['openvstorage', 'openvstorage-alba', 'volumedriver-server', 'volumedriver-base', 'alba']:
version_info = _get_version(package)
if version_info:
versions[package] = version_info
return versions
provider.get_versions = staticmethod(get_versions)
return provider
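# Illustrative sketch (assumption, not part of the original module): each
# inject_* method above patches static methods onto a provider class and
# returns it. The 'Service' class below is hypothetical; any class works as
# a provider because the attributes are simply assigned onto it.
def _example_inject_service():
    class Service(object):
        pass
    Service = Injector.inject_service(Service)
    # After injection the provider exposes has_service, start_service, etc.
    return Service.has_service('nginx')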
|
|
###################################################################
# Copyright 2013-2014 All Rights Reserved
# Authors: The Paradrop Team
###################################################################
import errno
import os
import subprocess
import shutil
import six
from distutils import dir_util
# We have to import this for the decorator
from paradrop.base.output import out
# protect the original open function
__open = open
# Since we overwrite everything else, do the same to basename
basename = lambda x: os.path.basename(x)
def getMountCmd():
return "mount"
def isMount(mnt):
"""This function checks if @mnt is actually mounted."""
# TODO - need to check if partition and mount match the expected??
return os.path.ismount(mnt)
def oscall(cmd, get=False):
"""
This function performs an OS subprocess call.
All output is thrown away unless an error has occurred or @get is True.
Arguments:
@cmd: the string command to run
[get] : True means return (stdout, stderr)
Returns:
None if @get is False and no error occurred
(stdout, returncode, stderr) if @get is True or an error occurred
"""
# Since we are already in a deferred chain, use subprocess to block and make the call to mount right HERE AND NOW
proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, errors = proc.communicate()
if(proc.returncode or get):
return (output, proc.returncode, errors)
else:
if(output and output != ""):
out.verbose('"%s" stdout: "%s"\n' % (cmd, output.rstrip()))
if(errors and errors != ""):
out.verbose('"%s" stderr: "%s"\n' % (cmd, errors.rstrip()))
return None
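# Illustrative sketch (not part of the original module): oscall() swallows
# output on quiet success and returns the (stdout, returncode, stderr) tuple
# when get=True or when the command fails. The commands below are
# hypothetical examples.
def _example_oscall_usage():
    oscall('sync')  # fire-and-forget; returns None unless 'sync' fails
    stdout, returncode, stderr = oscall('uname -r', get=True)
    return returncode == 0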
def getFileType(f):
if not exists(f):
return None
r = oscall('file "%s"' % f, True)
if(r is not None and isinstance(r, tuple)):
return r[0]
else: # pragma: no cover
return None
def exists(p):
return os.path.exists(p)
def listdir(p):
return os.listdir(p)
def unlink(p):
return os.unlink(p)
def mkdir(p):
return os.mkdir(p)
def symlink(a, b):
return os.symlink(a, b)
def ismount(p):
return os.path.ismount(p)
def fixpath(p):
"""This function is required because if we need to pass a path to something like tarfile,
we cannot overwrite the function to fix the path, so we need to expose it somehow."""
return p
def copy(a, b):
return shutil.copy(a, b)
def move(a, b):
return shutil.move(a, b)
def remove(path, suppressNotFound=False):
if (isdir(path)):
return shutil.rmtree(path)
else:
try:
os.remove(path)
except OSError as err:
# Suppress the exception if it is a file not found error and the
# suppressNotFound flag is set. Otherwise, re-raise the exception.
if not suppressNotFound or err.errno != errno.ENOENT:
raise
def isdir(a):
return os.path.isdir(a)
def isfile(a):
return os.path.isfile(a)
def copytree(a, b):
"""shutil's copytree is dumb so use distutils."""
return dir_util.copy_tree(a, b)
def open(p, mode):
return __open(p, mode)
def writeFile(filename, line, mode="a"):
"""Adds the following cfg (either str or list(str)) to this Chute's current
config file (just stored locally, not written to file."""
try:
if isinstance(line, list):
data = "\n".join(line) + "\n"
elif isinstance(line, six.string_types):
data = "%s\n" % line
else:
out.err("Bad line provided for %s\n" % filename)
return
fd = open(filename, mode)
fd.write(data)
fd.flush()
fd.close()
except Exception as e:
out.err('Unable to write file: %s\n' % (str(e)))
def write(filename, data, mode="w"):
""" Writes out a config file to the specified location.
"""
try:
fd = open(filename, mode)
fd.write(data)
fd.flush()
fd.close()
except Exception as e:
out.err('Unable to write to file: %s\n' % str(e))
def readFile(filename, array=True, delimiter="\n"):
"""
Reads in a file; the contents are NOT expected to be binary.
Arguments:
@filename: absolute path to file
@array : optional: return a list of lines if True, a joined string if False
@delimiter: optional: when returning a string, the separator used to join the lines
Returns:
A list of stripped lines (or a @delimiter-joined string if @array is False)
None: if the file doesn't exist
"""
if(not exists(filename)):
return None
lines = []
with open(filename, 'r') as fd:
while(True):
line = fd.readline()
if(not line):
break
lines.append(line.rstrip())
if(array is True):
return lines
else:
return delimiter.join(lines)
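# Illustrative sketch (not part of the original module): writeFile() accepts a
# single string or a list of strings and appends them newline-terminated,
# while readFile() hands the stripped lines back as a list by default. The
# path below is a hypothetical example and assumes the file did not exist.
def _example_write_then_read():
    writeFile('/tmp/example-chute.cfg', ['option foo', 'option bar'])
    return readFile('/tmp/example-chute.cfg')  # ['option foo', 'option bar']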
def read_sys_file(path, default=None):
"""
Read a file and return the contents as a string.
This is best suited for files that store a single line of text such as
files in /sys/.
Returns the default value if an error occurs.
"""
try:
with open(path, 'r') as source:
return source.read().strip()
except Exception:
return default
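# Illustrative sketch (not part of the original module): read_sys_file() is
# meant for one-line sysfs entries and falls back to the supplied default
# when the path is missing or unreadable. The path below is a typical
# example.
def _example_read_operstate():
    return read_sys_file('/sys/class/net/eth0/operstate', default='unknown')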
|
|
#!/usr/bin/env python
#
# Generated Mon May 2 14:23:33 2011 by parse_xsd.py version 0.4.
#
import saml2
from saml2 import SamlBase
NAMESPACE = 'http://www.w3.org/2000/09/xmldsig#'
ENCODING_BASE64 = 'http://www.w3.org/2000/09/xmldsig#base64'
DIGEST_SHA1 = 'http://www.w3.org/2000/09/xmldsig#sha1'
ALG_EXC_C14N = 'http://www.w3.org/2001/10/xml-exc-c14n#'
SIG_DSA_SHA1 = 'http://www.w3.org/2000/09/xmldsig#dsa-sha1'
SIG_RSA_SHA1 = 'http://www.w3.org/2000/09/xmldsig#rsa-sha1'
MAC_SHA1 = 'http://www.w3.org/2000/09/xmldsig#hmac-sha1'
C14N = 'http://www.w3.org/TR/2001/REC-xml-c14n-20010315'
C14N_WITH_C = 'http://www.w3.org/TR/2001/REC-xml-c14n-20010315#WithComments'
TRANSFORM_XSLT = 'http://www.w3.org/TR/1999/REC-xslt-19991116'
TRANSFORM_XPATH = 'http://www.w3.org/TR/1999/REC-xpath-19991116'
TRANSFORM_ENVELOPED = 'http://www.w3.org/2000/09/xmldsig#enveloped-signature'
class CryptoBinary_(SamlBase):
"""The http://www.w3.org/2000/09/xmldsig#:CryptoBinary element """
c_tag = 'CryptoBinary'
c_namespace = NAMESPACE
c_value_type = {'base': 'base64Binary'}
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
def crypto_binary__from_string(xml_string):
return saml2.create_class_from_xml_string(CryptoBinary_, xml_string)
class SignatureValueType_(SamlBase):
"""The http://www.w3.org/2000/09/xmldsig#:SignatureValueType element """
c_tag = 'SignatureValueType'
c_namespace = NAMESPACE
c_value_type = {'base': 'base64Binary'}
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_attributes['Id'] = ('id', 'ID', False)
def __init__(self,
id=None,
text=None,
extension_elements=None,
extension_attributes=None,
):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
)
self.id=id
def signature_value_type__from_string(xml_string):
return saml2.create_class_from_xml_string(SignatureValueType_, xml_string)
class CanonicalizationMethodType_(SamlBase):
"""The http://www.w3.org/2000/09/xmldsig#:CanonicalizationMethodType element """
c_tag = 'CanonicalizationMethodType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_attributes['Algorithm'] = ('algorithm', 'anyURI', True)
def __init__(self,
algorithm=None,
text=None,
extension_elements=None,
extension_attributes=None,
):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
)
self.algorithm=algorithm
def canonicalization_method_type__from_string(xml_string):
return saml2.create_class_from_xml_string(CanonicalizationMethodType_,
xml_string)
class TransformType_XPath(SamlBase):
c_tag = 'XPath'
c_namespace = NAMESPACE
c_value_type = {'base': 'string'}
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
def transform_type__x_path_from_string(xml_string):
return saml2.create_class_from_xml_string(TransformType_XPath, xml_string)
class TransformType_(SamlBase):
"""The http://www.w3.org/2000/09/xmldsig#:TransformType element """
c_tag = 'TransformType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_children['{http://www.w3.org/2000/09/xmldsig#}XPath'] = ('x_path',
[TransformType_XPath])
c_cardinality['x_path'] = {"min":0}
c_attributes['Algorithm'] = ('algorithm', 'anyURI', True)
c_child_order.extend(['x_path'])
def __init__(self,
x_path=None,
algorithm=None,
text=None,
extension_elements=None,
extension_attributes=None,
):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
)
self.x_path=x_path or []
self.algorithm=algorithm
def transform_type__from_string(xml_string):
return saml2.create_class_from_xml_string(TransformType_, xml_string)
class DigestMethodType_(SamlBase):
"""The http://www.w3.org/2000/09/xmldsig#:DigestMethodType element """
c_tag = 'DigestMethodType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_attributes['Algorithm'] = ('algorithm', 'anyURI', True)
def __init__(self,
algorithm=None,
text=None,
extension_elements=None,
extension_attributes=None,
):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
)
self.algorithm=algorithm
def digest_method_type__from_string(xml_string):
return saml2.create_class_from_xml_string(DigestMethodType_, xml_string)
class DigestValueType_(SamlBase):
"""The http://www.w3.org/2000/09/xmldsig#:DigestValueType element """
c_tag = 'DigestValueType'
c_namespace = NAMESPACE
c_value_type = {'base': 'base64Binary'}
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
def digest_value_type__from_string(xml_string):
return saml2.create_class_from_xml_string(DigestValueType_, xml_string)
class KeyName(SamlBase):
"""The http://www.w3.org/2000/09/xmldsig#:KeyName element """
c_tag = 'KeyName'
c_namespace = NAMESPACE
c_value_type = {'base': 'string'}
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
def key_name_from_string(xml_string):
return saml2.create_class_from_xml_string(KeyName, xml_string)
class MgmtData(SamlBase):
"""The http://www.w3.org/2000/09/xmldsig#:MgmtData element """
c_tag = 'MgmtData'
c_namespace = NAMESPACE
c_value_type = {'base': 'string'}
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
def mgmt_data_from_string(xml_string):
return saml2.create_class_from_xml_string(MgmtData, xml_string)
class X509IssuerName(SamlBase):
c_tag = 'X509IssuerName'
c_namespace = NAMESPACE
c_value_type = {'base': 'string'}
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
def x509_issuer_name_from_string(xml_string):
return saml2.create_class_from_xml_string(X509IssuerName, xml_string)
class X509SerialNumber(SamlBase):
c_tag = 'X509SerialNumber'
c_namespace = NAMESPACE
c_value_type = {'base': 'integer'}
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
def x509_serial_number_from_string(xml_string):
return saml2.create_class_from_xml_string(X509SerialNumber, xml_string)
class X509IssuerSerialType_(SamlBase):
"""The http://www.w3.org/2000/09/xmldsig#:X509IssuerSerialType element """
c_tag = 'X509IssuerSerialType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_children['{http://www.w3.org/2000/09/xmldsig#}X509IssuerName'] = ('x509_issuer_name', X509IssuerName)
c_children['{http://www.w3.org/2000/09/xmldsig#}X509SerialNumber'] = ('x509_serial_number', X509SerialNumber)
c_child_order.extend(['x509_issuer_name', 'x509_serial_number'])
def __init__(self,
x509_issuer_name=None,
x509_serial_number=None,
text=None,
extension_elements=None,
extension_attributes=None,
):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
)
self.x509_issuer_name=x509_issuer_name
self.x509_serial_number=x509_serial_number
def x509_issuer_serial_type__from_string(xml_string):
return saml2.create_class_from_xml_string(X509IssuerSerialType_, xml_string)
class PGPKeyID(SamlBase):
c_tag = 'PGPKeyID'
c_namespace = NAMESPACE
c_value_type = {'base': 'base64Binary'}
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
def pgp_key_id_from_string(xml_string):
return saml2.create_class_from_xml_string(PGPKeyID, xml_string)
class PGPKeyPacket(SamlBase):
c_tag = 'PGPKeyPacket'
c_namespace = NAMESPACE
c_value_type = {'base': 'base64Binary'}
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
def pgp_key_packet_from_string(xml_string):
return saml2.create_class_from_xml_string(PGPKeyPacket, xml_string)
class PGPDataType_(SamlBase):
"""The http://www.w3.org/2000/09/xmldsig#:PGPDataType element """
c_tag = 'PGPDataType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_children['{http://www.w3.org/2000/09/xmldsig#}PGPKeyID'] = ('pgp_key_id', PGPKeyID)
c_children['{http://www.w3.org/2000/09/xmldsig#}PGPKeyPacket'] = ('pgp_key_packet', PGPKeyPacket)
c_cardinality['pgp_key_packet'] = {"min":0, "max":1}
c_child_order.extend(['pgp_key_id', 'pgp_key_packet'])
def __init__(self,
pgp_key_id=None,
pgp_key_packet=None,
text=None,
extension_elements=None,
extension_attributes=None,
):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
)
self.pgp_key_id=pgp_key_id
self.pgp_key_packet=pgp_key_packet
def pgp_data_type__from_string(xml_string):
return saml2.create_class_from_xml_string(PGPDataType_, xml_string)
class SPKISexp(SamlBase):
c_tag = 'SPKISexp'
c_namespace = NAMESPACE
c_value_type = {'base': 'base64Binary'}
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
def spki_sexp_from_string(xml_string):
return saml2.create_class_from_xml_string(SPKISexp, xml_string)
class SPKIDataType_(SamlBase):
"""The http://www.w3.org/2000/09/xmldsig#:SPKIDataType element """
c_tag = 'SPKIDataType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_children['{http://www.w3.org/2000/09/xmldsig#}SPKISexp'] = ('spki_sexp',
[SPKISexp])
c_cardinality['spki_sexp'] = {"min":1}
c_child_order.extend(['spki_sexp'])
def __init__(self,
spki_sexp=None,
text=None,
extension_elements=None,
extension_attributes=None,
):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
)
self.spki_sexp=spki_sexp or []
def spki_data_type__from_string(xml_string):
return saml2.create_class_from_xml_string(SPKIDataType_, xml_string)
class ObjectType_(SamlBase):
"""The http://www.w3.org/2000/09/xmldsig#:ObjectType element """
c_tag = 'ObjectType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_attributes['Id'] = ('id', 'ID', False)
c_attributes['MimeType'] = ('mime_type', 'string', False)
c_attributes['Encoding'] = ('encoding', 'anyURI', False)
def __init__(self,
id=None,
mime_type=None,
encoding=None,
text=None,
extension_elements=None,
extension_attributes=None,
):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
)
self.id=id
self.mime_type=mime_type
self.encoding=encoding
def object_type__from_string(xml_string):
return saml2.create_class_from_xml_string(ObjectType_, xml_string)
class SignaturePropertyType_(SamlBase):
"""The http://www.w3.org/2000/09/xmldsig#:SignaturePropertyType element """
c_tag = 'SignaturePropertyType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_attributes['Target'] = ('target', 'anyURI', True)
c_attributes['Id'] = ('id', 'ID', False)
def __init__(self,
target=None,
id=None,
text=None,
extension_elements=None,
extension_attributes=None,
):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
)
self.target=target
self.id=id
def signature_property_type__from_string(xml_string):
return saml2.create_class_from_xml_string(SignaturePropertyType_, xml_string)
class HMACOutputLengthType_(SamlBase):
"""The http://www.w3.org/2000/09/xmldsig#:HMACOutputLengthType element """
c_tag = 'HMACOutputLengthType'
c_namespace = NAMESPACE
c_value_type = {'base': 'integer'}
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
def hmac_output_length_type__from_string(xml_string):
return saml2.create_class_from_xml_string(HMACOutputLengthType_, xml_string)
class P(CryptoBinary_):
c_tag = 'P'
c_namespace = NAMESPACE
c_children = CryptoBinary_.c_children.copy()
c_attributes = CryptoBinary_.c_attributes.copy()
c_child_order = CryptoBinary_.c_child_order[:]
c_cardinality = CryptoBinary_.c_cardinality.copy()
def p_from_string(xml_string):
return saml2.create_class_from_xml_string(P, xml_string)
class Q(CryptoBinary_):
c_tag = 'Q'
c_namespace = NAMESPACE
c_children = CryptoBinary_.c_children.copy()
c_attributes = CryptoBinary_.c_attributes.copy()
c_child_order = CryptoBinary_.c_child_order[:]
c_cardinality = CryptoBinary_.c_cardinality.copy()
def q_from_string(xml_string):
return saml2.create_class_from_xml_string(Q, xml_string)
class G(CryptoBinary_):
c_tag = 'G'
c_namespace = NAMESPACE
c_children = CryptoBinary_.c_children.copy()
c_attributes = CryptoBinary_.c_attributes.copy()
c_child_order = CryptoBinary_.c_child_order[:]
c_cardinality = CryptoBinary_.c_cardinality.copy()
def g_from_string(xml_string):
return saml2.create_class_from_xml_string(G, xml_string)
class Y(CryptoBinary_):
c_tag = 'Y'
c_namespace = NAMESPACE
c_children = CryptoBinary_.c_children.copy()
c_attributes = CryptoBinary_.c_attributes.copy()
c_child_order = CryptoBinary_.c_child_order[:]
c_cardinality = CryptoBinary_.c_cardinality.copy()
def y_from_string(xml_string):
return saml2.create_class_from_xml_string(Y, xml_string)
class J(CryptoBinary_):
c_tag = 'J'
c_namespace = NAMESPACE
c_children = CryptoBinary_.c_children.copy()
c_attributes = CryptoBinary_.c_attributes.copy()
c_child_order = CryptoBinary_.c_child_order[:]
c_cardinality = CryptoBinary_.c_cardinality.copy()
def j_from_string(xml_string):
return saml2.create_class_from_xml_string(J, xml_string)
class Seed(CryptoBinary_):
c_tag = 'Seed'
c_namespace = NAMESPACE
c_children = CryptoBinary_.c_children.copy()
c_attributes = CryptoBinary_.c_attributes.copy()
c_child_order = CryptoBinary_.c_child_order[:]
c_cardinality = CryptoBinary_.c_cardinality.copy()
def seed_from_string(xml_string):
return saml2.create_class_from_xml_string(Seed, xml_string)
class PgenCounter(CryptoBinary_):
c_tag = 'PgenCounter'
c_namespace = NAMESPACE
c_children = CryptoBinary_.c_children.copy()
c_attributes = CryptoBinary_.c_attributes.copy()
c_child_order = CryptoBinary_.c_child_order[:]
c_cardinality = CryptoBinary_.c_cardinality.copy()
def pgen_counter_from_string(xml_string):
return saml2.create_class_from_xml_string(PgenCounter, xml_string)
class DSAKeyValueType_(SamlBase):
"""The http://www.w3.org/2000/09/xmldsig#:DSAKeyValueType element """
c_tag = 'DSAKeyValueType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_children['{http://www.w3.org/2000/09/xmldsig#}P'] = ('p', P)
c_cardinality['p'] = {"min":0, "max":1}
c_children['{http://www.w3.org/2000/09/xmldsig#}Q'] = ('q', Q)
c_cardinality['q'] = {"min":0, "max":1}
c_children['{http://www.w3.org/2000/09/xmldsig#}G'] = ('g', G)
c_cardinality['g'] = {"min":0, "max":1}
c_children['{http://www.w3.org/2000/09/xmldsig#}Y'] = ('y', Y)
c_children['{http://www.w3.org/2000/09/xmldsig#}J'] = ('j', J)
c_cardinality['j'] = {"min":0, "max":1}
c_children['{http://www.w3.org/2000/09/xmldsig#}Seed'] = ('seed', Seed)
c_cardinality['seed'] = {"min":0, "max":1}
c_children['{http://www.w3.org/2000/09/xmldsig#}PgenCounter'] = ('pgen_counter',
PgenCounter)
c_cardinality['pgen_counter'] = {"min":0, "max":1}
c_child_order.extend(['p', 'q', 'g', 'y', 'j', 'seed', 'pgen_counter'])
def __init__(self,
p=None,
q=None,
g=None,
y=None,
j=None,
seed=None,
pgen_counter=None,
text=None,
extension_elements=None,
extension_attributes=None,
):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
)
self.p=p
self.q=q
self.g=g
self.y=y
self.j=j
self.seed=seed
self.pgen_counter=pgen_counter
def dsa_key_value_type__from_string(xml_string):
return saml2.create_class_from_xml_string(DSAKeyValueType_, xml_string)
class Modulus(CryptoBinary_):
c_tag = 'Modulus'
c_namespace = NAMESPACE
c_children = CryptoBinary_.c_children.copy()
c_attributes = CryptoBinary_.c_attributes.copy()
c_child_order = CryptoBinary_.c_child_order[:]
c_cardinality = CryptoBinary_.c_cardinality.copy()
def modulus_from_string(xml_string):
return saml2.create_class_from_xml_string(Modulus, xml_string)
class Exponent(CryptoBinary_):
c_tag = 'Exponent'
c_namespace = NAMESPACE
c_children = CryptoBinary_.c_children.copy()
c_attributes = CryptoBinary_.c_attributes.copy()
c_child_order = CryptoBinary_.c_child_order[:]
c_cardinality = CryptoBinary_.c_cardinality.copy()
def exponent_from_string(xml_string):
return saml2.create_class_from_xml_string(Exponent, xml_string)
class RSAKeyValueType_(SamlBase):
"""The http://www.w3.org/2000/09/xmldsig#:RSAKeyValueType element """
c_tag = 'RSAKeyValueType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_children['{http://www.w3.org/2000/09/xmldsig#}Modulus'] = ('modulus',
Modulus)
c_children['{http://www.w3.org/2000/09/xmldsig#}Exponent'] = ('exponent',
Exponent)
c_child_order.extend(['modulus', 'exponent'])
def __init__(self,
modulus=None,
exponent=None,
text=None,
extension_elements=None,
extension_attributes=None,
):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
)
self.modulus=modulus
self.exponent=exponent
def rsa_key_value_type__from_string(xml_string):
return saml2.create_class_from_xml_string(RSAKeyValueType_, xml_string)
class SignatureValue(SignatureValueType_):
"""The http://www.w3.org/2000/09/xmldsig#:SignatureValue element """
c_tag = 'SignatureValue'
c_namespace = NAMESPACE
c_children = SignatureValueType_.c_children.copy()
c_attributes = SignatureValueType_.c_attributes.copy()
c_child_order = SignatureValueType_.c_child_order[:]
c_cardinality = SignatureValueType_.c_cardinality.copy()
def signature_value_from_string(xml_string):
return saml2.create_class_from_xml_string(SignatureValue, xml_string)
class CanonicalizationMethod(CanonicalizationMethodType_):
"""The http://www.w3.org/2000/09/xmldsig#:CanonicalizationMethod element """
c_tag = 'CanonicalizationMethod'
c_namespace = NAMESPACE
c_children = CanonicalizationMethodType_.c_children.copy()
c_attributes = CanonicalizationMethodType_.c_attributes.copy()
c_child_order = CanonicalizationMethodType_.c_child_order[:]
c_cardinality = CanonicalizationMethodType_.c_cardinality.copy()
def canonicalization_method_from_string(xml_string):
return saml2.create_class_from_xml_string(CanonicalizationMethod,
xml_string)
class HMACOutputLength(HMACOutputLengthType_):
c_tag = 'HMACOutputLength'
c_namespace = NAMESPACE
c_children = HMACOutputLengthType_.c_children.copy()
c_attributes = HMACOutputLengthType_.c_attributes.copy()
c_child_order = HMACOutputLengthType_.c_child_order[:]
c_cardinality = HMACOutputLengthType_.c_cardinality.copy()
def hmac_output_length_from_string(xml_string):
return saml2.create_class_from_xml_string(HMACOutputLength, xml_string)
class SignatureMethodType_(SamlBase):
"""The http://www.w3.org/2000/09/xmldsig#:SignatureMethodType element """
c_tag = 'SignatureMethodType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_children['{http://www.w3.org/2000/09/xmldsig#}HMACOutputLength'] = ('hmac_output_length', HMACOutputLength)
c_cardinality['hmac_output_length'] = {"min":0, "max":1}
c_attributes['Algorithm'] = ('algorithm', 'anyURI', True)
c_child_order.extend(['hmac_output_length'])
def __init__(self,
hmac_output_length=None,
algorithm=None,
text=None,
extension_elements=None,
extension_attributes=None,
):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
)
self.hmac_output_length=hmac_output_length
self.algorithm=algorithm
def signature_method_type__from_string(xml_string):
return saml2.create_class_from_xml_string(SignatureMethodType_, xml_string)
class Transform(TransformType_):
"""The http://www.w3.org/2000/09/xmldsig#:Transform element """
c_tag = 'Transform'
c_namespace = NAMESPACE
c_children = TransformType_.c_children.copy()
c_attributes = TransformType_.c_attributes.copy()
c_child_order = TransformType_.c_child_order[:]
c_cardinality = TransformType_.c_cardinality.copy()
def transform_from_string(xml_string):
return saml2.create_class_from_xml_string(Transform, xml_string)
class DigestMethod(DigestMethodType_):
"""The http://www.w3.org/2000/09/xmldsig#:DigestMethod element """
c_tag = 'DigestMethod'
c_namespace = NAMESPACE
c_children = DigestMethodType_.c_children.copy()
c_attributes = DigestMethodType_.c_attributes.copy()
c_child_order = DigestMethodType_.c_child_order[:]
c_cardinality = DigestMethodType_.c_cardinality.copy()
def digest_method_from_string(xml_string):
return saml2.create_class_from_xml_string(DigestMethod, xml_string)
class DigestValue(DigestValueType_):
"""The http://www.w3.org/2000/09/xmldsig#:DigestValue element """
c_tag = 'DigestValue'
c_namespace = NAMESPACE
c_children = DigestValueType_.c_children.copy()
c_attributes = DigestValueType_.c_attributes.copy()
c_child_order = DigestValueType_.c_child_order[:]
c_cardinality = DigestValueType_.c_cardinality.copy()
def digest_value_from_string(xml_string):
return saml2.create_class_from_xml_string(DigestValue, xml_string)
class X509IssuerSerial(X509IssuerSerialType_):
c_tag = 'X509IssuerSerial'
c_namespace = NAMESPACE
c_children = X509IssuerSerialType_.c_children.copy()
c_attributes = X509IssuerSerialType_.c_attributes.copy()
c_child_order = X509IssuerSerialType_.c_child_order[:]
c_cardinality = X509IssuerSerialType_.c_cardinality.copy()
def x509_issuer_serial_from_string(xml_string):
return saml2.create_class_from_xml_string(X509IssuerSerial, xml_string)
class X509SKI(SamlBase):
c_tag = 'X509SKI'
c_namespace = NAMESPACE
c_value_type = {'base': 'base64Binary'}
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
def x509_ski_from_string(xml_string):
return saml2.create_class_from_xml_string(X509SKI, xml_string)
class X509SubjectName(SamlBase):
c_tag = 'X509SubjectName'
c_namespace = NAMESPACE
c_value_type = {'base': 'string'}
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
def x509_subject_name_from_string(xml_string):
return saml2.create_class_from_xml_string(X509SubjectName, xml_string)
class X509Certificate(SamlBase):
c_tag = 'X509Certificate'
c_namespace = NAMESPACE
c_value_type = {'base': 'base64Binary'}
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
def x509_certificate_from_string(xml_string):
return saml2.create_class_from_xml_string(X509Certificate, xml_string)
class X509CRL(SamlBase):
c_tag = 'X509CRL'
c_namespace = NAMESPACE
c_value_type = {'base': 'base64Binary'}
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
def x509_crl_from_string(xml_string):
return saml2.create_class_from_xml_string(X509CRL, xml_string)
class X509DataType_(SamlBase):
"""The http://www.w3.org/2000/09/xmldsig#:X509DataType element """
c_tag = 'X509DataType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_children['{http://www.w3.org/2000/09/xmldsig#}X509IssuerSerial'] = ('x509_issuer_serial',
X509IssuerSerial)
c_cardinality['x509_issuer_serial'] = {"min":0, "max":1}
c_children['{http://www.w3.org/2000/09/xmldsig#}X509SKI'] = ('x509_ski',
X509SKI)
c_cardinality['x509_ski'] = {"min":0, "max":1}
c_children['{http://www.w3.org/2000/09/xmldsig#}X509SubjectName'] = ('x509_subject_name',
X509SubjectName)
c_cardinality['x509_subject_name'] = {"min":0, "max":1}
c_children['{http://www.w3.org/2000/09/xmldsig#}X509Certificate'] = ('x509_certificate',
X509Certificate)
c_cardinality['x509_certificate'] = {"min":0, "max":1}
c_children['{http://www.w3.org/2000/09/xmldsig#}X509CRL'] = ('x509_crl',
X509CRL)
c_cardinality['x509_crl'] = {"min":0, "max":1}
c_child_order.extend(['x509_issuer_serial', 'x509_ski', 'x509_subject_name',
'x509_certificate', 'x509_crl'])
def __init__(self,
x509_issuer_serial=None,
x509_ski=None,
x509_subject_name=None,
x509_certificate=None,
x509_crl=None,
text=None,
extension_elements=None,
extension_attributes=None,
):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
)
self.x509_issuer_serial=x509_issuer_serial
self.x509_ski=x509_ski
self.x509_subject_name=x509_subject_name
self.x509_certificate=x509_certificate
self.x509_crl=x509_crl
def x509_data_type__from_string(xml_string):
return saml2.create_class_from_xml_string(X509DataType_, xml_string)
class PGPData(PGPDataType_):
"""The http://www.w3.org/2000/09/xmldsig#:PGPData element """
c_tag = 'PGPData'
c_namespace = NAMESPACE
c_children = PGPDataType_.c_children.copy()
c_attributes = PGPDataType_.c_attributes.copy()
c_child_order = PGPDataType_.c_child_order[:]
c_cardinality = PGPDataType_.c_cardinality.copy()
def pgp_data_from_string(xml_string):
return saml2.create_class_from_xml_string(PGPData, xml_string)
class SPKIData(SPKIDataType_):
"""The http://www.w3.org/2000/09/xmldsig#:SPKIData element """
c_tag = 'SPKIData'
c_namespace = NAMESPACE
c_children = SPKIDataType_.c_children.copy()
c_attributes = SPKIDataType_.c_attributes.copy()
c_child_order = SPKIDataType_.c_child_order[:]
c_cardinality = SPKIDataType_.c_cardinality.copy()
def spki_data_from_string(xml_string):
return saml2.create_class_from_xml_string(SPKIData, xml_string)
class Object(ObjectType_):
"""The http://www.w3.org/2000/09/xmldsig#:Object element """
c_tag = 'Object'
c_namespace = NAMESPACE
c_children = ObjectType_.c_children.copy()
c_attributes = ObjectType_.c_attributes.copy()
c_child_order = ObjectType_.c_child_order[:]
c_cardinality = ObjectType_.c_cardinality.copy()
def object_from_string(xml_string):
return saml2.create_class_from_xml_string(Object, xml_string)
class SignatureProperty(SignaturePropertyType_):
"""The http://www.w3.org/2000/09/xmldsig#:SignatureProperty element """
c_tag = 'SignatureProperty'
c_namespace = NAMESPACE
c_children = SignaturePropertyType_.c_children.copy()
c_attributes = SignaturePropertyType_.c_attributes.copy()
c_child_order = SignaturePropertyType_.c_child_order[:]
c_cardinality = SignaturePropertyType_.c_cardinality.copy()
def signature_property_from_string(xml_string):
return saml2.create_class_from_xml_string(SignatureProperty, xml_string)
class DSAKeyValue(DSAKeyValueType_):
"""The http://www.w3.org/2000/09/xmldsig#:DSAKeyValue element """
c_tag = 'DSAKeyValue'
c_namespace = NAMESPACE
c_children = DSAKeyValueType_.c_children.copy()
c_attributes = DSAKeyValueType_.c_attributes.copy()
c_child_order = DSAKeyValueType_.c_child_order[:]
c_cardinality = DSAKeyValueType_.c_cardinality.copy()
def dsa_key_value_from_string(xml_string):
return saml2.create_class_from_xml_string(DSAKeyValue, xml_string)
class RSAKeyValue(RSAKeyValueType_):
"""The http://www.w3.org/2000/09/xmldsig#:RSAKeyValue element """
c_tag = 'RSAKeyValue'
c_namespace = NAMESPACE
c_children = RSAKeyValueType_.c_children.copy()
c_attributes = RSAKeyValueType_.c_attributes.copy()
c_child_order = RSAKeyValueType_.c_child_order[:]
c_cardinality = RSAKeyValueType_.c_cardinality.copy()
def rsa_key_value_from_string(xml_string):
return saml2.create_class_from_xml_string(RSAKeyValue, xml_string)
class SignatureMethod(SignatureMethodType_):
"""The http://www.w3.org/2000/09/xmldsig#:SignatureMethod element """
c_tag = 'SignatureMethod'
c_namespace = NAMESPACE
c_children = SignatureMethodType_.c_children.copy()
c_attributes = SignatureMethodType_.c_attributes.copy()
c_child_order = SignatureMethodType_.c_child_order[:]
c_cardinality = SignatureMethodType_.c_cardinality.copy()
def signature_method_from_string(xml_string):
return saml2.create_class_from_xml_string(SignatureMethod, xml_string)
class TransformsType_(SamlBase):
"""The http://www.w3.org/2000/09/xmldsig#:TransformsType element """
c_tag = 'TransformsType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_children['{http://www.w3.org/2000/09/xmldsig#}Transform'] = ('transform',
[Transform])
c_cardinality['transform'] = {"min":1}
c_child_order.extend(['transform'])
def __init__(self,
transform=None,
text=None,
extension_elements=None,
extension_attributes=None,
):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
)
self.transform=transform or []
def transforms_type__from_string(xml_string):
return saml2.create_class_from_xml_string(TransformsType_, xml_string)
class KeyValueType_(SamlBase):
"""The http://www.w3.org/2000/09/xmldsig#:KeyValueType element """
c_tag = 'KeyValueType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_children['{http://www.w3.org/2000/09/xmldsig#}DSAKeyValue'] = ('dsa_key_value',
DSAKeyValue)
c_cardinality['dsa_key_value'] = {"min":0, "max":1}
c_children['{http://www.w3.org/2000/09/xmldsig#}RSAKeyValue'] = ('rsa_key_value',
RSAKeyValue)
c_cardinality['rsa_key_value'] = {"min":0, "max":1}
c_child_order.extend(['dsa_key_value', 'rsa_key_value'])
def __init__(self,
dsa_key_value=None,
rsa_key_value=None,
text=None,
extension_elements=None,
extension_attributes=None,
):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
)
self.dsa_key_value=dsa_key_value
self.rsa_key_value=rsa_key_value
def key_value_type__from_string(xml_string):
return saml2.create_class_from_xml_string(KeyValueType_, xml_string)
class X509Data(X509DataType_):
"""The http://www.w3.org/2000/09/xmldsig#:X509Data element """
c_tag = 'X509Data'
c_namespace = NAMESPACE
c_children = X509DataType_.c_children.copy()
c_attributes = X509DataType_.c_attributes.copy()
c_child_order = X509DataType_.c_child_order[:]
c_cardinality = X509DataType_.c_cardinality.copy()
def x509_data_from_string(xml_string):
return saml2.create_class_from_xml_string(X509Data, xml_string)
class SignaturePropertiesType_(SamlBase):
"""The http://www.w3.org/2000/09/xmldsig#:SignaturePropertiesType element """
c_tag = 'SignaturePropertiesType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_children['{http://www.w3.org/2000/09/xmldsig#}SignatureProperty'] = ('signature_property', [SignatureProperty])
c_cardinality['signature_property'] = {"min":1}
c_attributes['Id'] = ('id', 'ID', False)
c_child_order.extend(['signature_property'])
def __init__(self,
signature_property=None,
id=None,
text=None,
extension_elements=None,
extension_attributes=None,
):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
)
self.signature_property=signature_property or []
self.id=id
def signature_properties_type__from_string(xml_string):
return saml2.create_class_from_xml_string(SignaturePropertiesType_, xml_string)
class Transforms(TransformsType_):
"""The http://www.w3.org/2000/09/xmldsig#:Transforms element """
c_tag = 'Transforms'
c_namespace = NAMESPACE
c_children = TransformsType_.c_children.copy()
c_attributes = TransformsType_.c_attributes.copy()
c_child_order = TransformsType_.c_child_order[:]
c_cardinality = TransformsType_.c_cardinality.copy()
def transforms_from_string(xml_string):
return saml2.create_class_from_xml_string(Transforms, xml_string)
class KeyValue(KeyValueType_):
"""The http://www.w3.org/2000/09/xmldsig#:KeyValue element """
c_tag = 'KeyValue'
c_namespace = NAMESPACE
c_children = KeyValueType_.c_children.copy()
c_attributes = KeyValueType_.c_attributes.copy()
c_child_order = KeyValueType_.c_child_order[:]
c_cardinality = KeyValueType_.c_cardinality.copy()
def key_value_from_string(xml_string):
return saml2.create_class_from_xml_string(KeyValue, xml_string)
class RetrievalMethodType_(SamlBase):
"""The http://www.w3.org/2000/09/xmldsig#:RetrievalMethodType element """
c_tag = 'RetrievalMethodType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_children['{http://www.w3.org/2000/09/xmldsig#}Transforms'] = ('transforms',
Transforms)
c_cardinality['transforms'] = {"min":0, "max":1}
c_attributes['URI'] = ('uri', 'anyURI', False)
c_attributes['Type'] = ('type', 'anyURI', False)
c_child_order.extend(['transforms'])
def __init__(self,
transforms=None,
uri=None,
type=None,
text=None,
extension_elements=None,
extension_attributes=None,
):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
)
self.transforms=transforms
self.uri=uri
self.type=type
def retrieval_method_type__from_string(xml_string):
return saml2.create_class_from_xml_string(RetrievalMethodType_, xml_string)
class SignatureProperties(SignaturePropertiesType_):
"""The http://www.w3.org/2000/09/xmldsig#:SignatureProperties element """
c_tag = 'SignatureProperties'
c_namespace = NAMESPACE
c_children = SignaturePropertiesType_.c_children.copy()
c_attributes = SignaturePropertiesType_.c_attributes.copy()
c_child_order = SignaturePropertiesType_.c_child_order[:]
c_cardinality = SignaturePropertiesType_.c_cardinality.copy()
def signature_properties_from_string(xml_string):
return saml2.create_class_from_xml_string(SignatureProperties, xml_string)
class ReferenceType_(SamlBase):
"""The http://www.w3.org/2000/09/xmldsig#:ReferenceType element """
c_tag = 'ReferenceType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_children['{http://www.w3.org/2000/09/xmldsig#}Transforms'] = ('transforms',
Transforms)
c_cardinality['transforms'] = {"min":0, "max":1}
c_children['{http://www.w3.org/2000/09/xmldsig#}DigestMethod'] = ('digest_method',
DigestMethod)
c_children['{http://www.w3.org/2000/09/xmldsig#}DigestValue'] = ('digest_value',
DigestValue)
c_attributes['Id'] = ('id', 'ID', False)
c_attributes['URI'] = ('uri', 'anyURI', False)
c_attributes['Type'] = ('type', 'anyURI', False)
c_child_order.extend(['transforms', 'digest_method', 'digest_value'])
def __init__(self,
transforms=None,
digest_method=None,
digest_value=None,
id=None,
uri=None,
type=None,
text=None,
extension_elements=None,
extension_attributes=None,
):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
)
self.transforms=transforms
self.digest_method=digest_method
self.digest_value=digest_value
self.id=id
self.uri=uri
self.type=type
def reference_type__from_string(xml_string):
return saml2.create_class_from_xml_string(ReferenceType_, xml_string)
class RetrievalMethod(RetrievalMethodType_):
"""The http://www.w3.org/2000/09/xmldsig#:RetrievalMethod element """
c_tag = 'RetrievalMethod'
c_namespace = NAMESPACE
c_children = RetrievalMethodType_.c_children.copy()
c_attributes = RetrievalMethodType_.c_attributes.copy()
c_child_order = RetrievalMethodType_.c_child_order[:]
c_cardinality = RetrievalMethodType_.c_cardinality.copy()
def retrieval_method_from_string(xml_string):
return saml2.create_class_from_xml_string(RetrievalMethod, xml_string)
class Reference(ReferenceType_):
"""The http://www.w3.org/2000/09/xmldsig#:Reference element """
c_tag = 'Reference'
c_namespace = NAMESPACE
c_children = ReferenceType_.c_children.copy()
c_attributes = ReferenceType_.c_attributes.copy()
c_child_order = ReferenceType_.c_child_order[:]
c_cardinality = ReferenceType_.c_cardinality.copy()
def reference_from_string(xml_string):
return saml2.create_class_from_xml_string(Reference, xml_string)
#import xmlenc as enc
class KeyInfoType_(SamlBase):
"""The http://www.w3.org/2000/09/xmldsig#:KeyInfoType element """
c_tag = 'KeyInfoType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_children['{http://www.w3.org/2000/09/xmldsig#}KeyName'] = ('key_name',
[KeyName])
c_cardinality['key_name'] = {"min":0}
c_children['{http://www.w3.org/2000/09/xmldsig#}KeyValue'] = ('key_value',
[KeyValue])
c_cardinality['key_value'] = {"min":0}
c_children['{http://www.w3.org/2000/09/xmldsig#}RetrievalMethod'] = ('retrieval_method',
[RetrievalMethod])
c_cardinality['retrieval_method'] = {"min":0}
c_children['{http://www.w3.org/2000/09/xmldsig#}X509Data'] = ('x509_data',
[X509Data])
c_cardinality['x509_data'] = {"min":0}
c_children['{http://www.w3.org/2000/09/xmldsig#}PGPData'] = ('pgp_data',
[PGPData])
c_cardinality['pgp_data'] = {"min":0}
c_children['{http://www.w3.org/2000/09/xmldsig#}SPKIData'] = ('spki_data',
[SPKIData])
c_cardinality['spki_data'] = {"min":0}
c_children['{http://www.w3.org/2000/09/xmldsig#}MgmtData'] = ('mgmt_data',
[MgmtData])
c_cardinality['mgmt_data'] = {"min":0}
c_children['{http://www.w3.org/2000/09/xmlenc#}EncryptedKey'] = (
'encrypted_key',
None)
c_cardinality['key_info'] = {"min":0, "max":1}
c_attributes['Id'] = ('id', 'ID', False)
c_child_order.extend(['key_name', 'key_value', 'retrieval_method',
'x509_data', 'pgp_data', 'spki_data', 'mgmt_data',
'encrypted_key'])
def __init__(self,
key_name=None,
key_value=None,
retrieval_method=None,
x509_data=None,
pgp_data=None,
spki_data=None,
mgmt_data=None,
encrypted_key=None,
id=None,
text=None,
extension_elements=None,
extension_attributes=None,
):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
)
self.key_name=key_name or []
self.key_value=key_value or []
self.retrieval_method=retrieval_method or []
self.x509_data=x509_data or []
self.pgp_data=pgp_data or []
self.spki_data=spki_data or []
self.mgmt_data=mgmt_data or []
self.encrypted_key=encrypted_key
self.id=id
def key_info_type__from_string(xml_string):
return saml2.create_class_from_xml_string(KeyInfoType_, xml_string)
class ManifestType_(SamlBase):
"""The http://www.w3.org/2000/09/xmldsig#:ManifestType element """
c_tag = 'ManifestType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_children['{http://www.w3.org/2000/09/xmldsig#}Reference'] = ('reference',
[Reference])
c_cardinality['reference'] = {"min":1}
c_attributes['Id'] = ('id', 'ID', False)
c_child_order.extend(['reference'])
def __init__(self,
reference=None,
id=None,
text=None,
extension_elements=None,
extension_attributes=None,
):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
)
self.reference=reference or []
self.id=id
def manifest_type__from_string(xml_string):
return saml2.create_class_from_xml_string(ManifestType_, xml_string)
class SignedInfoType_(SamlBase):
"""The http://www.w3.org/2000/09/xmldsig#:SignedInfoType element """
c_tag = 'SignedInfoType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_children['{http://www.w3.org/2000/09/xmldsig#}CanonicalizationMethod'] = ('canonicalization_method', CanonicalizationMethod)
c_children['{http://www.w3.org/2000/09/xmldsig#}SignatureMethod'] = ('signature_method',
SignatureMethod)
c_children['{http://www.w3.org/2000/09/xmldsig#}Reference'] = ('reference',
[Reference])
c_cardinality['reference'] = {"min":1}
c_attributes['Id'] = ('id', 'ID', False)
c_child_order.extend(['canonicalization_method', 'signature_method',
'reference'])
def __init__(self,
canonicalization_method=None,
signature_method=None,
reference=None,
id=None,
text=None,
extension_elements=None,
extension_attributes=None,
):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
)
self.canonicalization_method=canonicalization_method
self.signature_method=signature_method
self.reference=reference or []
self.id=id
def signed_info_type__from_string(xml_string):
return saml2.create_class_from_xml_string(SignedInfoType_, xml_string)
class KeyInfo(KeyInfoType_):
"""The http://www.w3.org/2000/09/xmldsig#:KeyInfo element """
c_tag = 'KeyInfo'
c_namespace = NAMESPACE
c_children = KeyInfoType_.c_children.copy()
c_attributes = KeyInfoType_.c_attributes.copy()
c_child_order = KeyInfoType_.c_child_order[:]
c_cardinality = KeyInfoType_.c_cardinality.copy()
def key_info_from_string(xml_string):
return saml2.create_class_from_xml_string(KeyInfo, xml_string)
class Manifest(ManifestType_):
"""The http://www.w3.org/2000/09/xmldsig#:Manifest element """
c_tag = 'Manifest'
c_namespace = NAMESPACE
c_children = ManifestType_.c_children.copy()
c_attributes = ManifestType_.c_attributes.copy()
c_child_order = ManifestType_.c_child_order[:]
c_cardinality = ManifestType_.c_cardinality.copy()
def manifest_from_string(xml_string):
return saml2.create_class_from_xml_string(Manifest, xml_string)
class SignedInfo(SignedInfoType_):
"""The http://www.w3.org/2000/09/xmldsig#:SignedInfo element """
c_tag = 'SignedInfo'
c_namespace = NAMESPACE
c_children = SignedInfoType_.c_children.copy()
c_attributes = SignedInfoType_.c_attributes.copy()
c_child_order = SignedInfoType_.c_child_order[:]
c_cardinality = SignedInfoType_.c_cardinality.copy()
def signed_info_from_string(xml_string):
return saml2.create_class_from_xml_string(SignedInfo, xml_string)
class SignatureType_(SamlBase):
"""The http://www.w3.org/2000/09/xmldsig#:SignatureType element """
c_tag = 'SignatureType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_children['{http://www.w3.org/2000/09/xmldsig#}SignedInfo'] = ('signed_info',
SignedInfo)
c_children['{http://www.w3.org/2000/09/xmldsig#}SignatureValue'] = ('signature_value', SignatureValue)
c_children['{http://www.w3.org/2000/09/xmldsig#}KeyInfo'] = ('key_info',
KeyInfo)
c_cardinality['key_info'] = {"min":0, "max":1}
c_children['{http://www.w3.org/2000/09/xmldsig#}Object'] = ('object',
[Object])
c_cardinality['object'] = {"min":0}
c_attributes['Id'] = ('id', 'ID', False)
c_child_order.extend(['signed_info', 'signature_value', 'key_info',
'object'])
def __init__(self,
signed_info=None,
signature_value=None,
key_info=None,
object=None,
id=None,
text=None,
extension_elements=None,
extension_attributes=None,
):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
)
self.signed_info=signed_info
self.signature_value=signature_value
self.key_info=key_info
self.object=object or []
self.id=id
def signature_type__from_string(xml_string):
return saml2.create_class_from_xml_string(SignatureType_, xml_string)
class Signature(SignatureType_):
"""The http://www.w3.org/2000/09/xmldsig#:Signature element """
c_tag = 'Signature'
c_namespace = NAMESPACE
c_children = SignatureType_.c_children.copy()
c_attributes = SignatureType_.c_attributes.copy()
c_child_order = SignatureType_.c_child_order[:]
c_cardinality = SignatureType_.c_cardinality.copy()
def signature_from_string(xml_string):
return saml2.create_class_from_xml_string(Signature, xml_string)
ELEMENT_FROM_STRING = {
CryptoBinary_.c_tag: crypto_binary__from_string,
Signature.c_tag: signature_from_string,
SignatureType_.c_tag: signature_type__from_string,
SignatureValue.c_tag: signature_value_from_string,
SignatureValueType_.c_tag: signature_value_type__from_string,
SignedInfo.c_tag: signed_info_from_string,
SignedInfoType_.c_tag: signed_info_type__from_string,
CanonicalizationMethod.c_tag: canonicalization_method_from_string,
CanonicalizationMethodType_.c_tag: canonicalization_method_type__from_string,
SignatureMethod.c_tag: signature_method_from_string,
SignatureMethodType_.c_tag: signature_method_type__from_string,
Reference.c_tag: reference_from_string,
ReferenceType_.c_tag: reference_type__from_string,
Transforms.c_tag: transforms_from_string,
TransformsType_.c_tag: transforms_type__from_string,
Transform.c_tag: transform_from_string,
TransformType_.c_tag: transform_type__from_string,
DigestMethod.c_tag: digest_method_from_string,
DigestMethodType_.c_tag: digest_method_type__from_string,
DigestValue.c_tag: digest_value_from_string,
DigestValueType_.c_tag: digest_value_type__from_string,
KeyInfo.c_tag: key_info_from_string,
KeyInfoType_.c_tag: key_info_type__from_string,
KeyName.c_tag: key_name_from_string,
MgmtData.c_tag: mgmt_data_from_string,
KeyValue.c_tag: key_value_from_string,
KeyValueType_.c_tag: key_value_type__from_string,
RetrievalMethod.c_tag: retrieval_method_from_string,
RetrievalMethodType_.c_tag: retrieval_method_type__from_string,
X509Data.c_tag: x509_data_from_string,
X509DataType_.c_tag: x509_data_type__from_string,
X509IssuerSerialType_.c_tag: x509_issuer_serial_type__from_string,
PGPData.c_tag: pgp_data_from_string,
PGPDataType_.c_tag: pgp_data_type__from_string,
SPKIData.c_tag: spki_data_from_string,
SPKIDataType_.c_tag: spki_data_type__from_string,
Object.c_tag: object_from_string,
ObjectType_.c_tag: object_type__from_string,
Manifest.c_tag: manifest_from_string,
ManifestType_.c_tag: manifest_type__from_string,
SignatureProperties.c_tag: signature_properties_from_string,
SignaturePropertiesType_.c_tag: signature_properties_type__from_string,
SignatureProperty.c_tag: signature_property_from_string,
SignaturePropertyType_.c_tag: signature_property_type__from_string,
HMACOutputLengthType_.c_tag: hmac_output_length_type__from_string,
DSAKeyValue.c_tag: dsa_key_value_from_string,
DSAKeyValueType_.c_tag: dsa_key_value_type__from_string,
RSAKeyValue.c_tag: rsa_key_value_from_string,
RSAKeyValueType_.c_tag: rsa_key_value_type__from_string,
TransformType_XPath.c_tag: transform_type__x_path_from_string,
X509IssuerName.c_tag: x509_issuer_name_from_string,
X509SerialNumber.c_tag: x509_serial_number_from_string,
PGPKeyID.c_tag: pgp_key_id_from_string,
PGPKeyPacket.c_tag: pgp_key_packet_from_string,
SPKISexp.c_tag: spki_sexp_from_string,
P.c_tag: p_from_string,
Q.c_tag: q_from_string,
G.c_tag: g_from_string,
Y.c_tag: y_from_string,
J.c_tag: j_from_string,
Seed.c_tag: seed_from_string,
PgenCounter.c_tag: pgen_counter_from_string,
Modulus.c_tag: modulus_from_string,
Exponent.c_tag: exponent_from_string,
HMACOutputLength.c_tag: hmac_output_length_from_string,
X509IssuerSerial.c_tag: x509_issuer_serial_from_string,
X509SKI.c_tag: x509_ski_from_string,
X509SubjectName.c_tag: x509_subject_name_from_string,
X509Certificate.c_tag: x509_certificate_from_string,
X509CRL.c_tag: x509_crl_from_string,
}
ELEMENT_BY_TAG = {
'CryptoBinary': CryptoBinary_,
'Signature': Signature,
'SignatureType': SignatureType_,
'SignatureValue': SignatureValue,
'SignatureValueType': SignatureValueType_,
'SignedInfo': SignedInfo,
'SignedInfoType': SignedInfoType_,
'CanonicalizationMethod': CanonicalizationMethod,
'CanonicalizationMethodType': CanonicalizationMethodType_,
'SignatureMethod': SignatureMethod,
'SignatureMethodType': SignatureMethodType_,
'Reference': Reference,
'ReferenceType': ReferenceType_,
'Transforms': Transforms,
'TransformsType': TransformsType_,
'Transform': Transform,
'TransformType': TransformType_,
'DigestMethod': DigestMethod,
'DigestMethodType': DigestMethodType_,
'DigestValue': DigestValue,
'DigestValueType': DigestValueType_,
'KeyInfo': KeyInfo,
'KeyInfoType': KeyInfoType_,
'KeyName': KeyName,
'MgmtData': MgmtData,
'KeyValue': KeyValue,
'KeyValueType': KeyValueType_,
'RetrievalMethod': RetrievalMethod,
'RetrievalMethodType': RetrievalMethodType_,
'X509Data': X509Data,
'X509DataType': X509DataType_,
'X509IssuerSerialType': X509IssuerSerialType_,
'PGPData': PGPData,
'PGPDataType': PGPDataType_,
'SPKIData': SPKIData,
'SPKIDataType': SPKIDataType_,
'Object': Object,
'ObjectType': ObjectType_,
'Manifest': Manifest,
'ManifestType': ManifestType_,
'SignatureProperties': SignatureProperties,
'SignaturePropertiesType': SignaturePropertiesType_,
'SignatureProperty': SignatureProperty,
'SignaturePropertyType': SignaturePropertyType_,
'HMACOutputLengthType': HMACOutputLengthType_,
'DSAKeyValue': DSAKeyValue,
'DSAKeyValueType': DSAKeyValueType_,
'RSAKeyValue': RSAKeyValue,
'RSAKeyValueType': RSAKeyValueType_,
'XPath': TransformType_XPath,
'X509IssuerName': X509IssuerName,
'X509SerialNumber': X509SerialNumber,
'PGPKeyID': PGPKeyID,
'PGPKeyPacket': PGPKeyPacket,
'SPKISexp': SPKISexp,
'P': P,
'Q': Q,
'G': G,
'Y': Y,
'J': J,
'Seed': Seed,
'PgenCounter': PgenCounter,
'Modulus': Modulus,
'Exponent': Exponent,
'HMACOutputLength': HMACOutputLength,
'X509IssuerSerial': X509IssuerSerial,
'X509SKI': X509SKI,
'X509SubjectName': X509SubjectName,
'X509Certificate': X509Certificate,
'X509CRL': X509CRL,
}
def factory(tag, **kwargs):
return ELEMENT_BY_TAG[tag](**kwargs)
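def _factory_usage_sketch():
    # A minimal usage sketch (an assumption, not part of the generated module):
    # factory() simply looks up the class registered for a tag in ELEMENT_BY_TAG
    # and instantiates it, so the two objects below are equivalent.
    sig_from_factory = factory('Signature', id='Signature-1')
    sig_direct = Signature(id='Signature-1')
    assert sig_from_factory.id == sig_direct.id == 'Signature-1'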
|
|
import django
from django.db import models
from django.db.models import Manager
from django.db.models.query_utils import DeferredAttribute
from django.utils.translation import gettext_lazy as _
from model_utils import Choices
from model_utils.fields import MonitorField, SplitField, StatusField, UUIDField
from model_utils.managers import InheritanceManager, JoinManagerMixin, QueryManager
from model_utils.models import (
SaveSignalHandlingModel,
SoftDeletableModel,
StatusModel,
TimeFramedModel,
TimeStampedModel,
UUIDModel,
)
from model_utils.tracker import FieldTracker, ModelTracker
from tests.fields import MutableField
from tests.managers import CustomSoftDeleteManager
class InheritanceManagerTestRelated(models.Model):
pass
class InheritanceManagerTestParent(models.Model):
# FileField is just a handy descriptor-using field. Refs #6.
non_related_field_using_descriptor = models.FileField(upload_to="test")
related = models.ForeignKey(
InheritanceManagerTestRelated, related_name="imtests", null=True,
on_delete=models.CASCADE)
normal_field = models.TextField()
related_self = models.OneToOneField(
"self", related_name="imtests_self", null=True,
on_delete=models.CASCADE)
objects = InheritanceManager()
def __str__(self):
return "{}({})".format(
self.__class__.__name__[len('InheritanceManagerTest'):],
self.pk,
)
class InheritanceManagerTestChild1(InheritanceManagerTestParent):
non_related_field_using_descriptor_2 = models.FileField(upload_to="test")
normal_field_2 = models.TextField()
objects = InheritanceManager()
class InheritanceManagerTestGrandChild1(InheritanceManagerTestChild1):
text_field = models.TextField()
class InheritanceManagerTestGrandChild1_2(InheritanceManagerTestChild1):
text_field = models.TextField()
class InheritanceManagerTestChild2(InheritanceManagerTestParent):
non_related_field_using_descriptor_2 = models.FileField(upload_to="test")
normal_field_2 = models.TextField()
class InheritanceManagerTestChild3(InheritanceManagerTestParent):
parent_ptr = models.OneToOneField(
InheritanceManagerTestParent, related_name='manual_onetoone',
parent_link=True, on_delete=models.CASCADE)
class InheritanceManagerTestChild4(InheritanceManagerTestParent):
other_onetoone = models.OneToOneField(
InheritanceManagerTestParent, related_name='non_inheritance_relation',
parent_link=False, on_delete=models.CASCADE)
    # The following is needed because of this Django bug:
    # https://code.djangoproject.com/ticket/29998
parent_ptr = models.OneToOneField(
InheritanceManagerTestParent, related_name='child4_onetoone',
parent_link=True, on_delete=models.CASCADE)
class TimeStamp(TimeStampedModel):
test_field = models.PositiveSmallIntegerField(default=0)
class TimeFrame(TimeFramedModel):
pass
class TimeFrameManagerAdded(TimeFramedModel):
pass
class Monitored(models.Model):
name = models.CharField(max_length=25)
name_changed = MonitorField(monitor="name")
class MonitorWhen(models.Model):
name = models.CharField(max_length=25)
name_changed = MonitorField(monitor="name", when=["Jose", "Maria"])
class MonitorWhenEmpty(models.Model):
name = models.CharField(max_length=25)
name_changed = MonitorField(monitor="name", when=[])
class DoubleMonitored(models.Model):
name = models.CharField(max_length=25)
name_changed = MonitorField(monitor="name")
name2 = models.CharField(max_length=25)
name_changed2 = MonitorField(monitor="name2")
class Status(StatusModel):
STATUS = Choices(
("active", _("active")),
("deleted", _("deleted")),
("on_hold", _("on hold")),
)
class StatusPlainTuple(StatusModel):
STATUS = (
("active", _("active")),
("deleted", _("deleted")),
("on_hold", _("on hold")),
)
class StatusManagerAdded(StatusModel):
STATUS = (
("active", _("active")),
("deleted", _("deleted")),
("on_hold", _("on hold")),
)
class StatusCustomManager(Manager):
pass
class AbstractStatusCustomManager(StatusModel):
STATUS = Choices(
("first_choice", _("First choice")),
("second_choice", _("Second choice")),
)
objects = StatusCustomManager()
class Meta:
abstract = True
class StatusCustomManagerModel(AbstractStatusCustomManager):
    title = models.CharField(max_length=50)
class Post(models.Model):
published = models.BooleanField(default=False)
confirmed = models.BooleanField(default=False)
order = models.IntegerField()
objects = models.Manager()
public = QueryManager(published=True)
public_confirmed = QueryManager(
models.Q(published=True) & models.Q(confirmed=True))
public_reversed = QueryManager(published=True).order_by("-order")
class Meta:
ordering = ("order",)
class Article(models.Model):
title = models.CharField(max_length=50)
body = SplitField()
class SplitFieldAbstractParent(models.Model):
content = SplitField()
class Meta:
abstract = True
class NoRendered(models.Model):
"""
Test that the no_excerpt_field keyword arg works. This arg should
never be used except by the South model-freezing.
"""
body = SplitField(no_excerpt_field=True)
class AuthorMixin:
def by_author(self, name):
return self.filter(author=name)
class PublishedMixin:
def published(self):
return self.filter(published=True)
def unpublished(self):
return self.filter(published=False)
class ByAuthorQuerySet(models.query.QuerySet, AuthorMixin):
pass
class FeaturedManager(models.Manager):
def get_queryset(self):
kwargs = {}
if hasattr(self, "_db"):
kwargs["using"] = self._db
return ByAuthorQuerySet(self.model, **kwargs).filter(feature=True)
class AbstractTracked(models.Model):
number = 1
class Meta:
abstract = True
class Tracked(models.Model):
name = models.CharField(max_length=20)
number = models.IntegerField()
mutable = MutableField(default=None)
tracker = FieldTracker()
def save(self, *args, **kwargs):
""" No-op save() to ensure that FieldTracker.patch_save() works. """
super().save(*args, **kwargs)
class TrackerTimeStamped(TimeStampedModel):
name = models.CharField(max_length=20)
number = models.IntegerField()
mutable = MutableField(default=None)
tracker = FieldTracker()
def save(self, *args, **kwargs):
""" Automatically add "modified" to update_fields."""
update_fields = kwargs.get('update_fields')
if update_fields is not None:
kwargs['update_fields'] = set(update_fields) | {'modified'}
super().save(*args, **kwargs)
class TrackedFK(models.Model):
fk = models.ForeignKey('Tracked', on_delete=models.CASCADE)
tracker = FieldTracker()
custom_tracker = FieldTracker(fields=['fk_id'])
custom_tracker_without_id = FieldTracker(fields=['fk'])
class TrackedAbstract(AbstractTracked):
name = models.CharField(max_length=20)
number = models.IntegerField()
mutable = MutableField(default=None)
tracker = FieldTracker()
class TrackedNotDefault(models.Model):
name = models.CharField(max_length=20)
number = models.IntegerField()
name_tracker = FieldTracker(fields=['name'])
class TrackedNonFieldAttr(models.Model):
number = models.FloatField()
@property
def rounded(self):
return round(self.number) if self.number is not None else None
tracker = FieldTracker(fields=['rounded'])
class TrackedMultiple(models.Model):
name = models.CharField(max_length=20)
number = models.IntegerField()
name_tracker = FieldTracker(fields=['name'])
number_tracker = FieldTracker(fields=['number'])
class TrackedFileField(models.Model):
some_file = models.FileField(upload_to='test_location')
tracker = FieldTracker()
class InheritedTracked(Tracked):
name2 = models.CharField(max_length=20)
class InheritedTrackedFK(TrackedFK):
custom_tracker = FieldTracker(fields=['fk_id'])
custom_tracker_without_id = FieldTracker(fields=['fk'])
class ModelTracked(models.Model):
name = models.CharField(max_length=20)
number = models.IntegerField()
mutable = MutableField(default=None)
tracker = ModelTracker()
class ModelTrackedFK(models.Model):
fk = models.ForeignKey('ModelTracked', on_delete=models.CASCADE)
tracker = ModelTracker()
custom_tracker = ModelTracker(fields=['fk_id'])
custom_tracker_without_id = ModelTracker(fields=['fk'])
class ModelTrackedNotDefault(models.Model):
name = models.CharField(max_length=20)
number = models.IntegerField()
name_tracker = ModelTracker(fields=['name'])
class ModelTrackedMultiple(models.Model):
name = models.CharField(max_length=20)
number = models.IntegerField()
name_tracker = ModelTracker(fields=['name'])
number_tracker = ModelTracker(fields=['number'])
class InheritedModelTracked(ModelTracked):
name2 = models.CharField(max_length=20)
class StatusFieldDefaultFilled(models.Model):
STATUS = Choices((0, "no", "No"), (1, "yes", "Yes"))
status = StatusField(default=STATUS.yes)
class StatusFieldDefaultNotFilled(models.Model):
STATUS = Choices((0, "no", "No"), (1, "yes", "Yes"))
status = StatusField()
class StatusFieldChoicesName(models.Model):
NAMED_STATUS = Choices((0, "no", "No"), (1, "yes", "Yes"))
status = StatusField(choices_name='NAMED_STATUS')
class SoftDeletable(SoftDeletableModel):
"""
Test model with additional manager for full access to model
instances.
"""
name = models.CharField(max_length=20)
all_objects = models.Manager()
class CustomSoftDelete(SoftDeletableModel):
is_read = models.BooleanField(default=False)
objects = CustomSoftDeleteManager()
class StringyDescriptor:
"""
Descriptor that returns a string version of the underlying integer value.
"""
def __init__(self, name):
self.name = name
def __get__(self, obj, cls=None):
if obj is None:
return self
if self.name in obj.get_deferred_fields():
# This queries the database, and sets the value on the instance.
if django.VERSION < (3, 0):
DeferredAttribute(field_name=self.name).__get__(obj, cls)
else:
# Since Django 3.0, DeferredAttribute wants a field argument.
fields_map = {f.name: f for f in cls._meta.fields}
field = fields_map[self.name]
DeferredAttribute(field=field).__get__(obj, cls)
return str(obj.__dict__[self.name])
def __set__(self, obj, value):
obj.__dict__[self.name] = int(value)
def __delete__(self, obj):
del obj.__dict__[self.name]
class CustomDescriptorField(models.IntegerField):
def contribute_to_class(self, cls, name, **kwargs):
super().contribute_to_class(cls, name, **kwargs)
setattr(cls, name, StringyDescriptor(name))
class ModelWithCustomDescriptor(models.Model):
custom_field = CustomDescriptorField()
tracked_custom_field = CustomDescriptorField()
regular_field = models.IntegerField()
tracked_regular_field = models.IntegerField()
tracker = FieldTracker(fields=['tracked_custom_field', 'tracked_regular_field'])
class JoinManager(JoinManagerMixin, models.Manager):
pass
class BoxJoinModel(models.Model):
name = models.CharField(max_length=32)
objects = JoinManager()
class JoinItemForeignKey(models.Model):
weight = models.IntegerField()
belonging = models.ForeignKey(
BoxJoinModel,
null=True,
on_delete=models.CASCADE
)
objects = JoinManager()
class CustomUUIDModel(UUIDModel):
pass
class CustomNotPrimaryUUIDModel(models.Model):
uuid = UUIDField(primary_key=False)
class SaveSignalHandlingTestModel(SaveSignalHandlingModel):
name = models.CharField(max_length=20)
class TimeStampWithStatusModel(TimeStampedModel, StatusModel):
STATUS = Choices(
("active", _("active")),
("deleted", _("deleted")),
("on_hold", _("on hold")),
)
test_field = models.PositiveSmallIntegerField(default=0)
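def _field_tracker_usage_sketch():
    # A minimal usage sketch (an assumption, not part of the test suite; requires a
    # configured Django test database with the models above migrated): FieldTracker
    # on Tracked remembers prior field values until save() is called.
    obj = Tracked.objects.create(name="old", number=1)
    obj.name = "new"
    assert obj.tracker.has_changed("name")
    assert obj.tracker.previous("name") == "old"
    obj.save()
    assert not obj.tracker.has_changed("name")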
|
|
from textwrap import dedent
from typing import Optional
import pytest
from pip._internal.build_env import BuildEnvironment
from tests.lib import (
PipTestEnvironment,
TestPipResult,
create_basic_wheel_for_package,
make_test_finder,
)
def indent(text: str, prefix: str) -> str:
return "\n".join((prefix if line else "") + line for line in text.split("\n"))
def run_with_build_env(
script: PipTestEnvironment,
setup_script_contents: str,
test_script_contents: Optional[str] = None,
) -> TestPipResult:
build_env_script = script.scratch_path / "build_env.py"
build_env_script.write_text(
dedent(
"""
import subprocess
import sys
from pip._internal.build_env import BuildEnvironment
from pip._internal.index.collector import LinkCollector
from pip._internal.index.package_finder import PackageFinder
from pip._internal.models.search_scope import SearchScope
from pip._internal.models.selection_prefs import (
SelectionPreferences
)
from pip._internal.network.session import PipSession
from pip._internal.utils.temp_dir import global_tempdir_manager
link_collector = LinkCollector(
session=PipSession(),
search_scope=SearchScope.create([{scratch!r}], []),
)
selection_prefs = SelectionPreferences(
allow_yanked=True,
)
finder = PackageFinder.create(
link_collector=link_collector,
selection_prefs=selection_prefs,
use_deprecated_html5lib=False,
)
with global_tempdir_manager():
build_env = BuildEnvironment()
""".format(
scratch=str(script.scratch_path)
)
)
+ indent(dedent(setup_script_contents), " ")
+ indent(
dedent(
"""
if len(sys.argv) > 1:
with build_env:
subprocess.check_call((sys.executable, sys.argv[1]))
"""
),
" ",
)
)
args = ["python", build_env_script]
if test_script_contents is not None:
test_script = script.scratch_path / "test.py"
test_script.write_text(dedent(test_script_contents))
args.append(test_script)
return script.run(*args)
def test_build_env_allow_empty_requirements_install() -> None:
finder = make_test_finder()
build_env = BuildEnvironment()
for prefix in ("normal", "overlay"):
build_env.install_requirements(
finder, [], prefix, kind="Installing build dependencies"
)
def test_build_env_allow_only_one_install(script: PipTestEnvironment) -> None:
create_basic_wheel_for_package(script, "foo", "1.0")
create_basic_wheel_for_package(script, "bar", "1.0")
finder = make_test_finder(find_links=[script.scratch_path])
build_env = BuildEnvironment()
for prefix in ("normal", "overlay"):
build_env.install_requirements(
finder, ["foo"], prefix, kind=f"installing foo in {prefix}"
)
with pytest.raises(AssertionError):
build_env.install_requirements(
finder, ["bar"], prefix, kind=f"installing bar in {prefix}"
)
with pytest.raises(AssertionError):
build_env.install_requirements(
finder, [], prefix, kind=f"installing in {prefix}"
)
def test_build_env_requirements_check(script: PipTestEnvironment) -> None:
create_basic_wheel_for_package(script, "foo", "2.0")
create_basic_wheel_for_package(script, "bar", "1.0")
create_basic_wheel_for_package(script, "bar", "3.0")
create_basic_wheel_for_package(script, "other", "0.5")
script.pip_install_local("-f", script.scratch_path, "foo", "bar", "other")
run_with_build_env(
script,
"""
r = build_env.check_requirements(['foo', 'bar', 'other'])
assert r == (set(), {'foo', 'bar', 'other'}), repr(r)
r = build_env.check_requirements(['foo>1.0', 'bar==3.0'])
assert r == (set(), {'foo>1.0', 'bar==3.0'}), repr(r)
r = build_env.check_requirements(['foo>3.0', 'bar>=2.5'])
assert r == (set(), {'foo>3.0', 'bar>=2.5'}), repr(r)
""",
)
run_with_build_env(
script,
"""
build_env.install_requirements(finder, ['foo', 'bar==3.0'], 'normal',
kind='installing foo in normal')
r = build_env.check_requirements(['foo', 'bar', 'other'])
assert r == (set(), {'other'}), repr(r)
r = build_env.check_requirements(['foo>1.0', 'bar==3.0'])
assert r == (set(), set()), repr(r)
r = build_env.check_requirements(['foo>3.0', 'bar>=2.5'])
assert r == ({('foo==2.0', 'foo>3.0')}, set()), repr(r)
""",
)
run_with_build_env(
script,
"""
build_env.install_requirements(finder, ['foo', 'bar==3.0'], 'normal',
kind='installing foo in normal')
build_env.install_requirements(finder, ['bar==1.0'], 'overlay',
kind='installing foo in overlay')
r = build_env.check_requirements(['foo', 'bar', 'other'])
assert r == (set(), {'other'}), repr(r)
r = build_env.check_requirements(['foo>1.0', 'bar==3.0'])
assert r == ({('bar==1.0', 'bar==3.0')}, set()), repr(r)
r = build_env.check_requirements(['foo>3.0', 'bar>=2.5'])
assert r == ({('bar==1.0', 'bar>=2.5'), ('foo==2.0', 'foo>3.0')}, \
set()), repr(r)
""",
)
def test_build_env_overlay_prefix_has_priority(script: PipTestEnvironment) -> None:
create_basic_wheel_for_package(script, "pkg", "2.0")
create_basic_wheel_for_package(script, "pkg", "4.3")
result = run_with_build_env(
script,
"""
build_env.install_requirements(finder, ['pkg==2.0'], 'overlay',
kind='installing pkg==2.0 in overlay')
build_env.install_requirements(finder, ['pkg==4.3'], 'normal',
kind='installing pkg==4.3 in normal')
""",
"""
print(__import__('pkg').__version__)
""",
)
assert result.stdout.strip() == "2.0", str(result)
@pytest.mark.incompatible_with_test_venv
def test_build_env_isolation(script: PipTestEnvironment) -> None:
# Create dummy `pkg` wheel.
pkg_whl = create_basic_wheel_for_package(script, "pkg", "1.0")
# Install it to site packages.
script.pip_install_local(pkg_whl)
# And a copy in the user site.
script.pip_install_local("--ignore-installed", "--user", pkg_whl)
# And to another directory available through a .pth file.
target = script.scratch_path / "pth_install"
script.pip_install_local("-t", target, pkg_whl)
(script.site_packages_path / "build_requires.pth").write_text(str(target) + "\n")
# And finally to yet another directory available through PYTHONPATH.
target = script.scratch_path / "pypath_install"
script.pip_install_local("-t", target, pkg_whl)
script.environ["PYTHONPATH"] = target
run_with_build_env(
script,
"",
r"""
from distutils.sysconfig import get_python_lib
import sys
try:
import pkg
except ImportError:
pass
else:
print(
f'imported `pkg` from `{pkg.__file__}`',
file=sys.stderr)
print('system sites:\n ' + '\n '.join(sorted({
get_python_lib(plat_specific=0),
get_python_lib(plat_specific=1),
})), file=sys.stderr)
print('sys.path:\n ' + '\n '.join(sys.path), file=sys.stderr)
sys.exit(1)
""",
)
|
|
"""
Copyright (C) 2004-2015 Pivotal Software, Inc. All rights reserved.
This program and the accompanying materials are made available under
the terms of the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from ctypes import *
import os
# Load libpq from the installation. The library lookup in ctypes is a bit strange,
# so make sure the correct one is being loaded.
libpqdir = '%s/lib' % (os.environ.get('GPHOME'))
try:
    libpq = CDLL(os.path.join(libpqdir, 'libpq.dylib'))
except OSError:
    libpq = CDLL(os.path.join(libpqdir, 'libpq.so'))
# See src/interfaces/libpq/libpq-fe.h for more details.
CONNECTION_OK = 0
CONNECTION_BAD = 1
CONNECTION_STARTED = 2
CONNECTION_MADE = 3
CONNECTION_AWAITING_RESPONSE = 4
CONNECTION_AUTH_OK = 5
CONNECTION_SETENV = 6
CONNECTION_SSL_STARTUP = 7
CONNECTION_NEEDED = 8
PGRES_EMPTY_QUERY = 0
PGRES_COMMAND_OK = 1
PGRES_TUPLES_OK = 2
PGRES_COPY_OUT = 3
PGRES_COPY_IN = 4
PGRES_BAD_RESPONSE = 5
PGRES_NONFATAL_ERROR = 6
PGRES_FATAL_ERROR = 7
PGRES_COPY_BOTH = 8
PGRES_SINGLE_TUPLE = 9
libpq.PQerrorMessage.restype = c_char_p
libpq.PQresStatus.restype = c_char_p
libpq.PQresultErrorMessage.restype = c_char_p
libpq.PQfname.restype = c_char_p
libpq.PQgetvalue.restype = c_char_p
# src/include/catalog/pg_type.h
BOOLOID = 16
CHAROID = 18
NAMEOID = 19
INT8OID = 20
INT2OID = 21
INT4OID = 23
TEXTOID = 25
OIDOID = 26
FLOAT4OID = 700
FLOAT8OID = 701
BPCHAROID = 1042
VARCHAROID = 1043
DATEOID = 1082
TIMEOID = 1083
TIMESTAMPOID = 1114
TIMESTAMPTZOID = 1184
NoticeReceiverFunc = CFUNCTYPE(None, c_void_p, c_void_p)
class PGconn(object):
def __init__(self, conninfo):
        if isinstance(conninfo, str):
            self.conninfo = conninfo
            self.conn = libpq.PQconnectdb(conninfo)
        else:
            # Otherwise assume it is an existing libpq connection handle.
            self.conn = conninfo
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.finish()
def status(self):
return libpq.PQstatus(self.conn)
def error_message(self):
return libpq.PQerrorMessage(self.conn)
def finish(self):
if self.conn:
libpq.PQfinish(self.conn)
self.conn = None
def execute(self, query):
res = libpq.PQexec(self.conn, query)
return PGresult(res)
# Async supports
def send_query(self, query):
return libpq.PQsendQuery(self.conn, query)
def consume_input(self):
return libpq.PQconsumeInput(self.conn)
def is_busy(self):
return libpq.PQisBusy(self.conn)
def get_result(self):
res = libpq.PQgetResult(self.conn)
return PGresult(res)
def set_notice_receiver(self, proc, arg):
return libpq.PQsetNoticeReceiver(self.conn, proc, arg)
def fileno(self):
# for select call
return libpq.PQsocket(self.conn)
class PGresult(object):
class Tuple(object):
pass
def __init__(self, res):
self.res = res
def status(self):
return libpq.PQresultStatus(self.res)
def error_message(self):
return libpq.PQresultErrorMessage(self.res)
def clear(self):
if self.res:
libpq.PQclear(self.res)
self.res = None
def ntuples(self):
return libpq.PQntuples(self.res)
def nfields(self):
return libpq.PQnfields(self.res)
def fname(self, col):
return str(libpq.PQfname(self.res, col))
def getvalue(self, row, col, convert=False):
"""Returns value at the position of row and column.
If convert parameter is set to be True, the value will be
converted to appropriate python object based on type oid,
otherwise all values are python string.
SQL NULL value will be returned as None in any case.
"""
if libpq.PQgetisnull(self.res, row, col) == 1:
return None
val = libpq.PQgetvalue(self.res, row, col)
if convert:
valtype = libpq.PQftype(self.res, col)
return convert_type(val, valtype)
else:
return str(val)
def getpyvalue(self, row, col):
"""Returns value as python object mapped by type oid"""
return self.getvalue(row, col, convert=True)
def tuples(self, to_obj=False, convert=False):
"""Returns result set in either list of list or object, if to_obj
parameter is True, which has attributes of column name with the
value. The values are converted to python objects if convert
parameter is True.
"""
result = list()
for row in range(self.ntuples()):
if to_obj:
tup = PGresult.Tuple()
else:
tup = list()
            for col in range(self.nfields()):
val = self.getvalue(row, col, convert)
if to_obj:
setattr(tup, self.fname(col), val)
else:
tup.append(val)
result.append(tup)
return result
    def pytuples(self):
        """Returns the result set as a list of lists, with values converted
        to Python objects.
        """
        return self.tuples(convert=True)
    def objects(self):
        """Returns the result set as a list of objects, without value conversion."""
        return self.tuples(to_obj=True)
    def pyobjects(self):
        """Returns the result set as a list of objects, with each value
        mapped to a Python object.
        """
        return self.tuples(to_obj=True, convert=True)
def convert_type(cstr, typid):
if typid == BOOLOID:
return bool(cstr == 't')
elif (typid == INT8OID or typid == INT2OID or typid == INT4OID or
typid == OIDOID):
return int(cstr)
elif typid == FLOAT4OID or typid == FLOAT8OID:
return float(cstr)
return str(cstr)
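def _pgconn_usage_sketch():
    # A minimal usage sketch (an assumption, not part of this module): connect, run a
    # query, and read converted rows. The conninfo string is hypothetical and requires
    # a reachable database; values are converted through convert_type() above.
    with PGconn('dbname=postgres') as conn:
        if conn.status() != CONNECTION_OK:
            raise RuntimeError(conn.error_message())
        res = conn.execute('SELECT 1 AS one, true AS flag')
        assert res.status() == PGRES_TUPLES_OK
        assert res.pytuples() == [[1, True]]
        res.clear()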
|
|
# (C) Datadog, Inc. 2010-2016
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
'''
Redis checks
'''
# stdlib
from collections import defaultdict
import re
import time
# 3rd party
import redis
# project
from checks import AgentCheck
DEFAULT_MAX_SLOW_ENTRIES = 128
MAX_SLOW_ENTRIES_KEY = "slowlog-max-len"
REPL_KEY = 'master_link_status'
LINK_DOWN_KEY = 'master_link_down_since_seconds'
class Redis(AgentCheck):
db_key_pattern = re.compile(r'^db\d+')
slave_key_pattern = re.compile(r'^slave\d+')
subkeys = ['keys', 'expires']
SOURCE_TYPE_NAME = 'redis'
GAUGE_KEYS = {
# Append-only metrics
'aof_last_rewrite_time_sec': 'redis.aof.last_rewrite_time',
'aof_rewrite_in_progress': 'redis.aof.rewrite',
'aof_current_size': 'redis.aof.size',
'aof_buffer_length': 'redis.aof.buffer_length',
# Network
'connected_clients': 'redis.net.clients',
'connected_slaves': 'redis.net.slaves',
'rejected_connections': 'redis.net.rejected',
# clients
'blocked_clients': 'redis.clients.blocked',
'client_biggest_input_buf': 'redis.clients.biggest_input_buf',
'client_longest_output_list': 'redis.clients.longest_output_list',
# Keys
'evicted_keys': 'redis.keys.evicted',
'expired_keys': 'redis.keys.expired',
# stats
'latest_fork_usec': 'redis.perf.latest_fork_usec',
'bytes_received_per_sec': 'redis.bytes_received_per_sec',
'bytes_sent_per_sec': 'redis.bytes_sent_per_sec',
# Note: 'bytes_received_per_sec' and 'bytes_sent_per_sec' are only
# available on Azure Redis
# pubsub
'pubsub_channels': 'redis.pubsub.channels',
'pubsub_patterns': 'redis.pubsub.patterns',
# rdb
'rdb_bgsave_in_progress': 'redis.rdb.bgsave',
'rdb_changes_since_last_save': 'redis.rdb.changes_since_last',
'rdb_last_bgsave_time_sec': 'redis.rdb.last_bgsave_time',
# memory
'mem_fragmentation_ratio': 'redis.mem.fragmentation_ratio',
'used_memory': 'redis.mem.used',
'used_memory_lua': 'redis.mem.lua',
'used_memory_peak': 'redis.mem.peak',
'used_memory_rss': 'redis.mem.rss',
'maxmemory': 'redis.mem.maxmemory',
# replication
'master_last_io_seconds_ago': 'redis.replication.last_io_seconds_ago',
'master_sync_in_progress': 'redis.replication.sync',
'master_sync_left_bytes': 'redis.replication.sync_left_bytes',
'repl_backlog_histlen': 'redis.replication.backlog_histlen',
'master_repl_offset': 'redis.replication.master_repl_offset',
'slave_repl_offset': 'redis.replication.slave_repl_offset',
}
RATE_KEYS = {
# cpu
'used_cpu_sys': 'redis.cpu.sys',
'used_cpu_sys_children': 'redis.cpu.sys_children',
'used_cpu_user': 'redis.cpu.user',
'used_cpu_user_children': 'redis.cpu.user_children',
# stats
'keyspace_hits': 'redis.stats.keyspace_hits',
'keyspace_misses': 'redis.stats.keyspace_misses',
}
def __init__(self, name, init_config, agentConfig, instances=None):
AgentCheck.__init__(self, name, init_config, agentConfig, instances)
self.connections = {}
self.last_timestamp_seen = defaultdict(int)
def get_library_versions(self):
return {"redis": redis.__version__}
def _parse_dict_string(self, string, key, default):
"""Take from a more recent redis.py, parse_info"""
try:
for item in string.split(','):
k, v = item.rsplit('=', 1)
if k == key:
try:
return int(v)
except ValueError:
return v
return default
except Exception:
self.log.exception("Cannot parse dictionary string: %s" % string)
return default
def _generate_instance_key(self, instance):
if 'unix_socket_path' in instance:
return (instance.get('unix_socket_path'), instance.get('db'))
else:
return (instance.get('host'), instance.get('port'), instance.get('db'))
def _get_conn(self, instance):
key = self._generate_instance_key(instance)
if key not in self.connections:
try:
# Only send useful parameters to the redis client constructor
list_params = ['host', 'port', 'db', 'password', 'socket_timeout',
'connection_pool', 'charset', 'errors', 'unix_socket_path', 'ssl',
'ssl_certfile', 'ssl_keyfile', 'ssl_ca_certs', 'ssl_cert_reqs']
# Set a default timeout (in seconds) if no timeout is specified in the instance config
instance['socket_timeout'] = instance.get('socket_timeout', 5)
connection_params = dict((k, instance[k]) for k in list_params if k in instance)
self.connections[key] = redis.Redis(**connection_params)
except TypeError:
raise Exception("You need a redis library that supports authenticated connections. Try sudo easy_install redis.")
return self.connections[key]
def _get_tags(self, custom_tags, instance):
tags = set(custom_tags or [])
if 'unix_socket_path' in instance:
tags_to_add = [
"redis_host:%s" % instance.get("unix_socket_path"),
"redis_port:unix_socket",
]
else:
tags_to_add = [
"redis_host:%s" % instance.get('host'),
"redis_port:%s" % instance.get('port')
]
tags = sorted(tags.union(tags_to_add))
return tags
def _check_db(self, instance, custom_tags=None):
conn = self._get_conn(instance)
tags = self._get_tags(custom_tags, instance)
# Ping the database for info, and track the latency.
# Process the service check: the check passes if we can connect to Redis
start = time.time()
info = None
try:
info = conn.info()
tags = sorted(tags + ["redis_role:%s" % info["role"]])
status = AgentCheck.OK
self.service_check('redis.can_connect', status, tags=tags)
self._collect_metadata(info)
except ValueError:
status = AgentCheck.CRITICAL
self.service_check('redis.can_connect', status, tags=tags)
raise
except Exception:
status = AgentCheck.CRITICAL
self.service_check('redis.can_connect', status, tags=tags)
raise
latency_ms = round((time.time() - start) * 1000, 2)
self.gauge('redis.info.latency_ms', latency_ms, tags=tags)
# Save the database statistics.
for key in info.keys():
if self.db_key_pattern.match(key):
db_tags = list(tags) + ["redis_db:" + key]
                # allows tracking percentage of expired keys as DD does not
                # currently allow arithmetic on metrics for monitoring
expires_keys = info[key]["expires"]
total_keys = info[key]["keys"]
persist_keys = total_keys - expires_keys
self.gauge("redis.persist", persist_keys, tags=db_tags)
self.gauge("redis.persist.percent", 100.0 * persist_keys / total_keys, tags=db_tags)
self.gauge("redis.expires.percent", 100.0 * expires_keys / total_keys, tags=db_tags)
for subkey in self.subkeys:
                    # The old redis module on Ubuntu 10.04 (python-redis 0.6.1) does not
                    # return a dict for those keys but a string: keys=3,expires=0
                    # Try to parse it (see lighthouse #46)
val = -1
try:
val = info[key].get(subkey, -1)
except AttributeError:
val = self._parse_dict_string(info[key], subkey, -1)
metric = '.'.join(['redis', subkey])
self.gauge(metric, val, tags=db_tags)
# Save a subset of db-wide statistics
        for info_name, value in info.items():
if info_name in self.GAUGE_KEYS:
self.gauge(self.GAUGE_KEYS[info_name], info[info_name], tags=tags)
elif info_name in self.RATE_KEYS:
self.rate(self.RATE_KEYS[info_name], info[info_name], tags=tags)
# Save the number of commands.
self.rate('redis.net.commands', info['total_commands_processed'],
tags=tags)
if 'instantaneous_ops_per_sec' in info:
self.gauge('redis.net.instantaneous_ops_per_sec', info['instantaneous_ops_per_sec'],
tags=tags)
# Check some key lengths if asked
key_list = instance.get('keys')
if key_list is not None:
if not isinstance(key_list, list) or len(key_list) == 0:
self.warning("keys in redis configuration is either not a list or empty")
else:
l_tags = list(tags)
for key in key_list:
key_type = conn.type(key)
key_tags = l_tags + ['key:' + key]
if key_type == 'list':
self.gauge('redis.key.length', conn.llen(key), tags=key_tags)
elif key_type == 'set':
self.gauge('redis.key.length', conn.scard(key), tags=key_tags)
elif key_type == 'zset':
self.gauge('redis.key.length', conn.zcard(key), tags=key_tags)
elif key_type == 'hash':
self.gauge('redis.key.length', conn.hlen(key), tags=key_tags)
else:
# If the type is unknown, it might be because the key doesn't exist,
# which can be because the list is empty. So always send 0 in that case.
if instance.get("warn_on_missing_keys", True):
self.warning("{0} key not found in redis".format(key))
self.gauge('redis.key.length', 0, tags=key_tags)
self._check_replication(info, tags)
if instance.get("command_stats", False):
self._check_command_stats(conn, tags)
def _check_replication(self, info, tags):
# Save the replication delay for each slave
for key in info:
if self.slave_key_pattern.match(key) and isinstance(info[key], dict):
slave_offset = info[key].get('offset')
master_offset = info.get('master_repl_offset')
if slave_offset and master_offset and master_offset - slave_offset >= 0:
delay = master_offset - slave_offset
# Add id, ip, and port tags for the slave
slave_tags = tags[:]
for slave_tag in ('ip', 'port'):
if slave_tag in info[key]:
slave_tags.append('slave_{0}:{1}'.format(slave_tag, info[key][slave_tag]))
slave_tags.append('slave_id:%s' % key.lstrip('slave'))
self.gauge('redis.replication.delay', delay, tags=slave_tags)
if REPL_KEY in info:
if info[REPL_KEY] == 'up':
status = AgentCheck.OK
down_seconds = 0
else:
status = AgentCheck.CRITICAL
down_seconds = info[LINK_DOWN_KEY]
self.service_check('redis.replication.master_link_status', status, tags=tags)
self.gauge('redis.replication.master_link_down_since_seconds', down_seconds, tags=tags)
def _check_slowlog(self, instance, custom_tags):
"""Retrieve length and entries from Redis' SLOWLOG
This will parse through all entries of the SLOWLOG and select ones
within the time range between the last seen entries and now
"""
conn = self._get_conn(instance)
tags = self._get_tags(custom_tags, instance)
if not instance.get(MAX_SLOW_ENTRIES_KEY):
try:
max_slow_entries = int(conn.config_get(MAX_SLOW_ENTRIES_KEY)[MAX_SLOW_ENTRIES_KEY])
if max_slow_entries > DEFAULT_MAX_SLOW_ENTRIES:
self.warning("Redis {0} is higher than {1}. Defaulting to {1}."
"If you need a higher value, please set {0} in your check config"
.format(MAX_SLOW_ENTRIES_KEY, DEFAULT_MAX_SLOW_ENTRIES))
max_slow_entries = DEFAULT_MAX_SLOW_ENTRIES
# No config on AWS Elasticache
except redis.ResponseError:
max_slow_entries = DEFAULT_MAX_SLOW_ENTRIES
else:
max_slow_entries = int(instance.get(MAX_SLOW_ENTRIES_KEY))
# Generate a unique id for this instance to be persisted across runs
ts_key = self._generate_instance_key(instance)
# Get all slowlog entries
slowlogs = conn.slowlog_get(max_slow_entries)
# Find slowlog entries between last timestamp and now using start_time
slowlogs = [s for s in slowlogs if s['start_time'] >
self.last_timestamp_seen[ts_key]]
max_ts = 0
# Slowlog entry looks like:
# {'command': 'LPOP somekey',
# 'duration': 11238,
# 'id': 496L,
# 'start_time': 1422529869}
for slowlog in slowlogs:
if slowlog['start_time'] > max_ts:
max_ts = slowlog['start_time']
slowlog_tags = list(tags)
command = slowlog['command'].split()
# When the "Garantia Data" custom Redis is used, redis-py returns
# an empty `command` field
# FIXME when https://github.com/andymccurdy/redis-py/pull/622 is released in redis-py
if command:
slowlog_tags.append('command:{0}'.format(command[0]))
value = slowlog['duration']
self.histogram('redis.slowlog.micros', value, tags=slowlog_tags)
self.last_timestamp_seen[ts_key] = max_ts
def _check_command_stats(self, conn, tags):
"""Get command-specific statistics from redis' INFO COMMANDSTATS command
"""
try:
command_stats = conn.info("commandstats")
except Exception:
self.warning("Could not retrieve command stats from Redis."
"INFO COMMANDSTATS only works with Redis >= 2.6.")
return
        for key, stats in command_stats.items():
command = key.split('_', 1)[1]
command_tags = tags + ['command:%s' % command]
self.gauge('redis.command.calls', stats['calls'], tags=command_tags)
self.gauge('redis.command.usec_per_call', stats['usec_per_call'], tags=command_tags)
def check(self, instance):
if ("host" not in instance or "port" not in instance) and "unix_socket_path" not in instance:
raise Exception("You must specify a host/port couple or a unix_socket_path")
custom_tags = instance.get('tags', [])
self._check_db(instance, custom_tags)
self._check_slowlog(instance, custom_tags)
def _collect_metadata(self, info):
if info and 'redis_version' in info:
self.service_metadata('version', info['redis_version'])
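def _redis_check_usage_sketch():
    # A minimal usage sketch (an assumption, not part of the shipped check): how the
    # agent-style entry point is driven for one instance. Requires the dd-agent
    # `checks` framework and a Redis server at the (hypothetical) address below.
    check = Redis('redisdb', init_config={}, agentConfig={})
    check.check({'host': 'localhost', 'port': 6379, 'tags': ['env:dev']})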
|
|
# Copyright (c) 2010-2015 Bo Lin
# Copyright (c) 2010-2015 Yanhong Annie Liu
# Copyright (c) 2010-2015 Stony Brook University
# Copyright (c) 2010-2015 The Research Foundation of SUNY
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from ast import *
from .dast import *
BOOLOP_SYMBOLS = {
AndOp: 'and',
OrOp: 'or'
}
BINOP_SYMBOLS = {
AddOp: '+',
SubOp: '-',
MultOp: '*',
DivOp: '/',
FloorDivOp: '//',
ModOp: '%',
LShiftOp: '<<',
RShiftOp: '>>',
BitOrOp: '|',
BitAndOp: '&',
BitXorOp: '^',
PowOp: '**'
}
CMPOP_SYMBOLS = {
EqOp: '==',
GtOp: '>',
GtEOp: '>=',
InOp: 'in',
IsOp: 'is',
IsNotOp: 'is not',
LtOp: '<',
LtEOp: '<=',
NotEqOp: '!=',
NotInOp: 'not in'
}
UNARYOP_SYMBOLS = {
InvertOp: '~',
NotOp: 'not',
UAddOp: '+',
USubOp: '-'
}
def to_pseudo(node, indent_with=' ' * 4, add_line_information=False):
"""This function can convert a node tree back into python sourcecode.
This is useful for debugging purposes, especially if you're dealing with
custom asts not generated by python itself.
It could be that the sourcecode is evaluable when the AST itself is not
compilable / evaluable. The reason for this is that the AST contains some
more data than regular sourcecode does, which is dropped during
conversion.
Each level of indentation is replaced with `indent_with`. Per default this
parameter is equal to four spaces as suggested by PEP 8, but it might be
adjusted to match the application's styleguide.
If `add_line_information` is set to `True` comments for the line numbers
of the nodes are added to the output. This can be used to spot wrong line
number information of statement nodes.
"""
generator = PseudoCodeGenerator(indent_with, add_line_information)
try:
generator.visit(node)
return ''.join(generator.result)
    except Exception:
        print("Error during code generation. So far we have: %s"
              % ''.join(generator.result))
        raise
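# A minimal usage sketch (an assumption, not part of this module): pretty-print a
# compiled DistAlgo AST with two-space indentation and line-number comments.
# `dast_module` is hypothetical and stands for whatever `dast` node tree the
# DistAlgo frontend produced; constructing one is outside the scope of this module.
#
#     print(to_pseudo(dast_module, indent_with='  ', add_line_information=True))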
class PseudoCodeGenerator(NodeVisitor):
"""Generate DistAlgo pseudo code from DistPy AST.
"""
def __init__(self, indent_with, add_line_information=False):
self.result = []
self.indent_with = indent_with
self.add_line_information = add_line_information
self.indentation = 0
self.new_lines = 0
self.assigning = False
def write(self, x):
if self.new_lines:
if self.result:
self.result.append('\n' * self.new_lines)
self.result.append(self.indent_with * self.indentation)
self.new_lines = 0
self.result.append(x)
def newline(self, node=None, extra=0):
self.new_lines = max(self.new_lines, 1 + extra)
if node is not None and self.add_line_information:
self.write('# line: %s' % node.lineno)
self.new_lines = 1
def body(self, statements):
self.indentation += 1
for stmt in statements:
self.visit(stmt)
self.indentation -= 1
def body_or_else(self, node):
self.body(node.body)
if len(node.elsebody) > 0:
self.newline()
self.write('else:')
self.body(node.elsebody)
def signature(self, node):
want_comma = []
def write_comma():
if want_comma:
self.write(', ')
else:
want_comma.append(True)
padding = [None] * (len(node.args) - len(node.defaults))
for arg, default in zip(node.args, padding + node.defaults):
write_comma()
self.visit(arg)
if default is not None:
self.write('=')
self.visit(default)
if node.vararg is not None:
write_comma()
self.write('*' + node.vararg)
if node.kwarg is not None:
write_comma()
self.write('**' + node.kwarg)
def process_paras(self, node):
want_comma = []
def write_comma():
if want_comma:
self.write(', ')
else:
want_comma.append(True)
padding = [None] * (len(node.params) - len(node.defaults))
        for arg, default in zip(node.params, padding + node.defaults):
write_comma()
self.visit(arg)
if default is not None:
self.write('=')
self.visit(default)
if node.vararg is not None:
write_comma()
self.write('*' + node.vararg)
if node.kwarg is not None:
write_comma()
self.write('**' + node.kwarg)
def decorators(self, node):
for decorator in node.decorator_list:
self.newline(decorator)
self.write('@')
self.visit(decorator)
# Statements
def visit_AssignmentStmt(self, node):
self.newline(node)
self.visit(node.target)
self.write(' = ')
self.assigning = True
self.visit(node.value)
self.assigning = False
def visit_ImportFrom(self, node):
self.newline(node)
self.write('from %s%s import ' % ('.' * node.level, node.module))
for idx, item in enumerate(node.names):
if idx:
self.write(', ')
self.visit(item)
def visit_Import(self, node):
for item in node.names:
self.newline(node)
self.write('import ')
self.visit(item)
def visit_Expr(self, node):
if not self.assigning:
self.newline(node)
self.generic_visit(node)
def visit_FunctionDef(self, node):
self.newline(extra=1)
self.decorators(node)
self.newline(node)
self.write('def %s(' % node.name)
self.signature(node.args)
self.write('):')
self.body(node.body)
# ~~~
def visit_arg(self, node):
self.write(node.arg)
if node.annotation is not None:
self.visit(node.annotation)
def visit_Process(self, node):
self.write("process %s:" % node.name)
self.newline(node)
self.write("params: ")
def visit_ClassDef(self, node):
have_args = []
def paren_or_comma():
if have_args:
self.write(', ')
else:
have_args.append(True)
self.write('(')
self.newline(extra=2)
self.decorators(node)
self.newline(node)
self.write('class %s' % node.name)
for base in node.bases:
paren_or_comma()
self.visit(base)
# XXX: the if here is used to keep this module compatible
# with python 2.6.
if hasattr(node, 'keywords'):
for keyword in node.keywords:
paren_or_comma()
self.write(keyword.arg + '=')
self.visit(keyword.value)
if node.starargs is not None:
paren_or_comma()
self.write('*')
self.visit(node.starargs)
if node.kwargs is not None:
paren_or_comma()
self.write('**')
self.visit(node.kwargs)
self.write(have_args and '):' or ':')
self.body(node.body)
def visit_Process(self, node):
have_args = []
def paren_or_comma():
if have_args:
self.write(', ')
else:
have_args.append(True)
self.write('(')
self.newline(extra=2)
#self.decorators(node)
self.newline(node)
self.write('process %s' % node.name)
# for base in node.bases:
# paren_or_comma()
# self.visit(base)
        self.newline(node)
        self.write(have_args and '):' or ':')
self.body(node.body)
def visit_IfStmt(self, node):
self.newline(node)
self.write('if ')
self.assigning = True
self.visit(node.condition)
self.assigning = False
self.write(':')
self.body_or_else(node)
def visit_ForStmt(self, node):
self.newline(node)
self.assigning = True
self.write('for ')
self.visit(node.target)
self.write(' in ')
self.visit(node.iter)
self.write(':')
self.assigning = False
self.body_or_else(node)
def visit_WhileStmt(self, node):
self.newline(node)
self.write('while ')
self.assigning = True
self.visit(node.condition)
self.assigning = False
self.write(':')
self.body_or_else(node)
def visit_AwaitStmt(self, node):
self.newline(node)
self.write('await ')
self.assigning = True
self.visit(node.condition)
self.assigning = False
if node.timeout is not None:
self.write(', timeout=')
self.visit(node.timeout)
def visit_BranchingAwaitStmt(self, node):
self.newline(node)
self.write('await')
if node.timeout is not None:
self.write(' timeout=')
self.visit(node.timeout)
self.write(':')
self.indentation += 1
for branch in node.branches:
self.visit(branch)
self.indentation -= 1
def visit_Branch(self, node):
self.newline(node)
self.write('case ')
self.assigning = True
self.visit(node.condition)
self.assigning = False
self.write(':')
self.body(node.body)
def visit_WithStmt(self, node):
self.newline(node)
self.write('with ')
self.assigning = True
self.visit(node.ctxexpr)
self.assigning = False
if node.optional_vars is not None:
self.write(' as ')
self.visit(node.optional_vars)
self.write(':')
self.body(node.body)
def visit_Pass(self, node):
self.newline(node)
self.write('pass')
def visit_Print(self, node):
# XXX: python 2.6 only
self.newline(node)
self.write('print ')
want_comma = False
if node.dest is not None:
self.write(' >> ')
self.visit(node.dest)
want_comma = True
for value in node.values:
if want_comma:
self.write(', ')
self.visit(value)
want_comma = True
if not node.nl:
self.write(',')
def visit_DeleteStmt(self, node):
self.newline(node)
self.write('del ')
for idx, target in enumerate(node.targets):
if idx:
self.write(', ')
self.visit(target)
def visit_TryStmt(self, node):
self.newline(node)
self.write('try:')
self.body(node.body)
for handler in node.excepthandlers:
self.visit(handler)
def visit_TryFinallyStmt(self, node):
self.newline(node)
self.write('try:')
self.body(node.body)
self.newline(node)
self.write('finally:')
self.body(node.finalbody)
def visit_Assert(self, node):
self.newline(node)
self.write('assert ')
self.assigning = True
self.visit(node.test)
self.assigning = False
if node.msg is not None:
self.write(', ')
self.visit(node.msg)
def visit_Global(self, node):
self.newline(node)
self.write('global ' + ', '.join(node.names))
def visit_Nonlocal(self, node):
self.newline(node)
self.write('nonlocal ' + ', '.join(node.names))
def visit_ReturnStmt(self, node):
self.newline(node)
self.write('return ')
if node.value is not None:
self.assigning = True
self.visit(node.value)
self.assigning = False
def visit_Break(self, node):
self.newline(node)
self.write('break')
def visit_Continue(self, node):
self.newline(node)
self.write('continue')
def visit_Raise(self, node):
# XXX: Python 2.6 / 3.0 compatibility
self.newline(node)
self.write('raise ')
if hasattr(node, 'exc') and node.exc is not None:
self.visit(node.exc)
if node.cause is not None:
self.write(' from ')
self.visit(node.cause)
elif hasattr(node, 'type') and node.type is not None:
self.visit(node.type)
if node.inst is not None:
self.write(', ')
self.visit(node.inst)
if node.tback is not None:
self.write(', ')
self.visit(node.tback)
# Expressions
def visit_Attribute(self, node):
self.visit(node.value)
self.write('.' + node.attr)
def visit_Call(self, node):
want_comma = []
def write_comma():
if want_comma:
self.write(', ')
else:
want_comma.append(True)
if not self.assigning:
self.newline(node)
self.visit(node.func)
self.write('(')
for arg in node.args:
write_comma()
self.visit(arg)
for keyword in node.keywords:
write_comma()
self.write(keyword.arg + '=')
self.visit(keyword.value)
if node.starargs is not None:
write_comma()
self.write('*')
self.visit(node.starargs)
if node.kwargs is not None:
write_comma()
self.write('**')
self.visit(node.kwargs)
self.write(')')
def visit_Name(self, node):
self.write(node.id)
def visit_Str(self, node):
self.write(repr(node.s))
def visit_Bytes(self, node):
self.write(repr(node.s))
def visit_Num(self, node):
self.write(repr(node.n))
def visit_Tuple(self, node):
self.write('(')
idx = -1
for idx, item in enumerate(node.elts):
if idx:
self.write(', ')
self.visit(item)
self.write(idx and ')' or ',)')
def sequence_visit(left, right):
def visit(self, node):
self.write(left)
for idx, item in enumerate(node.elts):
if idx:
self.write(', ')
self.visit(item)
self.write(right)
return visit
visit_List = sequence_visit('[', ']')
# ~~~
set_helper = sequence_visit('{', '}')
def visit_Set(self, node):
if len(node.elts) == 0:
self.write('set()')
else:
self.set_helper(node)
del sequence_visit
def visit_Dict(self, node):
self.write('{')
for idx, (key, value) in enumerate(zip(node.keys, node.values)):
if idx:
self.write(', ')
self.visit(key)
self.write(': ')
self.visit(value)
self.write('}')
def visit_BinOp(self, node):
self.visit(node.left)
self.write(' %s ' % BINOP_SYMBOLS[type(node.op)])
self.assigning = True
self.write('(')
self.visit(node.right)
self.write(')')
self.assigning = False
def visit_BoolOp(self, node):
self.write('(')
for idx, value in enumerate(node.values):
if idx:
self.write(' %s ' % BOOLOP_SYMBOLS[type(node.op)])
self.visit(value)
self.write(')')
def visit_Compare(self, node):
self.write('(')
self.visit(node.left)
for op, right in zip(node.ops, node.comparators):
self.write(' %s ' % CMPOP_SYMBOLS[type(op)])
self.visit(right)
self.write(')')
def visit_UnaryOp(self, node):
self.write('(')
op = UNARYOP_SYMBOLS[type(node.op)]
self.write(op)
if op == 'not':
self.write(' ')
self.visit(node.operand)
self.write(')')
def visit_Subscript(self, node):
self.visit(node.value)
self.write('[')
self.visit(node.slice)
self.write(']')
def visit_Slice(self, node):
if node.lower is not None:
self.visit(node.lower)
self.write(':')
if node.upper is not None:
self.visit(node.upper)
if node.step is not None:
self.write(':')
if not (isinstance(node.step, Name) and node.step.id == 'None'):
self.visit(node.step)
def visit_ExtSlice(self, node):
        for idx, item in enumerate(node.dims):
if idx:
self.write(', ')
self.visit(item)
def visit_Yield(self, node):
self.newline(node)
self.write('yield ')
self.visit(node.value)
def visit_Lambda(self, node):
self.write('lambda ')
self.signature(node.args)
self.write(': ')
self.visit(node.body)
def visit_Ellipsis(self, node):
self.write('Ellipsis')
def generator_visit(left, right):
def visit(self, node):
self.write(left)
self.visit(node.elt)
for comprehension in node.generators:
self.visit(comprehension)
self.write(right)
return visit
visit_ListComp = generator_visit('[', ']')
visit_GeneratorExp = generator_visit('(', ')')
visit_SetComp = generator_visit('{', '}')
del generator_visit
def visit_DictComp(self, node):
self.write('{')
self.visit(node.key)
self.write(': ')
self.visit(node.value)
for comprehension in node.generators:
self.visit(comprehension)
self.write('}')
def visit_IfExp(self, node):
self.visit(node.body)
self.write(' if ')
self.visit(node.test)
self.write(' else ')
self.visit(node.orelse)
def visit_Starred(self, node):
self.write('*')
self.visit(node.value)
def visit_Repr(self, node):
# XXX: python 2.6 only
self.write('`')
self.visit(node.value)
self.write('`')
# Helper Nodes
def visit_alias(self, node):
self.write(node.name)
if node.asname is not None:
self.write(' as ' + node.asname)
def visit_comprehension(self, node):
self.write(' for ')
self.visit(node.target)
self.write(' in ')
self.visit(node.iter)
if node.ifs:
for if_ in node.ifs:
self.write(' if ')
a = self.assigning
self.assigning = True
self.visit(if_)
self.assigning = a
def visit_Comprehension(self, node):
self.write(' for ')
self.visit(node.target)
self.write(' in ')
self.visit(node.iter)
if node.ifs:
for if_ in node.ifs:
self.write(' if ')
self.visit(if_)
def visit_ExceptHandler(self, node):
self.newline(node)
self.write('except')
if node.type is not None:
self.write(' ')
self.visit(node.type)
if node.name is not None:
self.write(' as ')
self.write(node.name)
self.write(':')
self.body(node.body)
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import copy
from typing import Any, Dict, List
from superset.utils.core import AnnotationType, DTTM_ALIAS, TimeRangeEndpoint
from tests.integration_tests.base_tests import SupersetTestCase
query_birth_names = {
"extras": {
"where": "",
"time_range_endpoints": (
TimeRangeEndpoint.INCLUSIVE,
TimeRangeEndpoint.EXCLUSIVE,
),
"time_grain_sqla": "P1D",
},
"groupby": ["name"],
"metrics": [{"label": "sum__num"}],
"orderby": [("sum__num", False)],
"row_limit": 100,
"granularity": "ds",
"time_range": "100 years ago : now",
"timeseries_limit": 0,
"timeseries_limit_metric": None,
"order_desc": True,
"filters": [
{"col": "gender", "op": "==", "val": "boy"},
{"col": "num", "op": "IS NOT NULL"},
{"col": "name", "op": "NOT IN", "val": ["<NULL>", '"abc"']},
],
"having": "",
"having_filters": [],
"where": "",
}
QUERY_OBJECTS: Dict[str, Dict[str, object]] = {
"birth_names": query_birth_names,
# `:suffix` are overrides only
"birth_names:include_time": {"groupby": [DTTM_ALIAS, "name"],},
"birth_names:orderby_dup_alias": {
"metrics": [
{
"expressionType": "SIMPLE",
"column": {"column_name": "num_girls", "type": "BIGINT(20)"},
"aggregate": "SUM",
"label": "num_girls",
},
{
"expressionType": "SIMPLE",
"column": {"column_name": "num_boys", "type": "BIGINT(20)"},
"aggregate": "SUM",
"label": "num_boys",
},
],
"orderby": [
[
{
"expressionType": "SIMPLE",
"column": {"column_name": "num_girls", "type": "BIGINT(20)"},
"aggregate": "SUM",
# the same underlying expression, but different label
"label": "SUM(num_girls)",
},
False,
],
# reference the ambiguous alias in SIMPLE metric
[
{
"expressionType": "SIMPLE",
"column": {"column_name": "num_boys", "type": "BIGINT(20)"},
"aggregate": "AVG",
"label": "AVG(num_boys)",
},
False,
],
# reference the ambiguous alias in CUSTOM SQL metric
[
{
"expressionType": "SQL",
"sqlExpression": "MAX(CASE WHEN num_boys > 0 THEN 1 ELSE 0 END)",
"label": "MAX(CASE WHEN...",
},
True,
],
],
},
"birth_names:only_orderby_has_metric": {"metrics": [],},
}
ANNOTATION_LAYERS = {
AnnotationType.FORMULA: {
"annotationType": "FORMULA",
"color": "#ff7f44",
"hideLine": False,
"name": "my formula",
"opacity": "",
"overrides": {"time_range": None},
"show": True,
"showMarkers": False,
"sourceType": "",
"style": "solid",
"value": "3+x",
"width": 5,
},
AnnotationType.EVENT: {
"name": "my event",
"annotationType": "EVENT",
"sourceType": "NATIVE",
"color": "#e04355",
"opacity": "",
"style": "solid",
"width": 5,
"showMarkers": False,
"hideLine": False,
"value": 1,
"overrides": {"time_range": None},
"show": True,
"titleColumn": "",
"descriptionColumns": [],
"timeColumn": "",
"intervalEndColumn": "",
},
AnnotationType.INTERVAL: {
"name": "my interval",
"annotationType": "INTERVAL",
"sourceType": "NATIVE",
"color": "#e04355",
"opacity": "",
"style": "solid",
"width": 1,
"showMarkers": False,
"hideLine": False,
"value": 1,
"overrides": {"time_range": None},
"show": True,
"titleColumn": "",
"descriptionColumns": [],
"timeColumn": "",
"intervalEndColumn": "",
},
AnnotationType.TIME_SERIES: {
"annotationType": "TIME_SERIES",
"color": None,
"descriptionColumns": [],
"hideLine": False,
"intervalEndColumn": "",
"name": "my line",
"opacity": "",
"overrides": {"time_range": None},
"show": True,
"showMarkers": False,
"sourceType": "line",
"style": "dashed",
"timeColumn": "",
"titleColumn": "",
"value": 837,
"width": 5,
},
}
POSTPROCESSING_OPERATIONS = {
"birth_names": [
{
"operation": "aggregate",
"options": {
"groupby": ["gender"],
"aggregates": {
"q1": {
"operator": "percentile",
"column": "sum__num",
"options": {"q": 25},
},
"median": {"operator": "median", "column": "sum__num",},
},
},
},
{"operation": "sort", "options": {"columns": {"q1": False, "gender": True},},},
]
}
def get_query_object(
query_name: str, add_postprocessing_operations: bool, add_time_offsets: bool,
) -> Dict[str, Any]:
if query_name not in QUERY_OBJECTS:
raise Exception(f"QueryObject fixture not defined for datasource: {query_name}")
obj = QUERY_OBJECTS[query_name]
# apply overrides
if ":" in query_name:
parent_query_name = query_name.split(":")[0]
obj = {
**QUERY_OBJECTS[parent_query_name],
**obj,
}
query_object = copy.deepcopy(obj)
if add_postprocessing_operations:
query_object["post_processing"] = _get_postprocessing_operation(query_name)
if add_time_offsets:
query_object["time_offsets"] = ["1 year ago"]
return query_object
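# Illustrative sketch (not used by the fixtures themselves): how the `:suffix`
# overrides above merge with their parent query. "birth_names:include_time"
# keeps every key from "birth_names" but replaces `groupby`.
def _example_query_object_override() -> Dict[str, Any]:
    merged = get_query_object(
        "birth_names:include_time",
        add_postprocessing_operations=False,
        add_time_offsets=False,
    )
    assert merged["groupby"] == [DTTM_ALIAS, "name"]
    assert merged["granularity"] == "ds"  # inherited from the parent query
    return merged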
def _get_postprocessing_operation(query_name: str) -> List[Dict[str, Any]]:
if query_name not in QUERY_OBJECTS:
raise Exception(
f"Post-processing fixture not defined for datasource: {query_name}"
)
return copy.deepcopy(POSTPROCESSING_OPERATIONS[query_name])
def get_query_context(
query_name: str,
add_postprocessing_operations: bool = False,
add_time_offsets: bool = False,
) -> Dict[str, Any]:
"""
Create a request payload for retrieving a QueryContext object via the
`api/v1/chart/data` endpoint. By default returns a payload corresponding to one
generated by the "Boy Name Cloud" chart in the examples.
:param query_name: name of an example query, which is always in the format
of `datasource_name[:test_case_name]`, where `:test_case_name` is optional.
    :param add_postprocessing_operations: Add post-processing operations to QueryObject
    :param add_time_offsets: Add time offsets to QueryObject (advanced analytics)
:return: Request payload
"""
table_name = query_name.split(":")[0]
table = SupersetTestCase.get_table(name=table_name)
return {
"datasource": {"id": table.id, "type": table.type},
"queries": [
get_query_object(
query_name, add_postprocessing_operations, add_time_offsets,
)
],
}
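# Illustrative sketch of how a test might use the helper above. It assumes the
# examples "birth_names" table has been loaded into the test database, so it is
# not executed at import time.
def _example_chart_data_payload() -> Dict[str, Any]:
    payload = get_query_context("birth_names", add_postprocessing_operations=True)
    # payload["datasource"] -> {"id": <table id>, "type": "table"}
    # payload["queries"][0]["post_processing"] -> the aggregate + sort operations
    return payload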
|
|
"""Mail.ru Authentication Views
You may see developer docs on http://api.mail.ru/docs/guides/oauth/
"""
import hashlib
import re
import uuid
from pyramid.httpexceptions import HTTPFound
from pyramid.security import NO_PERMISSION_REQUIRED
import requests
from ..api import (
AuthenticationComplete,
AuthenticationDenied,
register_provider,
)
from ..exceptions import CSRFError, ThirdPartyFailure
from ..settings import ProviderSettings
from ..utils import flat_url
PROVIDER_NAME = 'mailru'
PROVIDER_DOMAIN = 'mail.ru'
PROVIDER_AUTH_URL = 'https://connect.mail.ru/oauth/authorize'
PROVIDER_ACCESS_TOKEN_URL = 'https://connect.mail.ru/oauth/token'
PROVIDER_USER_PROFILE_URL = 'https://www.appsmail.ru/platform/api'
PROVIDER_USER_PROFILE_API_METHOD = 'users.getInfo'
FIELD_SEX = {
0: 'male',
1: 'female'
}
# Mail.ru provides birthday information in the form 'dd.mm.yyyy', which is the
# usual representation of dates in Russia.
# Therefore, we must convert it into ISO 8601 in order to follow the
# Portable Contacts' birthday format.
FIELD_BIRTHDAY_RE = re.compile(r'(?P<dd>\d{2})\.(?P<mm>\d{2})\.(?P<yyyy>\d{4})')
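# For example (illustrative values only):
#   m = FIELD_BIRTHDAY_RE.match('31.12.1985')
#   '{yyyy}-{mm}-{dd}'.format(**m.groupdict())   # -> '1985-12-31'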
class MailRuAuthenticationComplete(AuthenticationComplete):
"""MailRu auth complete"""
def includeme(config):
config.add_directive('add_mailru_login', add_mailru_login)
config.add_directive('add_mailru_login_from_settings',
add_mailru_login_from_settings)
def add_mailru_login_from_settings(config, prefix='velruse.mailru.'):
settings = config.registry.settings
p = ProviderSettings(settings, prefix)
p.update('consumer_key', required=True)
p.update('consumer_secret', required=True)
p.update('scope')
p.update('login_path')
p.update('callback_path')
config.add_mailru_login(**p.kwargs)
def add_mailru_login(
config,
consumer_key,
consumer_secret,
scope=None,
login_path='/login/{name}'.format(name=PROVIDER_NAME),
callback_path='/login/{name}/callback'.format(name=PROVIDER_NAME),
name=PROVIDER_NAME
):
"""Add a MailRu login provider to the application."""
provider = MailRuProvider(name, consumer_key, consumer_secret, scope)
config.add_route(provider.login_route, login_path)
config.add_view(
provider,
attr='login',
route_name=provider.login_route,
permission=NO_PERMISSION_REQUIRED
)
config.add_route(
provider.callback_route, callback_path,
use_global_views=True,
factory=provider.callback
)
register_provider(config, name, provider)
class MailRuProvider(object):
def __init__(self, name, consumer_key, consumer_secret, scope):
self.name = name
self.type = PROVIDER_NAME
self.consumer_key = consumer_key
self.consumer_secret = consumer_secret
self.scope = scope
self.login_route = 'velruse.{name}-login'.format(name=name)
self.callback_route = 'velruse.{name}-callback'.format(name=name)
def login(self, request):
"""Initiate a MailRu login"""
request.session['velruse.state'] = state = uuid.uuid4().hex
auth_url = flat_url(
PROVIDER_AUTH_URL,
scope=self.scope,
client_id=self.consumer_key,
redirect_uri=request.route_url(self.callback_route),
response_type='code',
state=state)
return HTTPFound(location=auth_url)
def callback(self, request):
"""Process the MailRu redirect"""
sess_state = request.session.pop('velruse.state', None)
req_state = request.GET.get('state')
if not sess_state or sess_state != req_state:
raise CSRFError(
'CSRF Validation check failed. Request state {req_state} is '
'not the same as session state {sess_state}'.format(
req_state=req_state,
sess_state=sess_state
)
)
code = request.GET.get('code')
if not code:
reason = request.GET.get('error', 'No reason provided.')
return AuthenticationDenied(
reason=reason,
provider_name=self.name,
provider_type=self.type
)
# Now retrieve the access token with the code
access_params = dict(
grant_type='authorization_code',
code=code,
client_id=self.consumer_key,
client_secret=self.consumer_secret,
redirect_uri=request.route_url(self.callback_route),
)
r = requests.post(PROVIDER_ACCESS_TOKEN_URL, access_params)
if r.status_code != 200:
raise ThirdPartyFailure(
'Status {status}: {content}'.format(
status=r.status_code, content=r.content
)
)
data = r.json()
access_token = data['access_token']
# Retrieve profile data.
# Mail.ru API requires a special parameter 'sig' which must be composed
# by the following sequence
        signature = hashlib.md5(
            'app_id={client_id}'
            'method={method}'
            'secure=1'
            'session_key={access_token}'
            '{secret_key}'.format(
                client_id=self.consumer_key,
                method=PROVIDER_USER_PROFILE_API_METHOD,
                access_token=access_token,
                secret_key=self.consumer_secret
            ).encode('utf-8')
        ).hexdigest()
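        # For example (illustrative values only), with app_id '123', an access
        # token 'abc' and secret 'shh', the string that is hashed above is:
        #   'app_id=123method=users.getInfosecure=1session_key=abcshh'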
# Read more about the following params on
# http://api.mail.ru/docs/guides/restapi/#params
profile_url = flat_url(
PROVIDER_USER_PROFILE_URL,
method=PROVIDER_USER_PROFILE_API_METHOD,
app_id=self.consumer_key,
sig=signature,
session_key=access_token,
secure=1
)
r = requests.get(profile_url)
if r.status_code != 200:
raise ThirdPartyFailure(
'Status {status}: {content}'.format(
status=r.status_code, content=r.content
)
)
profile = r.json()[0]
profile = extract_normalize_mailru_data(profile)
cred = {'oauthAccessToken': access_token}
return MailRuAuthenticationComplete(
profile=profile,
credentials=cred,
provider_name=self.name,
provider_type=self.type
)
def extract_normalize_mailru_data(data):
"""Extract and normalize MailRu data returned by the provider"""
# You may see the input data format on
# http://api.mail.ru/docs/reference/rest/users-getinfo/#result
profile = {
'accounts': [
{
'domain': PROVIDER_DOMAIN,
'userid': data['uid']
}
],
'name': {},
'gender': FIELD_SEX.get(data.get('sex')),
'photos': [],
'addresses': []
}
# Names
nickname = data.get('nick')
if nickname:
profile['preferredUsername'] = nickname
first_name = data.get('first_name')
if first_name:
profile['name']['givenName'] = first_name
last_name = data.get('last_name')
if last_name:
profile['name']['familyName'] = last_name
if first_name and last_name:
profile['displayName'] = u'{} {}'.format(first_name, last_name).strip()
elif first_name:
profile['displayName'] = first_name
elif last_name:
        profile['displayName'] = last_name
elif nickname:
profile['displayName'] = nickname
else:
profile['displayName'] = 'Mail.ru user {uid}'.format(uid=data['uid'])
# Birthday
match = FIELD_BIRTHDAY_RE.match(data.get('birthday', ''))
if match:
profile['birthday'] = '{yyyy}-{mm}-{dd}'.format(**match.groupdict())
# Email
email = data.get('email')
if email:
profile['emails'] = [{
'value': email,
'primary': True
}]
# URLs
link = data.get('link')
if link:
profile['urls'] = [{
'value': link
}]
# Photos
if data.get('has_pic'):
road_map = [
[
# field suffix
'',
# type
'original'
],
['_big', 'big'],
['_small', 'small'],
['_190', 'custom_190'],
['_180', 'custom_180'],
['_128', 'custom_128'],
['_50', 'custom_50'],
['_40', 'custom_40'],
['_32', 'custom_32'],
['_22', 'custom_22']
]
for item in road_map:
photo, image_type = item
photo = data.get('pic{photo_suffix}'.format(photo_suffix=photo))
if photo:
profile['photos'].append({
'value': photo,
'type': image_type
})
# Location
location = data.get('location', {})
country = location.get('country', {}).get('name')
region = location.get('region', {}).get('name')
city = location.get('city', {}).get('name')
if country or region or city:
address = {}
if country:
address['country'] = country
if region:
address['region'] = region
if city:
address['locality'] = city
profile['addresses'].append(address)
# Now strip out empty values
    for k, v in list(profile.items()):
        if not v or (isinstance(v, list) and not v[0]):
            del profile[k]
return profile
|
|
from .matrixtools import blank_matrix, \
identity_matrix, \
short_to_long, \
long_to_short, \
short_matrices_compatible, \
list_to_sublists, \
list_of_sublists_to_list_of_sums, \
add_short, \
subtract_short, \
multiply_short, \
divide_short,\
transpose
from numpy.linalg import inv, det
import numpy
import os
import sqlite3
from .matches import naicsmatch, fipsmatch
from .UStable import raw_direct_requirements, industry_totals
# Query function copied from queries.py until directory is fixed
def select_column_yield(conn, column_name, fips, own, naics, year):
    '''Return a column from the yield table for a given area, industry, and year.
    :param column_name: name of the column to select
    :param fips: the 2-digit FIPS id
    :param own: the ownership code
    :param naics: the 3- or 6-digit NAICS id
    :param year: the data year
    '''
    # Only the column name is interpolated; the values are bound as query
    # parameters instead of being formatted into the SQL string.
    sql = '''SELECT {} FROM yield
             WHERE fips_id = ?
             AND own_code = ?
             AND naics_id = ?
             AND year = ?'''.format(column_name)
    cur = conn.cursor()
    cur.execute(sql, (str(fips), str(own), str(naics), str(year)))
    row = cur.fetchone()
    return row
# This matches any given NAICS to its IO sector by repeatedly truncating the
# last digit until a match is found.
# If a user enters an invalid NAICS, raise a ValueError.
def match_naics_to_IO(NAICS=111):
    match = naicsmatch
    def loop(NAICS):
        try:
            return [pair['IO'] for pair in match if int(pair['NAICS']) == NAICS][0]
        except (IndexError, ValueError):
            NAICS = str(NAICS)
            try:
                NAICS = int(NAICS[:len(NAICS) - 1])
                return loop(NAICS)
            except ValueError:
                raise ValueError('Invalid NAICS entered')
    return loop(NAICS)
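# Illustrative sketch of the truncation lookup above, using a tiny hypothetical
# NAICS -> IO mapping (the real mapping lives in naicsmatch):
def _example_naics_truncation():
    mapping = {111: 1, 112: 2}  # hypothetical entries
    naics = 111110              # a 6-digit NAICS code
    while naics and naics not in mapping:
        digits = str(naics)
        naics = int(digits[:-1]) if len(digits) > 1 else 0
    return mapping.get(naics)   # 111110 -> 11111 -> 1111 -> 111 -> IO sector 1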
# This matches a state to a fips:
def fips_match(state):
    for match in fipsmatch:
        if match['state'] == state:
            return match['fips']
    raise ValueError('No FIPS code found for state: {}'.format(state))
# Return a list of values 0 to 1 for each IO sector (1 through 66)
def determine_LQs(geo='Alabama', year=2015, lq_type='wages'):
base_dir = os.path.dirname(os.path.abspath(__file__))
database = os.path.join(os.path.dirname(base_dir), 'db.sqlite3')
conn = sqlite3.connect(database)
# Get the fips for our state for the query
fips = fips_match(geo)
# Create empty lists to add emp and wages to
local = [{'IO': i+1, 'employees': 0, 'wages': 0} for i in range(0, 66)]
us = [{'IO': i+1, 'employees': 0, 'wages': 0} for i in range(0, 66)]
# Create empty lists to store erroneous NAICS if we need them
localerrorlist = []
userrorlist = []
# Loop through every NAICS for every IO and add that emp and wage to the IO for the geo
for io in local:
for dic in naicsmatch:
naics = dic['NAICS']
if dic['IO'] == io['IO']:
try:
io['employees'] += select_column_yield(conn, 'employees', fips, 5, naics, year)[0]
io['wages'] += select_column_yield(conn, 'wages', fips, 5, naics, year)[0]
except:
localerrorlist.append(naics)
# Loop through every NAICS for every IO and add that emp and wage to the IO for the US
for io in us:
for dic in naicsmatch:
naics = dic['NAICS']
if dic['IO'] == io['IO']:
try:
io['employees'] += select_column_yield(conn, 'employees', 'US', 5, naics, year)[0]
io['wages'] += select_column_yield(conn, 'wages', 'US', 5, naics, year)[0]
except:
userrorlist.append(naics)
# Create 0 value variables for local and US emp and wages
localtotalemp = 0
localtotalwages = 0
ustotalemp = 0
ustotalwages = 0
# Set those variables to equal the total emp and total wages for US and the local geo
for dict in local:
localtotalemp += dict['employees']
localtotalwages += dict['wages']
for dict in us:
ustotalemp += dict['employees']
ustotalwages += dict['wages']
# Create a list of 66 0s for the LQs
lqs = [0 for i in range(0,66)]
# Determine LQs
if lq_type == 'employees':
localdenominator = localtotalemp
usdenominator = ustotalemp
else:
localdenominator = localtotalwages
usdenominator = ustotalwages
for dict in local:
lqs[dict['IO']-1] = dict[lq_type] / localdenominator
for dict in us:
try:
lqs[dict['IO']-1] /= dict[lq_type] / usdenominator
except:
pass
# Add a 67th LQ equal to 1 for government employment
lqs.append(1)
# If an lq is greater than 1, correct it to 1
# Make sure no lqs are less than 0
for i in range(len(lqs)):
if lqs[i] > 1:
lqs[i] = 1
elif lqs[i] < 0:
lqs[i] = 0
else:
pass
return [lqs, us, local]
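# Worked example (hypothetical numbers) of the location quotient computed
# above: LQ = (local share of wages) / (US share of wages), clamped to [0, 1].
def _example_location_quotient():
    local_wages, local_total = 2000000.0, 50000000.0
    us_wages, us_total = 80000000.0, 4000000000.0
    lq = (local_wages / local_total) / (us_wages / us_total)  # 0.04 / 0.02 = 2.0
    return min(max(lq, 0), 1)                                 # clamped to 1.0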
# Return a list of lists of direct requirements for the given year, geo, and type
def determine_direct_requirements(geo='US', year=2015, lq_type='wages'):
lqoutput = determine_LQs(geo, year, lq_type)
lqs = lqoutput[0]
us = lqoutput[1]
local = lqoutput[2]
direct_requirements = multiply_short(list_to_sublists(lqs, 67), raw_direct_requirements)
# Create an empty list for US revenue per employee
us_rev_per_emp = [0 for i in range(66)]
counter = 0
for i in us_rev_per_emp:
try:
us_rev_per_emp[counter] = industry_totals[counter] / us[counter]['employees']
except:
pass
counter += 1
return [direct_requirements, us, local, us_rev_per_emp]
# TODO:
# check direct requirements--make sure they are shares
def impact(geo_direct_requirements, impact_input):
# {'ID':' 111CA', 'Desc':'Farms',
# 'direct_rev':233900, 'indirect_rev':3326, 'induced_rev':705,
# 'direct_emp':2, 'indirect_emp':0.03326, 'induced_emp':0.00704964751762412,
# 'direct_wage':46780, 'indirect_wage':432.38, 'induced_wage':105.75}
final_output = [
{'ID':0, 'Desc':'',
'direct_rev':0, 'indirect_rev':0, 'induced_rev':0,
'direct_emp':0, 'indirect_emp':0, 'induced_emp':0,
'direct_wage':0, 'indirect_wage':0, 'induced_wage':0} for i in range(66)
]
counter = 1
for i in final_output:
i['ID'] = counter
i['Desc'] = naicsmatch[counter-1]['desc']
counter += 1
# Create lists of 0s for our direct inputs
direct_employment = [0 for i in range(66)]
direct_wages = [0 for i in range(66)]
direct_revenue = [0 for i in range(66)]
# Change those values based on actual user input
direct_employment[impact_input[0]-1] = impact_input[1][1][1]
direct_wages[impact_input[0]-1] = impact_input[1][1][2]
direct_revenue[impact_input[0]-1] = impact_input[1][1][3]
# This is our US emp and wages for each IO
us = geo_direct_requirements[1]
# This is our local emp and wages for each IO
local = geo_direct_requirements[2]
# This is our rev/emp for the US
us_rev_per_emp = geo_direct_requirements[3]
# Get our direct requirements
direct_requirements = geo_direct_requirements[0]
# These are our type1 and type2 matrices
# divide direct requirements by industry totals and subtract that from the identity matrix
type_1_A = subtract_short(identity_matrix(67, 67), divide_short(direct_requirements, list_to_sublists(industry_totals, 67)))
type_1_B = subtract_short(identity_matrix(67, 67), divide_short(direct_requirements, list_to_sublists(industry_totals, 67)))
    # We shorten type1--removing 1 row and 1 column so that it has the correct contents
del type_1_A[:1]
for i in type_1_A:
del i[:1]
# Our direct and indirect coefficients are the inverse of Type 1A
direct_and_indirect = inv(numpy.array(type_1_A)).tolist()
# Our indirect and induced coefficients are the inverse of Type 1B
indirect_and_induced = inv(numpy.array(type_1_B)).tolist()
    # Remove PCE and labor income/purchases from direct_requirements
direct_requirements_shorter = direct_requirements
del direct_requirements_shorter[:1]
for i in direct_requirements:
del i[:1]
# Turn our list of direct revenue into a matrix so we can positionally multiply it for our first round purchases
direct_revenue_matrix = list_to_sublists(direct_revenue, 66)
# Our first round purchases are the direct requirements multiplied by the direct revenue for each sector
first_round_purchases = multiply_short(direct_revenue_matrix, direct_requirements_shorter)
sum_of_purchases = list_of_sublists_to_list_of_sums(transpose(first_round_purchases))
# Multiply our sum of purchases by our direct and indirect coefficients, get our indirect revenue
direct_revenue_copy = [0 for i in range(66)]
direct_revenue_copy[impact_input[0] - 1] = impact_input[1][1][3]
direct_and_indirect_revenue = multiply_short(list_to_sublists(sum_of_purchases, 66), direct_and_indirect)
indirect_revenue = list_of_sublists_to_list_of_sums(transpose(direct_and_indirect_revenue))
indirect_revenue[impact_input[0] - 1] -= impact_input[1][1][3]
# Add wages to our sum of purchases
sum_of_purchases_longer = sum_of_purchases
sum_of_purchases_longer.append(direct_wages[impact_input[0]-1])
# Multiply our sum of purchases (including wages) by our indirect and induced coefficients, get our induced revenue
indirect_and_induced_revenue = multiply_short(list_to_sublists(sum_of_purchases_longer, 67), indirect_and_induced)
induced_revenue = list_of_sublists_to_list_of_sums(transpose(indirect_and_induced_revenue))
del induced_revenue[:1]
counter = 0
for i in induced_revenue:
i -= indirect_revenue[counter]
counter += 1
# Fill our final output direct emp, wages, and rev
for i in final_output:
if i['ID'] == impact_input[0]:
i['direct_emp'] = impact_input[1][1][1]
i['direct_wage'] = impact_input[1][1][2]
i['direct_rev'] = impact_input[1][1][3]
# Fill our final output with indirect and induced rev
counter = 0
for i in final_output:
i['indirect_rev'] = indirect_revenue[i['ID']-1]
i['indirect_emp'] = indirect_revenue[i['ID']-1] / us_rev_per_emp[counter]
i['induced_rev'] = induced_revenue[i['ID']-1]
i['induced_emp'] = induced_revenue[i['ID']-1] / us_rev_per_emp[counter]
if local[counter]['wages'] == 0 or local[counter]['employees'] == 0:
pass
else:
i['indirect_wage'] = i['indirect_emp'] * local[counter]['wages'] / local[counter]['employees']
i['induced_wage'] = i['induced_emp'] * local[counter]['wages'] / local[counter]['employees']
counter += 1
# {'ID':' 111CA', 'Desc':'Farms',
# 'direct_rev':233900, 'indirect_rev':3326, 'induced_rev':705,
# 'direct_emp':2, 'indirect_emp':0.03326, 'induced_emp':0.00704964751762412,
# 'direct_wage':46780, 'indirect_wage':432.38, 'induced_wage':105.75}
return final_output
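# Illustrative sketch of the Leontief-inverse step used in impact() with a
# hypothetical two-sector direct-requirements matrix A:
# total requirements = (I - A)^-1.
def _example_leontief_inverse():
    A = numpy.array([[0.2, 0.3],
                     [0.1, 0.4]])
    total_requirements = inv(numpy.identity(2) - A)
    return total_requirements.tolist()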
# Takes a complex user input and translates it to run through the impact model
def complex_impact(user_input=['Alabama', 2015, [111, 100, 5000000, 15000000], 'wages']):
IO = match_naics_to_IO(user_input[2][0])
final_user_input = [IO, [user_input[1], user_input[2], user_input[3]]]
direct_requirements = determine_direct_requirements(user_input[0], user_input[1], user_input[3])
return impact(geo_direct_requirements=direct_requirements, impact_input=final_user_input)
# Takes a simple input (geo, year, sector) and translates it to run through the impact model
def simple_impact(user_input=['Alabama', 2015, 1]):
# pull emp, wages for that sector
# revenue = industry output * LQ
# run model with those as user inputs
geo_direct_requirements = determine_direct_requirements(geo=user_input[0], year=user_input[1], lq_type='wages')
# This is our local emp and wages for each IO
local = geo_direct_requirements[2]
# This is our rev/emp for the US
us_rev_per_emp = geo_direct_requirements[3]
final_user_input = [user_input[2],
[user_input[1],
[111,
local[user_input[2]-1]['employees'],
local[user_input[2]-1]['wages'],
local[user_input[2]-1]['employees'] * us_rev_per_emp[user_input[2]-1]
],
'wages'
]
]
return impact(geo_direct_requirements=geo_direct_requirements, impact_input=final_user_input)
if __name__ == '__main__':
    print(simple_impact(['Alabama', 2015, 1]))
|
|
"""distutils.dist
Provides the Distribution class, which represents the module distribution
being built/installed/distributed.
"""
__revision__ = "$Id: dist.py 65250 2008-07-26 20:09:45Z amaury.forgeotdarc $"
import sys, os, re
from copy import copy
try:
import warnings
except ImportError:
warnings = None
from distutils.errors import *
from distutils.fancy_getopt import FancyGetopt, translate_longopt
from distutils.util import check_environ, strtobool, rfc822_escape
from distutils import log
from distutils.debug import DEBUG
# Regex to define acceptable Distutils command names. This is not *quite*
# the same as a Python NAME -- I don't allow leading underscores. The fact
# that they're very similar is no coincidence; the default naming scheme is
# to look for a Python module named after the command.
command_re = re.compile (r'^[a-zA-Z]([a-zA-Z0-9_]*)$')
class Distribution:
"""The core of the Distutils. Most of the work hiding behind 'setup'
is really done within a Distribution instance, which farms the work out
to the Distutils commands specified on the command line.
Setup scripts will almost never instantiate Distribution directly,
unless the 'setup()' function is totally inadequate to their needs.
However, it is conceivable that a setup script might wish to subclass
Distribution for some specialized purpose, and then pass the subclass
to 'setup()' as the 'distclass' keyword argument. If so, it is
necessary to respect the expectations that 'setup' has of Distribution.
See the code for 'setup()', in core.py, for details.
"""
# 'global_options' describes the command-line options that may be
# supplied to the setup script prior to any actual commands.
# Eg. "./setup.py -n" or "./setup.py --quiet" both take advantage of
# these global options. This list should be kept to a bare minimum,
# since every global option is also valid as a command option -- and we
# don't want to pollute the commands with too many options that they
# have minimal control over.
# The fourth entry for verbose means that it can be repeated.
global_options = [('verbose', 'v', "run verbosely (default)", 1),
('quiet', 'q', "run quietly (turns verbosity off)"),
('dry-run', 'n', "don't actually do anything"),
('help', 'h', "show detailed help message"),
]
# 'common_usage' is a short (2-3 line) string describing the common
# usage of the setup script.
common_usage = """\
Common commands: (see '--help-commands' for more)
setup.py build will build the package underneath 'build/'
setup.py install will install the package
"""
# options that are not propagated to the commands
display_options = [
('help-commands', None,
"list all available commands"),
('name', None,
"print package name"),
('version', 'V',
"print package version"),
('fullname', None,
"print <package name>-<version>"),
('author', None,
"print the author's name"),
('author-email', None,
"print the author's email address"),
('maintainer', None,
"print the maintainer's name"),
('maintainer-email', None,
"print the maintainer's email address"),
('contact', None,
"print the maintainer's name if known, else the author's"),
('contact-email', None,
"print the maintainer's email address if known, else the author's"),
('url', None,
"print the URL for this package"),
('license', None,
"print the license of the package"),
('licence', None,
"alias for --license"),
('description', None,
"print the package description"),
('long-description', None,
"print the long package description"),
('platforms', None,
"print the list of platforms"),
('classifiers', None,
"print the list of classifiers"),
('keywords', None,
"print the list of keywords"),
('provides', None,
"print the list of packages/modules provided"),
('requires', None,
"print the list of packages/modules required"),
('obsoletes', None,
"print the list of packages/modules made obsolete")
]
display_option_names = [translate_longopt(x[0]) for x in display_options]
# negative options are options that exclude other options
negative_opt = {'quiet': 'verbose'}
# -- Creation/initialization methods -------------------------------
def __init__ (self, attrs=None):
"""Construct a new Distribution instance: initialize all the
attributes of a Distribution, and then use 'attrs' (a dictionary
mapping attribute names to values) to assign some of those
attributes their "real" values. (Any attributes not mentioned in
'attrs' will be assigned to some null value: 0, None, an empty list
or dictionary, etc.) Most importantly, initialize the
'command_obj' attribute to the empty dictionary; this will be
filled in with real command objects by 'parse_command_line()'.
"""
# Default values for our command-line options
self.verbose = 1
self.dry_run = 0
self.help = 0
for attr in self.display_option_names:
setattr(self, attr, 0)
# Store the distribution meta-data (name, version, author, and so
# forth) in a separate object -- we're getting to have enough
# information here (and enough command-line options) that it's
# worth it. Also delegate 'get_XXX()' methods to the 'metadata'
# object in a sneaky and underhanded (but efficient!) way.
self.metadata = DistributionMetadata()
for basename in self.metadata._METHOD_BASENAMES:
method_name = "get_" + basename
setattr(self, method_name, getattr(self.metadata, method_name))
# 'cmdclass' maps command names to class objects, so we
# can 1) quickly figure out which class to instantiate when
# we need to create a new command object, and 2) have a way
# for the setup script to override command classes
self.cmdclass = {}
# 'command_packages' is a list of packages in which commands
# are searched for. The factory for command 'foo' is expected
# to be named 'foo' in the module 'foo' in one of the packages
# named here. This list is searched from the left; an error
# is raised if no named package provides the command being
# searched for. (Always access using get_command_packages().)
self.command_packages = None
# 'script_name' and 'script_args' are usually set to sys.argv[0]
# and sys.argv[1:], but they can be overridden when the caller is
# not necessarily a setup script run from the command-line.
self.script_name = None
self.script_args = None
# 'command_options' is where we store command options between
# parsing them (from config files, the command-line, etc.) and when
# they are actually needed -- ie. when the command in question is
# instantiated. It is a dictionary of dictionaries of 2-tuples:
# command_options = { command_name : { option : (source, value) } }
self.command_options = {}
# 'dist_files' is the list of (command, pyversion, file) that
# have been created by any dist commands run so far. This is
# filled regardless of whether the run is dry or not. pyversion
# gives sysconfig.get_python_version() if the dist file is
# specific to a Python version, 'any' if it is good for all
# Python versions on the target platform, and '' for a source
# file. pyversion should not be used to specify minimum or
# maximum required Python versions; use the metainfo for that
# instead.
self.dist_files = []
# These options are really the business of various commands, rather
# than of the Distribution itself. We provide aliases for them in
# Distribution as a convenience to the developer.
self.packages = None
self.package_data = {}
self.package_dir = None
self.py_modules = None
self.libraries = None
self.headers = None
self.ext_modules = None
self.ext_package = None
self.include_dirs = None
self.extra_path = None
self.scripts = None
self.data_files = None
# And now initialize bookkeeping stuff that can't be supplied by
# the caller at all. 'command_obj' maps command names to
# Command instances -- that's how we enforce that every command
# class is a singleton.
self.command_obj = {}
# 'have_run' maps command names to boolean values; it keeps track
# of whether we have actually run a particular command, to make it
# cheap to "run" a command whenever we think we might need to -- if
# it's already been done, no need for expensive filesystem
# operations, we just check the 'have_run' dictionary and carry on.
# It's only safe to query 'have_run' for a command class that has
# been instantiated -- a false value will be inserted when the
# command object is created, and replaced with a true value when
# the command is successfully run. Thus it's probably best to use
# '.get()' rather than a straight lookup.
self.have_run = {}
# Now we'll use the attrs dictionary (ultimately, keyword args from
# the setup script) to possibly override any or all of these
# distribution options.
if attrs:
# Pull out the set of command options and work on them
# specifically. Note that this order guarantees that aliased
# command options will override any supplied redundantly
# through the general options dictionary.
options = attrs.get('options')
if options:
del attrs['options']
for (command, cmd_options) in options.items():
opt_dict = self.get_option_dict(command)
for (opt, val) in cmd_options.items():
opt_dict[opt] = ("setup script", val)
if 'licence' in attrs:
attrs['license'] = attrs['licence']
del attrs['licence']
msg = "'licence' distribution option is deprecated; use 'license'"
if warnings is not None:
warnings.warn(msg)
else:
sys.stderr.write(msg + "\n")
# Now work on the rest of the attributes. Any attribute that's
# not already defined is invalid!
for (key,val) in attrs.items():
if hasattr(self.metadata, "set_" + key):
getattr(self.metadata, "set_" + key)(val)
elif hasattr(self.metadata, key):
setattr(self.metadata, key, val)
elif hasattr(self, key):
setattr(self, key, val)
else:
msg = "Unknown distribution option: %s" % repr(key)
if warnings is not None:
warnings.warn(msg)
else:
sys.stderr.write(msg + "\n")
self.finalize_options()
def get_option_dict (self, command):
"""Get the option dictionary for a given command. If that
command's option dictionary hasn't been created yet, then create it
and return the new dictionary; otherwise, return the existing
option dictionary.
"""
dict = self.command_options.get(command)
if dict is None:
dict = self.command_options[command] = {}
return dict
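    # Illustrative example (comment only) of the structure handed back by
    # 'get_option_dict()'; each value is a (source, value) pair:
    #
    #   self.command_options == {
    #       'build_ext': {'inplace': ('command line', 1)},
    #       'sdist':     {'formats': ('setup.cfg', 'gztar')},
    #   }
    #
    # so self.get_option_dict('sdist')['formats'] -> ('setup.cfg', 'gztar').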
def dump_option_dicts (self, header=None, commands=None, indent=""):
from pprint import pformat
if commands is None: # dump all command option dicts
commands = sorted(self.command_options.keys())
if header is not None:
print(indent + header)
indent = indent + " "
if not commands:
print(indent + "no commands known yet")
return
for cmd_name in commands:
opt_dict = self.command_options.get(cmd_name)
if opt_dict is None:
print(indent + "no option dict for '%s' command" % cmd_name)
else:
print(indent + "option dict for '%s' command:" % cmd_name)
out = pformat(opt_dict)
for line in out.split("\n"):
print(indent + " " + line)
# -- Config file finding/parsing methods ---------------------------
def find_config_files (self):
"""Find as many configuration files as should be processed for this
platform, and return a list of filenames in the order in which they
should be parsed. The filenames returned are guaranteed to exist
(modulo nasty race conditions).
There are three possible config files: distutils.cfg in the
Distutils installation directory (ie. where the top-level
Distutils __inst__.py file lives), a file in the user's home
directory named .pydistutils.cfg on Unix and pydistutils.cfg
on Windows/Mac, and setup.cfg in the current directory.
"""
files = []
check_environ()
# Where to look for the system-wide Distutils config file
sys_dir = os.path.dirname(sys.modules['distutils'].__file__)
# Look for the system config file
sys_file = os.path.join(sys_dir, "distutils.cfg")
if os.path.isfile(sys_file):
files.append(sys_file)
# What to call the per-user config file
if os.name == 'posix':
user_filename = ".pydistutils.cfg"
else:
user_filename = "pydistutils.cfg"
# And look for the user config file
user_file = os.path.join(os.path.expanduser('~'), user_filename)
if os.path.isfile(user_file):
files.append(user_file)
# All platforms support local setup.cfg
local_file = "setup.cfg"
if os.path.isfile(local_file):
files.append(local_file)
return files
def parse_config_files (self, filenames=None):
from configparser import ConfigParser
if filenames is None:
filenames = self.find_config_files()
if DEBUG: print("Distribution.parse_config_files():")
parser = ConfigParser()
for filename in filenames:
if DEBUG: print(" reading", filename)
parser.read(filename)
for section in parser.sections():
options = parser.options(section)
opt_dict = self.get_option_dict(section)
for opt in options:
if opt != '__name__':
val = parser.get(section,opt)
opt = opt.replace('-', '_')
opt_dict[opt] = (filename, val)
# Make the ConfigParser forget everything (so we retain
# the original filenames that options come from)
parser.__init__()
# If there was a "global" section in the config file, use it
# to set Distribution options.
if 'global' in self.command_options:
for (opt, (src, val)) in self.command_options['global'].items():
alias = self.negative_opt.get(opt)
try:
if alias:
setattr(self, alias, not strtobool(val))
elif opt in ('verbose', 'dry_run'): # ugh!
setattr(self, opt, strtobool(val))
else:
setattr(self, opt, val)
except ValueError as msg:
raise DistutilsOptionError(msg)
# -- Command-line parsing methods ----------------------------------
def parse_command_line (self):
"""Parse the setup script's command line, taken from the
'script_args' instance attribute (which defaults to 'sys.argv[1:]'
-- see 'setup()' in core.py). This list is first processed for
"global options" -- options that set attributes of the Distribution
instance. Then, it is alternately scanned for Distutils commands
and options for that command. Each new command terminates the
options for the previous command. The allowed options for a
command are determined by the 'user_options' attribute of the
command class -- thus, we have to be able to load command classes
in order to parse the command line. Any error in that 'options'
attribute raises DistutilsGetoptError; any error on the
command-line raises DistutilsArgError. If no Distutils commands
were found on the command line, raises DistutilsArgError. Return
true if command-line was successfully parsed and we should carry
on with executing commands; false if no errors but we shouldn't
execute commands (currently, this only happens if user asks for
help).
"""
#
# We now have enough information to show the Macintosh dialog
# that allows the user to interactively specify the "command line".
#
toplevel_options = self._get_toplevel_options()
if sys.platform == 'mac':
import EasyDialogs
cmdlist = self.get_command_list()
self.script_args = EasyDialogs.GetArgv(
toplevel_options + self.display_options, cmdlist)
# We have to parse the command line a bit at a time -- global
# options, then the first command, then its options, and so on --
# because each command will be handled by a different class, and
# the options that are valid for a particular class aren't known
# until we have loaded the command class, which doesn't happen
# until we know what the command is.
self.commands = []
parser = FancyGetopt(toplevel_options + self.display_options)
parser.set_negative_aliases(self.negative_opt)
parser.set_aliases({'licence': 'license'})
args = parser.getopt(args=self.script_args, object=self)
option_order = parser.get_option_order()
log.set_verbosity(self.verbose)
# for display options we return immediately
if self.handle_display_options(option_order):
return
while args:
args = self._parse_command_opts(parser, args)
if args is None: # user asked for help (and got it)
return
# Handle the cases of --help as a "global" option, ie.
# "setup.py --help" and "setup.py --help command ...". For the
# former, we show global options (--verbose, --dry-run, etc.)
# and display-only options (--name, --version, etc.); for the
# latter, we omit the display-only options and show help for
# each command listed on the command line.
if self.help:
self._show_help(parser,
display_options=len(self.commands) == 0,
commands=self.commands)
return
# Oops, no commands found -- an end-user error
if not self.commands:
raise DistutilsArgError("no commands supplied")
# All is well: return true
return True
def _get_toplevel_options (self):
"""Return the non-display options recognized at the top level.
This includes options that are recognized *only* at the top
level as well as options recognized for commands.
"""
return self.global_options + [
("command-packages=", None,
"list of packages that provide distutils commands"),
]
def _parse_command_opts (self, parser, args):
"""Parse the command-line options for a single command.
'parser' must be a FancyGetopt instance; 'args' must be the list
of arguments, starting with the current command (whose options
we are about to parse). Returns a new version of 'args' with
the next command at the front of the list; will be the empty
list if there are no more commands on the command line. Returns
None if the user asked for help on this command.
"""
# late import because of mutual dependence between these modules
from distutils.cmd import Command
# Pull the current command from the head of the command line
command = args[0]
if not command_re.match(command):
raise SystemExit("invalid command name '%s'" % command)
self.commands.append(command)
# Dig up the command class that implements this command, so we
# 1) know that it's a valid command, and 2) know which options
# it takes.
try:
cmd_class = self.get_command_class(command)
except DistutilsModuleError as msg:
raise DistutilsArgError(msg)
# Require that the command class be derived from Command -- want
# to be sure that the basic "command" interface is implemented.
if not issubclass(cmd_class, Command):
raise DistutilsClassError(
"command class %s must subclass Command" % cmd_class)
# Also make sure that the command object provides a list of its
# known options.
if not (hasattr(cmd_class, 'user_options') and
isinstance(cmd_class.user_options, list)):
raise DistutilsClassError(("command class %s must provide " +
"'user_options' attribute (a list of tuples)") % \
cmd_class)
# If the command class has a list of negative alias options,
# merge it in with the global negative aliases.
negative_opt = self.negative_opt
if hasattr(cmd_class, 'negative_opt'):
negative_opt = copy(negative_opt)
negative_opt.update(cmd_class.negative_opt)
# Check for help_options in command class. They have a different
# format (tuple of four) so we need to preprocess them here.
if (hasattr(cmd_class, 'help_options') and
isinstance(cmd_class.help_options, list)):
help_options = fix_help_options(cmd_class.help_options)
else:
help_options = []
# All commands support the global options too, just by adding
# in 'global_options'.
parser.set_option_table(self.global_options +
cmd_class.user_options +
help_options)
parser.set_negative_aliases(negative_opt)
(args, opts) = parser.getopt(args[1:])
if hasattr(opts, 'help') and opts.help:
self._show_help(parser, display_options=0, commands=[cmd_class])
return
if (hasattr(cmd_class, 'help_options') and
isinstance(cmd_class.help_options, list)):
help_option_found=0
for (help_option, short, desc, func) in cmd_class.help_options:
if hasattr(opts, parser.get_attr_name(help_option)):
help_option_found=1
#print "showing help for option %s of command %s" % \
# (help_option[0],cmd_class)
if hasattr(func, '__call__'):
func()
else:
raise DistutilsClassError(
"invalid help function %r for help option '%s': "
"must be a callable object (function, etc.)"
% (func, help_option))
if help_option_found:
return
# Put the options from the command-line into their official
# holding pen, the 'command_options' dictionary.
opt_dict = self.get_option_dict(command)
for (name, value) in vars(opts).items():
opt_dict[name] = ("command line", value)
return args
def finalize_options (self):
"""Set final values for all the options on the Distribution
instance, analogous to the .finalize_options() method of Command
objects.
"""
keywords = self.metadata.keywords
if keywords is not None:
if isinstance(keywords, str):
keywordlist = keywords.split(',')
self.metadata.keywords = [x.strip() for x in keywordlist]
platforms = self.metadata.platforms
if platforms is not None:
if isinstance(platforms, str):
platformlist = platforms.split(',')
self.metadata.platforms = [x.strip() for x in platformlist]
def _show_help (self,
parser,
global_options=1,
display_options=1,
commands=[]):
"""Show help for the setup script command-line in the form of
several lists of command-line options. 'parser' should be a
FancyGetopt instance; do not expect it to be returned in the
same state, as its option table will be reset to make it
generate the correct help text.
If 'global_options' is true, lists the global options:
--verbose, --dry-run, etc. If 'display_options' is true, lists
the "display-only" options: --name, --version, etc. Finally,
lists per-command help for every command name or command class
in 'commands'.
"""
# late import because of mutual dependence between these modules
from distutils.core import gen_usage
from distutils.cmd import Command
if global_options:
if display_options:
options = self._get_toplevel_options()
else:
options = self.global_options
parser.set_option_table(options)
parser.print_help(self.common_usage + "\nGlobal options:")
print()
if display_options:
parser.set_option_table(self.display_options)
parser.print_help(
"Information display options (just display " +
"information, ignore any commands)")
print()
for command in self.commands:
if isinstance(command, type) and issubclass(command, Command):
klass = command
else:
klass = self.get_command_class(command)
if (hasattr(klass, 'help_options') and
isinstance(klass.help_options, list)):
parser.set_option_table(klass.user_options +
fix_help_options(klass.help_options))
else:
parser.set_option_table(klass.user_options)
parser.print_help("Options for '%s' command:" % klass.__name__)
print()
print(gen_usage(self.script_name))
return
def handle_display_options (self, option_order):
"""If there were any non-global "display-only" options
(--help-commands or the metadata display options) on the command
line, display the requested info and return true; else return
false.
"""
from distutils.core import gen_usage
# User just wants a list of commands -- we'll print it out and stop
# processing now (ie. if they ran "setup --help-commands foo bar",
# we ignore "foo bar").
if self.help_commands:
self.print_commands()
print()
print(gen_usage(self.script_name))
return 1
# If user supplied any of the "display metadata" options, then
# display that metadata in the order in which the user supplied the
# metadata options.
any_display_options = 0
is_display_option = {}
for option in self.display_options:
is_display_option[option[0]] = 1
for (opt, val) in option_order:
if val and is_display_option.get(opt):
opt = translate_longopt(opt)
value = getattr(self.metadata, "get_"+opt)()
if opt in ['keywords', 'platforms']:
print(','.join(value))
elif opt in ('classifiers', 'provides', 'requires',
'obsoletes'):
print('\n'.join(value))
else:
print(value)
any_display_options = 1
return any_display_options
def print_command_list (self, commands, header, max_length):
"""Print a subset of the list of all commands -- used by
'print_commands()'.
"""
print(header + ":")
for cmd in commands:
klass = self.cmdclass.get(cmd)
if not klass:
klass = self.get_command_class(cmd)
try:
description = klass.description
except AttributeError:
description = "(no description available)"
print(" %-*s %s" % (max_length, cmd, description))
def print_commands (self):
"""Print out a help message listing all available commands with a
description of each. The list is divided into "standard commands"
(listed in distutils.command.__all__) and "extra commands"
(mentioned in self.cmdclass, but not a standard command). The
descriptions come from the command class attribute
'description'.
"""
import distutils.command
std_commands = distutils.command.__all__
is_std = {}
for cmd in std_commands:
is_std[cmd] = 1
extra_commands = []
for cmd in self.cmdclass.keys():
if not is_std.get(cmd):
extra_commands.append(cmd)
max_length = 0
for cmd in (std_commands + extra_commands):
if len(cmd) > max_length:
max_length = len(cmd)
self.print_command_list(std_commands,
"Standard commands",
max_length)
if extra_commands:
print()
self.print_command_list(extra_commands,
"Extra commands",
max_length)
def get_command_list (self):
"""Get a list of (command, description) tuples.
The list is divided into "standard commands" (listed in
distutils.command.__all__) and "extra commands" (mentioned in
self.cmdclass, but not a standard command). The descriptions come
from the command class attribute 'description'.
"""
# Currently this is only used on Mac OS, for the Mac-only GUI
# Distutils interface (by Jack Jansen)
import distutils.command
std_commands = distutils.command.__all__
is_std = {}
for cmd in std_commands:
is_std[cmd] = 1
extra_commands = []
for cmd in self.cmdclass.keys():
if not is_std.get(cmd):
extra_commands.append(cmd)
rv = []
for cmd in (std_commands + extra_commands):
klass = self.cmdclass.get(cmd)
if not klass:
klass = self.get_command_class(cmd)
try:
description = klass.description
except AttributeError:
description = "(no description available)"
rv.append((cmd, description))
return rv
# -- Command class/object methods ----------------------------------
def get_command_packages (self):
"""Return a list of packages from which commands are loaded."""
pkgs = self.command_packages
        if not isinstance(pkgs, list):
pkgs = (pkgs or "").split(",")
for i in range(len(pkgs)):
pkgs[i] = pkgs[i].strip()
pkgs = [p for p in pkgs if p]
if "distutils.command" not in pkgs:
pkgs.insert(0, "distutils.command")
self.command_packages = pkgs
return pkgs
def get_command_class (self, command):
"""Return the class that implements the Distutils command named by
'command'. First we check the 'cmdclass' dictionary; if the
command is mentioned there, we fetch the class object from the
dictionary and return it. Otherwise we load the command module
("distutils.command." + command) and fetch the command class from
the module. The loaded class is also stored in 'cmdclass'
to speed future calls to 'get_command_class()'.
Raises DistutilsModuleError if the expected module could not be
found, or if that module does not define the expected class.
"""
klass = self.cmdclass.get(command)
if klass:
return klass
for pkgname in self.get_command_packages():
module_name = "%s.%s" % (pkgname, command)
klass_name = command
try:
__import__ (module_name)
module = sys.modules[module_name]
except ImportError:
continue
try:
klass = getattr(module, klass_name)
except AttributeError:
raise DistutilsModuleError(
"invalid command '%s' (no class '%s' in module '%s')"
% (command, klass_name, module_name))
self.cmdclass[command] = klass
return klass
raise DistutilsModuleError("invalid command '%s'" % command)
def get_command_obj (self, command, create=1):
"""Return the command object for 'command'. Normally this object
is cached on a previous call to 'get_command_obj()'; if no command
object for 'command' is in the cache, then we either create and
return it (if 'create' is true) or return None.
"""
cmd_obj = self.command_obj.get(command)
if not cmd_obj and create:
if DEBUG:
print("Distribution.get_command_obj(): " \
"creating '%s' command object" % command)
klass = self.get_command_class(command)
cmd_obj = self.command_obj[command] = klass(self)
self.have_run[command] = 0
# Set any options that were supplied in config files
# or on the command line. (NB. support for error
# reporting is lame here: any errors aren't reported
# until 'finalize_options()' is called, which means
# we won't report the source of the error.)
options = self.command_options.get(command)
if options:
self._set_command_options(cmd_obj, options)
return cmd_obj
def _set_command_options (self, command_obj, option_dict=None):
"""Set the options for 'command_obj' from 'option_dict'. Basically
this means copying elements of a dictionary ('option_dict') to
attributes of an instance ('command').
'command_obj' must be a Command instance. If 'option_dict' is not
supplied, uses the standard option dictionary for this command
(from 'self.command_options').
"""
command_name = command_obj.get_command_name()
if option_dict is None:
option_dict = self.get_option_dict(command_name)
if DEBUG: print(" setting options for '%s' command:" % command_name)
for (option, (source, value)) in option_dict.items():
if DEBUG: print(" %s = %s (from %s)" % (option, value, source))
try:
bool_opts = [translate_longopt(o)
for o in command_obj.boolean_options]
except AttributeError:
bool_opts = []
try:
neg_opt = command_obj.negative_opt
except AttributeError:
neg_opt = {}
try:
is_string = isinstance(value, str)
if option in neg_opt and is_string:
setattr(command_obj, neg_opt[option], not strtobool(value))
elif option in bool_opts and is_string:
setattr(command_obj, option, strtobool(value))
elif hasattr(command_obj, option):
setattr(command_obj, option, value)
else:
raise DistutilsOptionError(
"error in %s: command '%s' has no such option '%s'"
% (source, command_name, option))
except ValueError as msg:
raise DistutilsOptionError(msg)
def reinitialize_command (self, command, reinit_subcommands=0):
"""Reinitializes a command to the state it was in when first
returned by 'get_command_obj()': ie., initialized but not yet
finalized. This provides the opportunity to sneak option
values in programmatically, overriding or supplementing
user-supplied values from the config files and command line.
You'll have to re-finalize the command object (by calling
'finalize_options()' or 'ensure_finalized()') before using it for
real.
'command' should be a command name (string) or command object. If
'reinit_subcommands' is true, also reinitializes the command's
sub-commands, as declared by the 'sub_commands' class attribute (if
it has one). See the "install" command for an example. Only
reinitializes the sub-commands that actually matter, ie. those
whose test predicates return true.
Returns the reinitialized command object.
"""
from distutils.cmd import Command
if not isinstance(command, Command):
command_name = command
command = self.get_command_obj(command_name)
else:
command_name = command.get_command_name()
if not command.finalized:
return command
command.initialize_options()
command.finalized = 0
self.have_run[command_name] = 0
self._set_command_options(command)
if reinit_subcommands:
for sub in command.get_sub_commands():
self.reinitialize_command(sub, reinit_subcommands)
return command
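    # Typical (illustrative) use from a custom command's run() method:
    #
    #   build_ext = self.distribution.reinitialize_command('build_ext')
    #   build_ext.inplace = 1            # sneak an option value in
    #   self.run_command('build_ext')    # re-finalizes and runs the command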
# -- Methods that operate on the Distribution ----------------------
def announce (self, msg, level=1):
log.debug(msg)
def run_commands (self):
"""Run each command that was seen on the setup script command line.
Uses the list of commands found and cache of command objects
created by 'get_command_obj()'.
"""
for cmd in self.commands:
self.run_command(cmd)
# -- Methods that operate on its Commands --------------------------
def run_command (self, command):
"""Do whatever it takes to run a command (including nothing at all,
if the command has already been run). Specifically: if we have
already created and run the command named by 'command', return
silently without doing anything. If the command named by 'command'
doesn't even have a command object yet, create one. Then invoke
'run()' on that command object (or an existing one).
"""
# Already been here, done that? then return silently.
if self.have_run.get(command):
return
log.info("running %s", command)
cmd_obj = self.get_command_obj(command)
cmd_obj.ensure_finalized()
cmd_obj.run()
self.have_run[command] = 1
# -- Distribution query methods ------------------------------------
def has_pure_modules (self):
return len(self.packages or self.py_modules or []) > 0
def has_ext_modules (self):
return self.ext_modules and len(self.ext_modules) > 0
def has_c_libraries (self):
return self.libraries and len(self.libraries) > 0
def has_modules (self):
return self.has_pure_modules() or self.has_ext_modules()
def has_headers (self):
return self.headers and len(self.headers) > 0
def has_scripts (self):
return self.scripts and len(self.scripts) > 0
def has_data_files (self):
return self.data_files and len(self.data_files) > 0
def is_pure (self):
return (self.has_pure_modules() and
not self.has_ext_modules() and
not self.has_c_libraries())
# -- Metadata query methods ----------------------------------------
# If you're looking for 'get_name()', 'get_version()', and so forth,
# they are defined in a sneaky way: the constructor binds self.get_XXX
# to self.metadata.get_XXX. The actual code is in the
# DistributionMetadata class, below.
# class Distribution
class DistributionMetadata:
"""Dummy class to hold the distribution meta-data: name, version,
author, and so forth.
"""
_METHOD_BASENAMES = ("name", "version", "author", "author_email",
"maintainer", "maintainer_email", "url",
"license", "description", "long_description",
"keywords", "platforms", "fullname", "contact",
"contact_email", "license", "classifiers",
"download_url",
# PEP 314
"provides", "requires", "obsoletes",
)
def __init__ (self):
self.name = None
self.version = None
self.author = None
self.author_email = None
self.maintainer = None
self.maintainer_email = None
self.url = None
self.license = None
self.description = None
self.long_description = None
self.keywords = None
self.platforms = None
self.classifiers = None
self.download_url = None
# PEP 314
self.provides = None
self.requires = None
self.obsoletes = None
def write_pkg_info (self, base_dir):
"""Write the PKG-INFO file into the release tree.
"""
        pkg_info = open(os.path.join(base_dir, 'PKG-INFO'), 'w')
self.write_pkg_file(pkg_info)
pkg_info.close()
def write_pkg_file (self, file):
"""Write the PKG-INFO format data to a file object.
"""
version = '1.0'
if self.provides or self.requires or self.obsoletes:
version = '1.1'
file.write('Metadata-Version: %s\n' % version)
file.write('Name: %s\n' % self.get_name() )
file.write('Version: %s\n' % self.get_version() )
file.write('Summary: %s\n' % self.get_description() )
file.write('Home-page: %s\n' % self.get_url() )
file.write('Author: %s\n' % self.get_contact() )
file.write('Author-email: %s\n' % self.get_contact_email() )
file.write('License: %s\n' % self.get_license() )
if self.download_url:
file.write('Download-URL: %s\n' % self.download_url)
long_desc = rfc822_escape( self.get_long_description() )
file.write('Description: %s\n' % long_desc)
keywords = ','.join(self.get_keywords())
if keywords:
file.write('Keywords: %s\n' % keywords )
self._write_list(file, 'Platform', self.get_platforms())
self._write_list(file, 'Classifier', self.get_classifiers())
# PEP 314
self._write_list(file, 'Requires', self.get_requires())
self._write_list(file, 'Provides', self.get_provides())
self._write_list(file, 'Obsoletes', self.get_obsoletes())
def _write_list (self, file, name, values):
for value in values:
file.write('%s: %s\n' % (name, value))
# -- Metadata query methods ----------------------------------------
def get_name (self):
return self.name or "UNKNOWN"
def get_version(self):
return self.version or "0.0.0"
def get_fullname (self):
return "%s-%s" % (self.get_name(), self.get_version())
def get_author(self):
return self.author or "UNKNOWN"
def get_author_email(self):
return self.author_email or "UNKNOWN"
def get_maintainer(self):
return self.maintainer or "UNKNOWN"
def get_maintainer_email(self):
return self.maintainer_email or "UNKNOWN"
def get_contact(self):
return (self.maintainer or
self.author or
"UNKNOWN")
def get_contact_email(self):
return (self.maintainer_email or
self.author_email or
"UNKNOWN")
def get_url(self):
return self.url or "UNKNOWN"
def get_license(self):
return self.license or "UNKNOWN"
get_licence = get_license
def get_description(self):
return self.description or "UNKNOWN"
def get_long_description(self):
return self.long_description or "UNKNOWN"
def get_keywords(self):
return self.keywords or []
def get_platforms(self):
return self.platforms or ["UNKNOWN"]
def get_classifiers(self):
return self.classifiers or []
def get_download_url(self):
return self.download_url or "UNKNOWN"
# PEP 314
def get_requires(self):
return self.requires or []
def set_requires(self, value):
import distutils.versionpredicate
for v in value:
distutils.versionpredicate.VersionPredicate(v)
self.requires = value
def get_provides(self):
return self.provides or []
def set_provides(self, value):
value = [v.strip() for v in value]
for v in value:
import distutils.versionpredicate
distutils.versionpredicate.split_provision(v)
self.provides = value
def get_obsoletes(self):
return self.obsoletes or []
def set_obsoletes(self, value):
import distutils.versionpredicate
for v in value:
distutils.versionpredicate.VersionPredicate(v)
self.obsoletes = value
def fix_help_options (options):
"""Convert a 4-tuple 'help_options' list as found in various command
classes to the 3-tuple form required by FancyGetopt.
"""
new_options = []
for help_tuple in options:
new_options.append(help_tuple[0:3])
return new_options
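# Illustrative sketch (not part of distutils): a minimal demonstration of how
# DistributionMetadata.write_pkg_file() renders PKG-INFO. The field values are
# invented for the example, an in-memory buffer stands in for the real
# PKG-INFO file, and Python 3's io.StringIO is assumed.
def _demo_write_pkg_file():
    import io
    meta = DistributionMetadata()
    meta.name = "example-pkg"
    meta.version = "1.2.3"
    meta.author = "Jane Doe"
    meta.author_email = "jane@example.com"
    meta.description = "A short one-line summary"
    # Setting a PEP 314 field bumps Metadata-Version from 1.0 to 1.1.
    meta.set_requires(["somedep (>=1.0)"])
    buf = io.StringIO()
    meta.write_pkg_file(buf)
    return buf.getvalue()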
if __name__ == "__main__":
dist = Distribution()
print("ok")
|
|
''' Some tests for filters '''
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import (assert_equal, assert_allclose,
assert_array_equal, assert_almost_equal,
suppress_warnings)
from pytest import raises as assert_raises
import scipy.ndimage as sndi
from scipy.ndimage.filters import _gaussian_kernel1d, rank_filter
def test_ticket_701():
# Test generic filter sizes
arr = np.arange(4).reshape((2,2))
func = lambda x: np.min(x)
res = sndi.generic_filter(arr, func, size=(1,1))
# The following raises an error unless ticket 701 is fixed
res2 = sndi.generic_filter(arr, func, size=1)
assert_equal(res, res2)
def test_gh_5430():
# At least one of these raises an error unless gh-5430 is
# fixed. In py2k an int is implemented using a C long, so
# which one fails depends on your system. In py3k there is only
# one arbitrary precision integer type, so both should fail.
sigma = np.int32(1)
out = sndi._ni_support._normalize_sequence(sigma, 1)
assert_equal(out, [sigma])
sigma = np.int64(1)
out = sndi._ni_support._normalize_sequence(sigma, 1)
assert_equal(out, [sigma])
# This worked before; make sure it still works
sigma = 1
out = sndi._ni_support._normalize_sequence(sigma, 1)
assert_equal(out, [sigma])
# This worked before; make sure it still works
sigma = [1, 1]
out = sndi._ni_support._normalize_sequence(sigma, 2)
assert_equal(out, sigma)
    # Also include the OP's original example to make sure we fixed the issue
x = np.random.normal(size=(256, 256))
perlin = np.zeros_like(x)
for i in 2**np.arange(6):
perlin += sndi.filters.gaussian_filter(x, i, mode="wrap") * i**2
    # This also fixes gh-4106; show that the OP's example now runs.
x = np.int64(21)
sndi._ni_support._normalize_sequence(x, 0)
def test_gaussian_kernel1d():
radius = 10
sigma = 2
sigma2 = sigma * sigma
x = np.arange(-radius, radius + 1, dtype=np.double)
phi_x = np.exp(-0.5 * x * x / sigma2)
phi_x /= phi_x.sum()
assert_allclose(phi_x, _gaussian_kernel1d(sigma, 0, radius))
assert_allclose(-phi_x * x / sigma2, _gaussian_kernel1d(sigma, 1, radius))
assert_allclose(phi_x * (x * x / sigma2 - 1) / sigma2,
_gaussian_kernel1d(sigma, 2, radius))
assert_allclose(phi_x * (3 - x * x / sigma2) * x / (sigma2 * sigma2),
_gaussian_kernel1d(sigma, 3, radius))
def test_orders_gauss():
# Check order inputs to Gaussians
arr = np.zeros((1,))
assert_equal(0, sndi.gaussian_filter(arr, 1, order=0))
assert_equal(0, sndi.gaussian_filter(arr, 1, order=3))
assert_raises(ValueError, sndi.gaussian_filter, arr, 1, -1)
assert_equal(0, sndi.gaussian_filter1d(arr, 1, axis=-1, order=0))
assert_equal(0, sndi.gaussian_filter1d(arr, 1, axis=-1, order=3))
assert_raises(ValueError, sndi.gaussian_filter1d, arr, 1, -1, -1)
def test_valid_origins():
"""Regression test for #1311."""
func = lambda x: np.mean(x)
data = np.array([1,2,3,4,5], dtype=np.float64)
assert_raises(ValueError, sndi.generic_filter, data, func, size=3,
origin=2)
assert_raises(ValueError, sndi.generic_filter1d, data, func,
filter_size=3, origin=2)
assert_raises(ValueError, sndi.percentile_filter, data, 0.2, size=3,
origin=2)
for filter in [sndi.uniform_filter, sndi.minimum_filter,
sndi.maximum_filter, sndi.maximum_filter1d,
sndi.median_filter, sndi.minimum_filter1d]:
# This should work, since for size == 3, the valid range for origin is
# -1 to 1.
list(filter(data, 3, origin=-1))
list(filter(data, 3, origin=1))
# Just check this raises an error instead of silently accepting or
# segfaulting.
assert_raises(ValueError, filter, data, 3, origin=2)
def test_bad_convolve_and_correlate_origins():
"""Regression test for gh-822."""
# Before gh-822 was fixed, these would generate seg. faults or
    # other crashes on many systems.
assert_raises(ValueError, sndi.correlate1d,
[0, 1, 2, 3, 4, 5], [1, 1, 2, 0], origin=2)
assert_raises(ValueError, sndi.correlate,
[0, 1, 2, 3, 4, 5], [0, 1, 2], origin=[2])
assert_raises(ValueError, sndi.correlate,
np.ones((3, 5)), np.ones((2, 2)), origin=[0, 1])
assert_raises(ValueError, sndi.convolve1d,
np.arange(10), np.ones(3), origin=-2)
assert_raises(ValueError, sndi.convolve,
np.arange(10), np.ones(3), origin=[-2])
assert_raises(ValueError, sndi.convolve,
np.ones((3, 5)), np.ones((2, 2)), origin=[0, -2])
def test_multiple_modes():
    # Test that the filters with multiple mode capabilities for different
# dimensions give the same result as applying a single mode.
arr = np.array([[1., 0., 0.],
[1., 1., 0.],
[0., 0., 0.]])
mode1 = 'reflect'
mode2 = ['reflect', 'reflect']
assert_equal(sndi.gaussian_filter(arr, 1, mode=mode1),
sndi.gaussian_filter(arr, 1, mode=mode2))
assert_equal(sndi.prewitt(arr, mode=mode1),
sndi.prewitt(arr, mode=mode2))
assert_equal(sndi.sobel(arr, mode=mode1),
sndi.sobel(arr, mode=mode2))
assert_equal(sndi.laplace(arr, mode=mode1),
sndi.laplace(arr, mode=mode2))
assert_equal(sndi.gaussian_laplace(arr, 1, mode=mode1),
sndi.gaussian_laplace(arr, 1, mode=mode2))
assert_equal(sndi.maximum_filter(arr, size=5, mode=mode1),
sndi.maximum_filter(arr, size=5, mode=mode2))
assert_equal(sndi.minimum_filter(arr, size=5, mode=mode1),
sndi.minimum_filter(arr, size=5, mode=mode2))
assert_equal(sndi.gaussian_gradient_magnitude(arr, 1, mode=mode1),
sndi.gaussian_gradient_magnitude(arr, 1, mode=mode2))
assert_equal(sndi.uniform_filter(arr, 5, mode=mode1),
sndi.uniform_filter(arr, 5, mode=mode2))
def test_multiple_modes_sequentially():
    # Test that the filters with multiple mode capabilities for different
# dimensions give the same result as applying the filters with
# different modes sequentially
arr = np.array([[1., 0., 0.],
[1., 1., 0.],
[0., 0., 0.]])
modes = ['reflect', 'wrap']
expected = sndi.gaussian_filter1d(arr, 1, axis=0, mode=modes[0])
expected = sndi.gaussian_filter1d(expected, 1, axis=1, mode=modes[1])
assert_equal(expected,
sndi.gaussian_filter(arr, 1, mode=modes))
expected = sndi.uniform_filter1d(arr, 5, axis=0, mode=modes[0])
expected = sndi.uniform_filter1d(expected, 5, axis=1, mode=modes[1])
assert_equal(expected,
sndi.uniform_filter(arr, 5, mode=modes))
expected = sndi.maximum_filter1d(arr, size=5, axis=0, mode=modes[0])
expected = sndi.maximum_filter1d(expected, size=5, axis=1, mode=modes[1])
assert_equal(expected,
sndi.maximum_filter(arr, size=5, mode=modes))
expected = sndi.minimum_filter1d(arr, size=5, axis=0, mode=modes[0])
expected = sndi.minimum_filter1d(expected, size=5, axis=1, mode=modes[1])
assert_equal(expected,
sndi.minimum_filter(arr, size=5, mode=modes))
def test_multiple_modes_prewitt():
# Test prewitt filter for multiple extrapolation modes
arr = np.array([[1., 0., 0.],
[1., 1., 0.],
[0., 0., 0.]])
expected = np.array([[1., -3., 2.],
[1., -2., 1.],
[1., -1., 0.]])
modes = ['reflect', 'wrap']
assert_equal(expected,
sndi.prewitt(arr, mode=modes))
def test_multiple_modes_sobel():
# Test sobel filter for multiple extrapolation modes
arr = np.array([[1., 0., 0.],
[1., 1., 0.],
[0., 0., 0.]])
expected = np.array([[1., -4., 3.],
[2., -3., 1.],
[1., -1., 0.]])
modes = ['reflect', 'wrap']
assert_equal(expected,
sndi.sobel(arr, mode=modes))
def test_multiple_modes_laplace():
# Test laplace filter for multiple extrapolation modes
arr = np.array([[1., 0., 0.],
[1., 1., 0.],
[0., 0., 0.]])
expected = np.array([[-2., 2., 1.],
[-2., -3., 2.],
[1., 1., 0.]])
modes = ['reflect', 'wrap']
assert_equal(expected,
sndi.laplace(arr, mode=modes))
def test_multiple_modes_gaussian_laplace():
# Test gaussian_laplace filter for multiple extrapolation modes
arr = np.array([[1., 0., 0.],
[1., 1., 0.],
[0., 0., 0.]])
expected = np.array([[-0.28438687, 0.01559809, 0.19773499],
[-0.36630503, -0.20069774, 0.07483620],
[0.15849176, 0.18495566, 0.21934094]])
modes = ['reflect', 'wrap']
assert_almost_equal(expected,
sndi.gaussian_laplace(arr, 1, mode=modes))
def test_multiple_modes_gaussian_gradient_magnitude():
# Test gaussian_gradient_magnitude filter for multiple
# extrapolation modes
arr = np.array([[1., 0., 0.],
[1., 1., 0.],
[0., 0., 0.]])
expected = np.array([[0.04928965, 0.09745625, 0.06405368],
[0.23056905, 0.14025305, 0.04550846],
[0.19894369, 0.14950060, 0.06796850]])
modes = ['reflect', 'wrap']
calculated = sndi.gaussian_gradient_magnitude(arr, 1, mode=modes)
assert_almost_equal(expected, calculated)
def test_multiple_modes_uniform():
# Test uniform filter for multiple extrapolation modes
arr = np.array([[1., 0., 0.],
[1., 1., 0.],
[0., 0., 0.]])
expected = np.array([[0.32, 0.40, 0.48],
[0.20, 0.28, 0.32],
[0.28, 0.32, 0.40]])
modes = ['reflect', 'wrap']
assert_almost_equal(expected,
sndi.uniform_filter(arr, 5, mode=modes))
def test_gaussian_truncate():
# Test that Gaussian filters can be truncated at different widths.
# These tests only check that the result has the expected number
# of nonzero elements.
arr = np.zeros((100, 100), float)
arr[50, 50] = 1
num_nonzeros_2 = (sndi.gaussian_filter(arr, 5, truncate=2) > 0).sum()
assert_equal(num_nonzeros_2, 21**2)
num_nonzeros_5 = (sndi.gaussian_filter(arr, 5, truncate=5) > 0).sum()
assert_equal(num_nonzeros_5, 51**2)
# Test truncate when sigma is a sequence.
f = sndi.gaussian_filter(arr, [0.5, 2.5], truncate=3.5)
fpos = f > 0
n0 = fpos.any(axis=0).sum()
# n0 should be 2*int(2.5*3.5 + 0.5) + 1
assert_equal(n0, 19)
n1 = fpos.any(axis=1).sum()
# n1 should be 2*int(0.5*3.5 + 0.5) + 1
assert_equal(n1, 5)
# Test gaussian_filter1d.
x = np.zeros(51)
x[25] = 1
f = sndi.gaussian_filter1d(x, sigma=2, truncate=3.5)
n = (f > 0).sum()
assert_equal(n, 15)
# Test gaussian_laplace
y = sndi.gaussian_laplace(x, sigma=2, truncate=3.5)
nonzero_indices = np.nonzero(y != 0)[0]
n = nonzero_indices.ptp() + 1
assert_equal(n, 15)
# Test gaussian_gradient_magnitude
y = sndi.gaussian_gradient_magnitude(x, sigma=2, truncate=3.5)
nonzero_indices = np.nonzero(y != 0)[0]
n = nonzero_indices.ptp() + 1
assert_equal(n, 15)
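# Illustrative extra check (not from the original suite): the nonzero support
# of a truncated Gaussian impulse response should span
# 2 * int(truncate * sigma + 0.5) + 1 samples, which is the arithmetic behind
# the counts asserted in test_gaussian_truncate above.
def test_gaussian_truncate_radius_relation():
    x = np.zeros(101)
    x[50] = 1
    for sigma, truncate in [(1.0, 2.0), (2.0, 3.5), (3.0, 4.0)]:
        f = sndi.gaussian_filter1d(x, sigma=sigma, truncate=truncate)
        expected = 2 * int(truncate * sigma + 0.5) + 1
        assert_equal((f > 0).sum(), expected)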
class TestThreading(object):
def check_func_thread(self, n, fun, args, out):
from threading import Thread
thrds = [Thread(target=fun, args=args, kwargs={'output': out[x]}) for x in range(n)]
[t.start() for t in thrds]
[t.join() for t in thrds]
def check_func_serial(self, n, fun, args, out):
for i in range(n):
fun(*args, output=out[i])
def test_correlate1d(self):
d = np.random.randn(5000)
os = np.empty((4, d.size))
ot = np.empty_like(os)
self.check_func_serial(4, sndi.correlate1d, (d, np.arange(5)), os)
self.check_func_thread(4, sndi.correlate1d, (d, np.arange(5)), ot)
assert_array_equal(os, ot)
def test_correlate(self):
d = np.random.randn(500, 500)
k = np.random.randn(10, 10)
os = np.empty([4] + list(d.shape))
ot = np.empty_like(os)
self.check_func_serial(4, sndi.correlate, (d, k), os)
self.check_func_thread(4, sndi.correlate, (d, k), ot)
assert_array_equal(os, ot)
def test_median_filter(self):
d = np.random.randn(500, 500)
os = np.empty([4] + list(d.shape))
ot = np.empty_like(os)
self.check_func_serial(4, sndi.median_filter, (d, 3), os)
self.check_func_thread(4, sndi.median_filter, (d, 3), ot)
assert_array_equal(os, ot)
def test_uniform_filter1d(self):
d = np.random.randn(5000)
os = np.empty((4, d.size))
ot = np.empty_like(os)
self.check_func_serial(4, sndi.uniform_filter1d, (d, 5), os)
self.check_func_thread(4, sndi.uniform_filter1d, (d, 5), ot)
assert_array_equal(os, ot)
def test_minmax_filter(self):
d = np.random.randn(500, 500)
os = np.empty([4] + list(d.shape))
ot = np.empty_like(os)
self.check_func_serial(4, sndi.maximum_filter, (d, 3), os)
self.check_func_thread(4, sndi.maximum_filter, (d, 3), ot)
assert_array_equal(os, ot)
self.check_func_serial(4, sndi.minimum_filter, (d, 3), os)
self.check_func_thread(4, sndi.minimum_filter, (d, 3), ot)
assert_array_equal(os, ot)
def test_minmaximum_filter1d():
# Regression gh-3898
in_ = np.arange(10)
out = sndi.minimum_filter1d(in_, 1)
assert_equal(in_, out)
out = sndi.maximum_filter1d(in_, 1)
assert_equal(in_, out)
# Test reflect
out = sndi.minimum_filter1d(in_, 5, mode='reflect')
assert_equal([0, 0, 0, 1, 2, 3, 4, 5, 6, 7], out)
out = sndi.maximum_filter1d(in_, 5, mode='reflect')
assert_equal([2, 3, 4, 5, 6, 7, 8, 9, 9, 9], out)
    # Test constant
out = sndi.minimum_filter1d(in_, 5, mode='constant', cval=-1)
assert_equal([-1, -1, 0, 1, 2, 3, 4, 5, -1, -1], out)
out = sndi.maximum_filter1d(in_, 5, mode='constant', cval=10)
assert_equal([10, 10, 4, 5, 6, 7, 8, 9, 10, 10], out)
# Test nearest
out = sndi.minimum_filter1d(in_, 5, mode='nearest')
assert_equal([0, 0, 0, 1, 2, 3, 4, 5, 6, 7], out)
out = sndi.maximum_filter1d(in_, 5, mode='nearest')
assert_equal([2, 3, 4, 5, 6, 7, 8, 9, 9, 9], out)
# Test wrap
out = sndi.minimum_filter1d(in_, 5, mode='wrap')
assert_equal([0, 0, 0, 1, 2, 3, 4, 5, 0, 0], out)
out = sndi.maximum_filter1d(in_, 5, mode='wrap')
assert_equal([9, 9, 4, 5, 6, 7, 8, 9, 9, 9], out)
def test_uniform_filter1d_roundoff_errors():
# gh-6930
in_ = np.repeat([0, 1, 0], [9, 9, 9])
for filter_size in range(3, 10):
out = sndi.uniform_filter1d(in_, filter_size)
assert_equal(out.sum(), 10 - filter_size)
def test_footprint_all_zeros():
# regression test for gh-6876: footprint of all zeros segfaults
arr = np.random.randint(0, 100, (100, 100))
kernel = np.zeros((3, 3), bool)
with assert_raises(ValueError):
sndi.maximum_filter(arr, footprint=kernel)
def test_gaussian_filter():
# Test gaussian filter with np.float16
# gh-8207
    data = np.array([1], dtype=np.float16)
sigma = 1.0
with assert_raises(RuntimeError):
        sndi.gaussian_filter(data, sigma)
def test_rank_filter_noninteger_rank():
# regression test for issue 9388: ValueError for
    # non-integer rank when performing rank_filter
arr = np.random.random((10, 20, 30))
assert_raises(TypeError, rank_filter, arr, 0.5,
footprint=np.ones((1, 1, 10), dtype=bool))
def test_size_footprint_both_set():
    # test for input validation: expect a user warning when both
    # size and footprint are set
with suppress_warnings() as sup:
sup.filter(UserWarning,
"ignoring size because footprint is set")
arr = np.random.random((10, 20, 30))
rank_filter(arr, 5, size=2, footprint=np.ones((1, 1, 10), dtype=bool))
|
|
from flask import jsonify, request
from sqlalchemy.exc import IntegrityError
from server import app, sqldb
# from server.account.courses import add_courses
from server.account.degrees import add_schools_and_majors
from server.models import Account
"""
Example: JSON Encoding
{
'first': 'Josh',
'last': 'Doman',
'image_url': null,
'pennkey': 'joshdo',
'pennid': '144363238',
'degrees': [
{
'school_name': 'Engineering & Applied Science',
'school_code': 'EAS',
'degree_name':'Bachelor of Science in Economics',
'degree_code':'BS',
'expected_grad_term': '2020A',
'majors': [
{
'major_name': 'Applied Science - Computer Science',
'major_code': 'ASCS'
}
]
}, {
'school_name': 'Wharton Undergraduate',
'school_code': 'WH',
'degree_name':'Bachelor of Applied Science',
'degree_code':'BAS',
'expected_grad_term': '2020A',
'majors': [
{
'major_name': 'Wharton Ung Program - Undeclared',
'major_code': 'WUNG'
}
]
}
],
'courses': [
{
'term': '2019A',
'name': 'Advanced Corp Finance',
'dept': 'FNCE',
'code': '203',
'section': '001',
'building': 'JMHH',
'room': '370',
'weekdays': 'MW',
'start_date': '2019-01-16',
'end_date': '2019-05-01',
'start_time': '10:30 AM',
'end_time': '12:00 PM',
'instructors': [
'Christian Opp',
'Kevin Kaiser'
],
'meeting_times': [
{
'weekday': 'M',
'start_time': '10:00 AM',
'end_time': '11:00 AM',
'building': 'JMHH',
'room': '255'
},
{
'weekday': 'W',
'start_time': '10:00 AM',
'end_time': '11:00 AM',
'building': 'TOWN',
'room': '100'
},
{
'weekday': 'R',
'start_time': '2:00 PM',
'end_time': '3:00 PM'
}
]
}
]
}
"""
@app.route("/account/register", methods=["POST"])
def register_account_endpoint():
""" Add/update a Penn account in the database with degrees (optional) and current courses (optional) """
json = request.get_json()
if json:
try:
account = get_account(json)
try:
sqldb.session.add(account)
sqldb.session.commit()
except IntegrityError:
sqldb.session.rollback()
account = update_account(account)
sqldb.session.commit()
degrees = json.get("degrees")
if degrees:
add_schools_and_majors(account, degrees)
# courses = json.get('courses')
# if courses:
# add_courses(account, courses)
return jsonify({"account_id": account.id})
except KeyError as e:
return jsonify({"error": str(e)}), 400
else:
return jsonify({"error": "JSON not passed"}), 400
def get_account(json):
first = json.get("first")
last = json.get("last")
pennkey = json.get("pennkey")
if pennkey is None:
raise KeyError("pennkey is missing")
pennid = json.get("pennid")
email = json.get("email")
affiliations_list = json.get("affiliations")
affiliation = None
image_url = json.get("image_url")
if not email:
email = get_potential_email(json)
if affiliations_list:
        # filter() returns a lazy iterator on Python 3, which is always truthy,
        # so build a list to make the emptiness check below meaningful.
        filtered_affiliations = [x for x in affiliations_list if x != "member"]
        if filtered_affiliations:
            affiliation = ",".join(filtered_affiliations)
return Account(
first=first,
last=last,
pennkey=pennkey,
pennid=pennid,
email=email,
affiliation=affiliation,
image_url=image_url,
)
def update_account(updated_account):
    # Update an account (guaranteed to exist because its pennkey is already in the database and pennkeys are unique)
account = Account.query.filter_by(pennkey=updated_account.pennkey).first()
if account:
account.first = updated_account.first
account.last = updated_account.last
if updated_account.email:
account.email = updated_account.email
if updated_account.image_url:
account.image_url = updated_account.image_url
if updated_account.pennid:
account.pennid = updated_account.pennid
if updated_account.affiliation:
account.affiliation = updated_account.affiliation
return account
def get_potential_email(json):
pennkey = json.get("pennkey")
degrees = json.get("degrees", None)
if degrees is None:
return None
email = None
if degrees:
for degree in degrees:
code = degree.get("school_code")
if code:
if "WH" in code:
return "{}@wharton.upenn.edu".format(pennkey)
elif "COL" in code:
email = "{}@sas.upenn.edu".format(pennkey)
elif "SAS" in code:
email = "{}@sas.upenn.edu".format(pennkey)
elif "EAS" in code:
email = "{}@seas.upenn.edu".format(pennkey)
elif "NUR" in code:
email = "{}@nursing.upenn.edu".format(pennkey)
elif "SOD" in code:
email = "{}@design.upenn.edu".format(pennkey)
elif "EDG" in code:
email = "{}@gse.upenn.edu".format(pennkey)
elif "GEP" in code:
email = "{}@seas.upenn.edu".format(pennkey)
elif "GAS" in code:
email = "{}@sas.upenn.edu".format(pennkey)
elif "GEN" in code:
email = "{}@seas.upenn.edu".format(pennkey)
elif "EDP" in code:
email = "{}@gse.upenn.edu".format(pennkey)
elif "LPS" in code:
email = "{}@sas.upenn.edu".format(pennkey)
elif "SP2" in code:
email = "{}@upenn.edu".format(pennkey)
elif "NUG" in code:
email = "{}@nursing.upenn.edu".format(pennkey)
return email
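# Illustrative sketch (hypothetical helper): how get_potential_email() resolves
# an address guess. A Wharton degree returns immediately; otherwise the last
# matching school code in the degrees list wins.
def _example_potential_email():
    json = {
        "pennkey": "joshdo",
        "degrees": [{"school_code": "EAS"}, {"school_code": "WH"}],
    }
    return get_potential_email(json)  # -> "joshdo@wharton.upenn.edu"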
|
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'OrganizationMember.invitor'
db.add_column('website_organizationmember', 'invitor', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='_member_invitor', null=True, to=orm['auth.User']), keep_default=False)
def backwards(self, orm):
# Deleting field 'OrganizationMember.invitor'
db.delete_column('website_organizationmember', 'invitor_id')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'website.action': {
'Meta': {'object_name': 'Action'},
'action_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.ActionCategory']", 'null': 'True', 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingLevel']", 'null': 'True', 'blank': 'True'}),
'question_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.QuestionCategory']", 'null': 'True', 'blank': 'True'}),
'scale': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.actioncategory': {
'Meta': {'object_name': 'ActionCategory'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'points': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'rating_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'})
},
'website.actiontutorial': {
'Meta': {'object_name': 'ActionTutorial'},
'action_identifier': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Tutorial']", 'null': 'True', 'blank': 'True'})
},
'website.address': {
'Meta': {'object_name': 'Address'},
'address1': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'address2': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
'zip_code': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'})
},
'website.answerchoice': {
'Meta': {'object_name': 'AnswerChoice'},
'answer_choice_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.AnswerChoiceGroup']"}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'website.answerchoicegroup': {
'Meta': {'object_name': 'AnswerChoiceGroup'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'})
},
'website.answerreference': {
'Meta': {'object_name': 'AnswerReference'},
'approval_status': ('django.db.models.fields.CharField', [], {'default': "'P'", 'max_length': '8', 'db_index': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'file_upload': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_callout': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_current': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'migrated_answer_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Question']"}),
'rating': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'rating_status': ('django.db.models.fields.CharField', [], {'default': "'U'", 'max_length': '8', 'db_index': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'website.applicability': {
'Meta': {'object_name': 'Applicability'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'website.application': {
'Meta': {'object_name': 'Application'},
'address': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Address']", 'null': 'True', 'blank': 'True'}),
'applicant': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'current_status': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'status_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'template': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Template']", 'null': 'True', 'blank': 'True'})
},
'website.applicationanswer': {
'Meta': {'object_name': 'ApplicationAnswer'},
'application': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Application']"}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'file_upload': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Question']"}),
'template': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Template']"}),
'value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'website.applicationhistory': {
'Meta': {'object_name': 'ApplicationHistory'},
'application': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Application']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'null': 'True', 'blank': 'True'}),
'status_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'status_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'})
},
'website.comment': {
'Meta': {'object_name': 'Comment'},
'approval_status': ('django.db.models.fields.CharField', [], {'default': "'P'", 'max_length': '8', 'db_index': 'True', 'blank': 'True'}),
'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'comment_type': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '8', 'db_index': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'rating': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'rating_status': ('django.db.models.fields.CharField', [], {'default': "'U'", 'max_length': '8', 'db_index': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.document': {
'Meta': {'object_name': 'Document'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'file_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'file_path': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Region']", 'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'})
},
'website.documentcategory': {
'Meta': {'object_name': 'DocumentCategory'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'website.entityview': {
'Meta': {'object_name': 'EntityView'},
'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latest_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'session_key': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.entityviewcount': {
'Meta': {'object_name': 'EntityViewCount'},
'count_30_days': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'total_count': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
},
'website.jurisdiction': {
'Meta': {'object_name': 'Jurisdiction'},
'city': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'county': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction_type': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Region']", 'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'null': 'True', 'blank': 'True'})
},
'website.jurisdictioncontributor': {
'Meta': {'object_name': 'JurisdictionContributor'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'points': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'question_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.QuestionCategory']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.organization': {
'Meta': {'object_name': 'Organization'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.OrganizationCategory']", 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'fax': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'logo_scaled': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'parent_org': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'phone': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'A'", 'max_length': '8', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'status_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'website.organizationaddress': {
'Meta': {'object_name': 'OrganizationAddress'},
'address': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Address']", 'null': 'True', 'blank': 'True'}),
'address_type': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'null': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'})
},
'website.organizationcategory': {
'Meta': {'object_name': 'OrganizationCategory'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'website.organizationmember': {
'Meta': {'object_name': 'OrganizationMember'},
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invitor': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'_member_invitor'", 'null': 'True', 'to': "orm['auth.User']"}),
'join_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Person']", 'null': 'True', 'blank': 'True'}),
'requested_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'role': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RoleType']", 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'P'", 'max_length': '8', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'_member_user'", 'null': 'True', 'to': "orm['auth.User']"})
},
'website.organizationrating': {
'Meta': {'object_name': 'OrganizationRating'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingLevel']", 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'scale': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'updated_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'website.person': {
'Meta': {'object_name': 'Person'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'phone_mobile': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'phone_primary': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'phone_secondary': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.personaddress': {
'Meta': {'object_name': 'PersonAddress'},
'address': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Address']", 'null': 'True', 'blank': 'True'}),
'address_type': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'null': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Person']", 'null': 'True', 'blank': 'True'})
},
'website.question': {
'Meta': {'object_name': 'Question'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'answer_choice_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.AnswerChoiceGroup']", 'null': 'True', 'blank': 'True'}),
'applicability': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Applicability']", 'null': 'True', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.QuestionCategory']", 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'default_value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'form_type': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instruction': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'label': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'question': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'website.questioncategory': {
'Meta': {'object_name': 'QuestionCategory'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'website.questiondependency': {
'Meta': {'object_name': 'QuestionDependency'},
'answer_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'question1': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'_questionDependency_question1'", 'to': "orm['website.Question']"}),
'question2': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'_questionDependency_question2'", 'to': "orm['website.Question']"}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'strength': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'})
},
'website.ratingcategory': {
'Meta': {'object_name': 'RatingCategory'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'rating_type': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'})
},
'website.ratinglevel': {
'Meta': {'object_name': 'RatingLevel'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'rank': ('django.db.models.fields.PositiveSmallIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
},
'website.reaction': {
'Meta': {'object_name': 'Reaction'},
'action': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Action']", 'null': 'True', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.ReactionCategory']", 'null': 'True', 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingLevel']", 'null': 'True', 'blank': 'True'}),
'question_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.QuestionCategory']", 'null': 'True', 'blank': 'True'}),
'reaction_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'scale': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.reactioncategory': {
'Meta': {'object_name': 'ReactionCategory'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'points': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'rating_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'})
},
'website.region': {
'Meta': {'object_name': 'Region'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'null': 'True', 'blank': 'True'})
},
'website.rewardcategory': {
'Meta': {'object_name': 'RewardCategory'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'website.roletype': {
'Meta': {'object_name': 'RoleType'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'website.template': {
'Meta': {'object_name': 'Template'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'template_type': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '8', 'db_index': 'True', 'blank': 'True'})
},
'website.templatequestion': {
'Meta': {'object_name': 'TemplateQuestion'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Question']"}),
'template': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Template']"})
},
'website.tutorial': {
'Meta': {'object_name': 'Tutorial'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'end_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'start_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'website.tutorialpage': {
'Meta': {'object_name': 'TutorialPage'},
'display_order': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'selector': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'tip': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Tutorial']", 'null': 'True', 'blank': 'True'})
},
'website.userdetail': {
'Meta': {'object_name': 'UserDetail'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'notification_preference': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'old_password': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'reset_password_key': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.userfavorite': {
'Meta': {'object_name': 'UserFavorite'},
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.userrating': {
'Meta': {'object_name': 'UserRating'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingLevel']", 'null': 'True', 'blank': 'True'}),
'scale': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'updated_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.userreward': {
'Meta': {'object_name': 'UserReward'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reward': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RewardCategory']", 'null': 'True', 'blank': 'True'}),
'reward_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.usersearch': {
'Meta': {'object_name': 'UserSearch'},
'entity_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'search_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.usertutorialhistory': {
'Meta': {'object_name': 'UserTutorialHistory'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Tutorial']", 'null': 'True', 'blank': 'True'}),
'user_email': ('django.db.models.fields.EmailField', [], {'db_index': 'True', 'max_length': '75', 'null': 'True', 'blank': 'True'}),
'view_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'website.usertutorialpagehistory': {
'Meta': {'object_name': 'UserTutorialPageHistory'},
'checked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.TutorialPage']", 'null': 'True', 'blank': 'True'}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Tutorial']", 'null': 'True', 'blank': 'True'}),
'user_email': ('django.db.models.fields.EmailField', [], {'db_index': 'True', 'max_length': '75', 'null': 'True', 'blank': 'True'})
},
'website.zipcode': {
'Meta': {'object_name': 'Zipcode'},
'city': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'county': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'db_index': 'True', 'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'db_index': 'True', 'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '2', 'null': 'True', 'blank': 'True'}),
'zip_code': ('django.db.models.fields.CharField', [], {'max_length': '10', 'db_index': 'True'})
}
}
complete_apps = ['website']
|
|
# -*- coding: utf-8 -*-
"""
IAMService
"""
import time
import xml.sax.saxutils as saxutils
# post xml soap message
import sys, httplib
from lxml import etree
from cStringIO import StringIO
#import static
import toml
class IAMClient(object):
def __init__(self):
conf_fn = "config.toml"
with open(conf_fn) as conf_fh:
self.conf = toml.loads(conf_fh.read())["app"]
print(self.conf)
def searchAll(self, startPage, pageSize ):
#config = static.ERP_CONFIG #'SL 8.0'
query = {"username":self.conf["Admin"],"password":self.conf["Admin_Password"], "nonce":self.conf["Nonce"], "startPage":startPage, "pageSize": pageSize}
SM_TEMPLATE = r"""<?xml version="1.0" encoding="UTF-8"?>
<soapenv:Envelope xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/" xmlns:sear="http://search.service.iam.foton.com/">
<soapenv:Header>
<wsse:Security soapenv:mustUnderstand="1" xmlns:wsse="http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-secext-1.0.xsd">
<wsse:UsernameToken wsu:Id="UsernameToken-1" xmlns:wsu="http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-utility-1.0.xsd">
<wsse:Username>%(username)s</wsse:Username>
<wsse:Password Type="http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-username-token-profile-1.0#PasswordText">%(password)s</wsse:Password>
<wsse:Nonce EncodingType="http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-soap-message-security-1.0#Base64Binary">%(nonce)s</wsse:Nonce>
<wsu:Created>2012-07-06T01:49:02.953Z</wsu:Created>
</wsse:UsernameToken>
</wsse:Security>
</soapenv:Header>
<soapenv:Body>
<sear:searchAll>
<arg0>%(startPage)s</arg0>
<arg1>%(pageSize)s</arg1>
<!--Optional:-->
<arg2>ou</arg2>
<arg3>true</arg3>
</sear:searchAll>
</soapenv:Body>
</soapenv:Envelope>""" % query
SoapMessage = SM_TEMPLATE
#print SoapMessage
#construct and send the header
host =self.conf["HOST"]
print(host)
webservice = httplib.HTTP(host)
service = self.conf["Service2"]
url = "/IAMService/services/soap/%s" %(service)
webservice.putrequest("POST", url)
webservice.putheader("Host", host)
webservice.putheader("User-Agent", "Mozilla/4.0+(compatible;+MSIE+6.0;+Windows+NT+5.2;+SV1;+.NET+CLR+1.1.4322)")
webservice.putheader("Content-type", "text/xml; charset=\"UTF-8\"")
webservice.putheader("Accept-Language", "en-us")
webservice.putheader("Content-length", "%d" % len(SoapMessage))
#webservice.putheader("SOAPAction", "authenticate")
webservice.endheaders()
webservice.send(SoapMessage)
# get the response
statuscode, statusmessage, header = webservice.getreply()
print "Response: ", statuscode, statusmessage, startPage
#print "headers: ", header
#print dir(webservice)
res = webservice.getfile().read()
fn = "%d.xml" %(time.time())
#print res
#with open(fn, 'w') as fh:
# fh.write(res)
return res #self.parseSessionToken(res)
def getResponse(self, xmlstr):
string_file = StringIO(xmlstr.replace('soap:',''))
#root = etree.fromstring(xml)
tree = etree.parse(string_file)
resp = None
for element in tree.xpath('/Envelope/Body'):
resp = element[0][1].text
return resp
def getResult(self, xmlstr):
resp = self.getResponse(xmlstr)
string_file = StringIO(resp)
#root = etree.fromstring(xml)
tree = etree.parse(string_file)
result = None
v = tree.xpath('/Parameters')[0]
l = len(v)
result = v[l-1].text
if result.count('successful') >0:
return "S"
else:
return "F"
def get_element_text(element, node):
v = element.xpath(node)
if len(v)>0:
#print v[0].text.encode("utf8")
return v[0].text.encode("utf8")
else:
return ""
def main():
cm = IAMClient()
fh = open("id3.csv","w")
for i in range(1, 20):
xmlstr = cm.searchAll(i,10)
string_file = StringIO(xmlstr.replace('soap:','').replace("ns2:",""))
#root = etree.fromstring(xml)
tree = etree.parse(string_file)
resp = None
for element in tree.xpath('/Envelope/Body/searchAllResponse/return/userData'):
#resp = element[0][1].text
#print "\n"
v1 = get_element_text(element, "cn")
v2 = get_element_text(element, "mail")
v3 = get_element_text(element, "fotonAppAtt37")
v4 = get_element_text(element, "mobile")
v5 = get_element_text(element, "telephoneNumber")
v6 = get_element_text(element, "uid")
v7 = get_element_text(element, "ou")
#print userPassword[0].text,
x = "%s,%s,%s,%s,%s,%s,%s\n" % (v1, v2, v3, v4, v5, v6, v7)
fh.write(x)
time.sleep(0.5)
fh.close()
"""
token = cm.parseSessionToken(xmlstr)
rtn = cm.callMethod(token, "")
print cm.getResult(rtn)
"""
if __name__ == '__main__':
main()
|
|
import collections
import os
import yaml
import re
import shlex
from .constants import *
from .utils import *
from .dockerutil import make_vol_opt
class ConfigError(Exception):
pass
class ConfigNotFoundError(ConfigError):
pass
class OverrideMixin:
'''
A mixin class that indicates an instance's value should override something
This class is mixed into objects loaded from YAML with an !override tag,
and any object can be checked if it is an OverrideMixin using isinstance().
'''
pass
class OverrideNone(OverrideMixin):
'''
Represents a None value that also has Override behavior
'''
def __bool__(self):
return False
class OverrideList(collections.UserList, OverrideMixin):
pass
class OverrideStr(str, OverrideMixin):
pass
# http://stackoverflow.com/a/9577670
class Loader(yaml.SafeLoader):
def __init__(self, stream, root=None):
if root is None:
self._root = os.path.split(stream.name)[0]
else:
self._root = root
self._cache = dict()
super().__init__(stream)
def from_yaml(self, node):
'''
Implements a !from_yaml constructor with the following syntax:
!from_yaml filename key
Arguments:
filename: Filename of external YAML document from which to load,
relative to the current YAML file.
key: Key from external YAML document to return,
using a dot-separated syntax for nested keys.
Examples:
!from_yaml external.yml pop
!from_yaml external.yml foo.bar.pop
!from_yaml "another file.yml" "foo bar.snap crackle.pop"
'''
# Load the content from the node, as a scalar
content = self.construct_scalar(node)
# Split on unquoted spaces
parts = shlex.split(content)
if len(parts) != 2:
raise yaml.YAMLError('Two arguments expected to !from_yaml')
filename, key = parts
# path is relative to the current YAML document
path = os.path.join(self._root, filename)
# Load the other YAML document
doc = self._cache.get(path)
if not doc:
with open(path, 'r') as f:
doc = yaml.load(f, self.__class__)
self._cache[path] = doc
# Retrieve the key
try:
cur = doc
# Use a negative look-behind to split the key on non-escaped '.' characters
for k in re.split(r'(?<!\\)\.', key):
cur = cur[k.replace('\\.', '.')] # Be sure to replace any escaped '.' characters with *just* the '.'
except KeyError:
raise yaml.YAMLError('Key "{}" not found in {}'.format(key, filename))
return cur
def override(self, node):
'''
Implements !override constructor
'''
# Load the content from the node, as a scalar
content = self.construct_scalar(node)
# Dynamically add an OverrideMixin to the resulting object's type
obj = yaml.load(content, lambda s: Loader(s, root=self._root))
if obj is None:
obj = OverrideNone()
else:
objtype = type(obj)
mixin_type = type('Override' + objtype.__name__, (objtype, OverrideMixin), dict())
try:
obj.__class__ = mixin_type
except TypeError:
# Primitive classes (e.g., int, str) don't support __class__ assignment
obj = mixin_type(obj)
return obj
Loader.add_constructor('!from_yaml', Loader.from_yaml)
Loader.add_constructor('!override', Loader.override)
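# Hedged usage sketch (not part of scuba itself): shows how the Loader registered
# above resolves the !from_yaml and !override tags. The file names and values below
# are invented purely for illustration.
def _demo_custom_tags():  # pragma: no cover
    import tempfile
    with tempfile.TemporaryDirectory() as tmp:
        with open(os.path.join(tmp, 'external.yml'), 'w') as f:
            f.write('compiler:\n  image: gcc:9\n')
        with open(os.path.join(tmp, 'main.yml'), 'w') as f:
            f.write('image: !from_yaml external.yml compiler.image\n'
                    'shell: !override /bin/bash\n')
        with open(os.path.join(tmp, 'main.yml')) as f:
            data = yaml.load(f, Loader)
        assert data['image'] == 'gcc:9'                   # value pulled from external.yml
        assert isinstance(data['shell'], OverrideMixin)   # tagged value carries override behavior
    return data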
def find_config():
'''Search up the directory hierarchy for .scuba.yml
Returns: path, rel, config on success, or None if not found
path The absolute path of the directory where .scuba.yml was found
rel The relative path from the directory where .scuba.yml was found
to the current directory
config The loaded configuration
'''
cross_fs = 'SCUBA_DISCOVERY_ACROSS_FILESYSTEM' in os.environ
path = os.getcwd()
rel = ''
while True:
cfg_path = os.path.join(path, SCUBA_YML)
if os.path.exists(cfg_path):
return path, rel, load_config(cfg_path)
if not cross_fs and os.path.ismount(path):
msg = '{} not found here or any parent up to mount point {}'.format(SCUBA_YML, path) \
+ '\nStopping at filesystem boundary (SCUBA_DISCOVERY_ACROSS_FILESYSTEM not set).'
raise ConfigNotFoundError(msg)
# Traverse up directory hierarchy
path, rest = os.path.split(path)
if not rest:
raise ConfigNotFoundError('{} not found here or any parent directories'.format(SCUBA_YML))
# Accumulate the relative path back to where we started
rel = os.path.join(rest, rel)
def _process_script_node(node, name):
'''Process a script-type node
This handles nodes that follow the *Common script schema*,
as outlined in doc/yaml-reference.md.
'''
if isinstance(node, str):
# The script is just the text itself
return [node]
if isinstance(node, dict):
# There must be a "script" key, which must be a list of strings
script = node.get('script')
if not script:
raise ConfigError("{}: must have a 'script' subkey".format(name))
if isinstance(script, list):
return script
if isinstance(script, str):
return [script]
raise ConfigError("{}.script: must be a string or list".format(name))
raise ConfigError("{}: must be string or dict".format(name))
def _process_environment(node, name):
# Environment can be either a list of strings ("KEY=VALUE") or a mapping
# Environment keys and values are always strings
result = {}
if not node:
pass
elif isinstance(node, dict):
for k, v in node.items():
if v is None:
v = os.getenv(k, '')
result[k] = str(v)
elif isinstance(node, list):
for e in node:
k, v = parse_env_var(e)
result[k] = v
else:
raise ConfigError("'{}' must be list or mapping, not {}".format(
name, type(node).__name__))
return result
def _get_nullable_str(data, key):
# N.B. We can't use data.get() here, because that might return
# None, leading to ambiguity between the key being absent or set
# to a null value.
#
# "Note that a null is different from an empty string and that a
# mapping entry with some key and a null value is valid and
# different from not having that key in the mapping."
# - http://yaml.org/type/null.html
if key not in data:
return None
ep = data[key]
# We represent a null value as an empty string.
if isinstance(ep, OverrideNone):
ep = OverrideStr('')
elif ep is None:
ep = ''
if not isinstance(ep, str):
raise ConfigError("'{}' must be a string, not {}".format(
key, type(ep).__name__))
return ep
def _get_entrypoint(data):
return _get_nullable_str(data, 'entrypoint')
def _get_docker_args(data):
args = _get_nullable_str(data, 'docker_args')
if args is not None:
override = isinstance(args, OverrideMixin)
args = shlex.split(args)
if override:
args = OverrideList(args)
return args
def _get_typed_val(data, key, type_):
v = data.get(key)
if v is not None and not isinstance(v, type_):
raise ConfigError("'{}' must be a {}, not {}".format(
key, type_.__name__, type(v).__name__))
return v
def _get_dict(data, key):
return _get_typed_val(data, key, dict)
def _get_delimited_str_list(data, key, sep):
s = _get_typed_val(data, key, str)
return s.split(sep) if s else []
def _get_volumes(data):
voldata = _get_dict(data, 'volumes')
if voldata is None:
return None
vols = {}
for cpath, v in voldata.items():
cpath = _expand_path(cpath)
vols[cpath] = ScubaVolume.from_dict(cpath, v)
return vols
def _expand_path(in_str):
try:
output = expand_env_vars(in_str)
except KeyError as ke:
# pylint: disable=raise-missing-from
raise ConfigError("Unset environment variable '{}' used in '{}'".format(ke.args[0], in_str))
except ValueError as ve:
raise ConfigError("Unable to expand string '{}' due to parsing "
"errors".format(in_str)) from ve
return output
class ScubaVolume:
def __init__(self, container_path, host_path=None, options=None):
self.container_path = container_path
self.host_path = host_path
self.options = options or []
@classmethod
def from_dict(cls, cpath, node):
# Treat a null node as an empty dict
if node is None:
node = {}
# Simple form:
# volumes:
# /foo: /host/foo
if isinstance(node, str):
return cls(
container_path = cpath,
host_path = _expand_path(node),
)
# Complex form
# volumes:
# /foo:
# hostpath: /host/foo
# options: ro,z
if isinstance(node, dict):
hpath = node.get('hostpath')
if hpath is None:
raise ConfigError("Volume {} must have a 'hostpath' subkey".format(cpath))
return cls(
container_path = cpath,
host_path = _expand_path(hpath),
options = _get_delimited_str_list(node, 'options', ','),
)
raise ConfigError("{}: must be string or dict".format(cpath))
def get_vol_opt(self):
if not self.host_path:
raise NotImplementedError("No anonymous volumes for now")
return make_vol_opt(self.host_path, self.container_path, self.options)
class ScubaAlias:
def __init__(self, name, script, image=None, entrypoint=None,
environment=None, shell=None, as_root=None, docker_args=None,
volumes=None):
self.name = name
self.script = script
self.image = image
self.entrypoint = entrypoint
self.environment = environment
self.shell = shell
self.as_root = bool(as_root)
self.docker_args = docker_args
self.volumes = volumes
@classmethod
def from_dict(cls, name, node):
script = _process_script_node(node, name)
if isinstance(node, dict): # Rich alias
return cls(
name = name,
script = script,
image = node.get('image'),
entrypoint = _get_entrypoint(node),
environment = _process_environment(
node.get('environment'),
'{}.{}'.format(name, 'environment')),
shell = node.get('shell'),
as_root = node.get('root'),
docker_args = _get_docker_args(node),
volumes = _get_volumes(node),
)
return cls(name=name, script=script)
class ScubaConfig:
def __init__(self, **data):
optional_nodes = (
'image', 'aliases', 'hooks', 'entrypoint', 'environment', 'shell',
'docker_args', 'volumes',
)
# Check for unrecognized nodes
extra = [n for n in data if n not in optional_nodes]
if extra:
raise ConfigError('{}: Unrecognized node{}: {}'.format(SCUBA_YML,
's' if len(extra) > 1 else '', ', '.join(extra)))
self._image = data.get('image')
self.shell = data.get('shell', DEFAULT_SHELL)
self.entrypoint = _get_entrypoint(data)
self.docker_args = _get_docker_args(data)
self.volumes = _get_volumes(data)
self._load_aliases(data)
self._load_hooks(data)
self._load_environment(data)
def _load_aliases(self, data):
self.aliases = {}
for name, node in data.get('aliases', {}).items():
if ' ' in name:
raise ConfigError('Alias names cannot contain spaces')
self.aliases[name] = ScubaAlias.from_dict(name, node)
def _load_hooks(self, data):
self.hooks = {}
for name in ('user', 'root',):
node = data.get('hooks', {}).get(name)
if node:
hook = _process_script_node(node, name)
self.hooks[name] = hook
def _load_environment(self, data):
self.environment = _process_environment(data.get('environment'), 'environment')
@property
def image(self):
if not self._image:
raise ConfigError("Top-level 'image' not set")
return self._image
def load_config(path):
try:
with open(path, 'r') as f:
data = yaml.load(f, Loader)
except IOError as e:
raise ConfigError('Error opening {}: {}'.format(SCUBA_YML, e))
except yaml.YAMLError as e:
raise ConfigError('Error loading {}: {}'.format(SCUBA_YML, e))
return ScubaConfig(**(data or {}))
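# Hedged sketch (illustrative only): constructing a ScubaConfig directly from an
# already-parsed mapping, the same way load_config() does after reading .scuba.yml.
# The image name and alias below are invented for the example.
def _demo_scuba_config():  # pragma: no cover
    cfg = ScubaConfig(
        image='debian:bullseye',
        aliases={'build': 'make all'},
        environment={'FOO': 'bar'},
    )
    assert cfg.image == 'debian:bullseye'
    assert cfg.aliases['build'].script == ['make all']
    assert cfg.environment == {'FOO': 'bar'}
    return cfg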
|
|
#!/usr/bin/env python
import unittest
import icetotgd
import datetime
class T(unittest.TestCase):
fake_files = {
'trivial.xml': '''<?xml version="1.0" encoding="ISO-8859-1"?>
<!DOCTYPE tv SYSTEM "http://iceguide.icetv.com.au/iceguide/iceguide.dtd">
<tv>
<channel id="2300">
<display-name>TwentyThree</display-name>
<region-name>Melbourne</region-name>
<lcn>23</lcn>
</channel>
<programme start="20091030110000 +0000" stop="20091030113000 +0000" channel="2300">
<title lang="en">Spiderman</title>
<sub-title lang="en">The One Where Spiderman Eats Salad</sub-title>
<desc lang="en">Action is his reward.</desc>
<category lang="en">News</category>
<category lang="en">Sport</category>
<episode-num system="icetv">169-0</episode-num>
</programme>
<programme start="20091104093500 +0000" stop="20091104110500 +0000" channel="32">
<title lang="en">Soccer: UEFA Champions League</title>
<sub-title lang="en">TBA</sub-title>
<category lang="en">Football</category>
<category lang="en">Soccer</category>
<category lang="en">Sport</category>
<episode-num system="icetv">14328-72386</episode-num>
<previously-shown start="20090917"/>
</programme>
<programme start="20091105093500 +0000" stop="20091105110500 +0000" channel="32">
<title lang="en">Suburb of the Moths</title>
<desc lang="en">A suburb is terrorised by shrimp moths from hell.</desc>
<credits>
<director>Dave Keenan</director>
<actor>Marvin O'Gravel Balloon-Face</actor>
<actor>Oliver Boliver Butt</actor>
<actor>Zanzibar Buck-Buck McFate</actor>
</credits>
<date>1996</date>
<category lang="en">Movie</category>
<subtitles type="teletext"/>
<rating system="">
<value>M</value>
</rating>
<previously-shown/>
</programme>
<programme start="20091030110000 +0000" stop="20091030113000 +0000" channel="2300">
<category lang="en"></category>
</programme>
</tv>
'''}
programme = \
{'title': 'Spiderman',
'sub-title': 'The One Where Spiderman Eats Salad',
'desc': 'Action is his reward.',
'categories': ['News', 'Sport'],
'channel': '2300',
'start': datetime.datetime(2009, 10, 30, 11, 0),
'stop': datetime.datetime(2009, 10, 30, 11, 30)}
def open_fake(self, filename):
from StringIO import StringIO
return StringIO(self.fake_files[filename])
def setUp(self):
self.parser = icetotgd.IceToTgd()
self.parser.use_xml_file(self.open_fake('trivial.xml'))
def test_can_load_channels(self):
self.assertEqual(self.parser.channels,
{'2300': {'lcn': '23',
'display-name': 'TwentyThree'}})
def test_can_load_one_programme(self):
p = self.parser.programmes[0]
self.assertEqual(p['title'], 'Spiderman')
self.assertEqual(p['sub-title'], 'The One Where Spiderman Eats Salad')
self.assertEqual(p['desc'], 'Action is his reward.')
self.assertEqual(p['categories'], ['News', 'Sport'])
self.assertEqual(p['channel'], '2300')
self.assertTrue('rating' not in p)
self.assertEqual(p['start'], datetime.datetime(2009, 10, 30, 11, 0))
self.assertEqual(p['stop'], datetime.datetime(2009, 10, 30, 11, 30))
def test_time_to_tgd(self):
start = datetime.datetime(2009, 10, 30, 11, 30)
tgd_start = icetotgd.tgd_time_from_timestamp(start)
self.assertEqual(tgd_start, '2009/10/30 22:30')
def test_duration_to_tgd(self):
start = datetime.datetime(2009, 10, 30, 11, 00)
stop = datetime.datetime(2009, 10, 30, 11, 30)
duration = stop - start
tgd_duration = icetotgd.tgd_duration_from_timedelta(duration)
self.assertEqual(tgd_duration, '30')
def test_programme_to_tgd(self):
tgd_line = self.parser.programme_to_tgd(self.programme)
self.assertEqual(tgd_line,
'23\t2009/10/30 22:00\t30\tSpiderman\tThe One Where Spiderman Eats Salad [News/Sport]\tAction is his reward.\tX\tN')
def test_can_parse_programme_xml_without_desc(self):
p = self.parser.programmes[1]
self.assertTrue('desc' not in p)
def test_programme_xml_with_year(self):
p = self.parser.programmes[2]
self.assertEqual(p['date'], '1996')
def test_tgd_title_includes_year(self):
p = self.parser.programmes[2]
title = self.parser.tgd_title(p)
self.assertEqual(title, 'Suburb of the Moths (1996)')
def test_unrated_programmes_are_rated_x(self):
p = self.parser.programmes[0]
rating = self.parser.tgd_rating(p)
self.assertEqual(rating, 'X')
def test_can_get_programme_rating(self):
p = self.parser.programmes[2]
rating = self.parser.tgd_rating(p)
self.assertEqual(rating, 'M')
def test_description_says_subtitles_if_they_exist(self):
p = self.parser.programmes[2]
description = self.parser.tgd_description(p)
self.assertIn('[Subtitles]', description)
def test_description_doesnt_say_repeat_if_its_not_a_repeat(self):
p = self.parser.programmes[0]
description = self.parser.tgd_description(p)
self.assertTrue(description.find('[Repeat]') == -1)
def test_description_says_repeat_if_its_a_repeat(self):
p = self.parser.programmes[2]
description = self.parser.tgd_description(p)
self.assertTrue(description.find('[Repeat]') != -1)
def test_description_says_repeat_with_date_if_its_a_repeat_with_a_known_date(self):
p = self.parser.programmes[1]
description = self.parser.tgd_description(p)
date = datetime.date(2009, 9, 17).strftime('%x')
self.assertTrue(description.find('[Repeat, last shown ' + date + ']') != -1)
def test_tgd_short_description_includes_category(self):
p = self.parser.programmes[0]
short_desc = self.parser.tgd_short_description(p)
self.assertEqual(short_desc, 'The One Where Spiderman Eats Salad [News/Sport]')
def test_blank_categories_arent_retained(self):
p = self.parser.programmes[3]
self.assertTrue('categories' not in p)
def test_can_convert_programme_xml_without_desc(self):
p = self.programme.copy()
p['desc'] = None
tgd_line = self.parser.programme_to_tgd(p)
self.assertEqual(tgd_line,
'23\t2009/10/30 22:00\t30\tSpiderman\tThe One Where Spiderman Eats Salad [News/Sport]\t\tX\tN')
def test_str_or_empty(self):
from icetotgd import str_or_empty
self.assertEqual('', str_or_empty(None))
self.assertEqual('', str_or_empty(''))
self.assertEqual('foo', str_or_empty('foo'))
def test_filter_None_from_dict(self):
d = {'a': 3, 'b': [6, 16], 'c': None, 'd': 9}
d2 = icetotgd.filter_dict(d, lambda k,v: v is not None)
self.assertEqual(d2, {'a': 3, 'b': [6, 16], 'd': 9})
def test_filter_None_and_empty_list_from_dict(self):
def listp(x):
return type(x) is type([])
d = {'a': 3, 'b': [6, 16], 'c': None, 'd': []}
d2 = icetotgd.filter_dict(
d,
lambda k,v: (v is not None) and (not listp(v) or v))
self.assertEqual(d2, {'a': 3, 'b': [6, 16]})
def test_filename_from_programme(self):
filename = icetotgd.tgd_filename_from_programme(self.programme)
self.assertEqual(filename, '20091030.tgd')
def test_programme_xml_with_director(self):
p = self.parser.programmes[2]
self.assertEqual(p['directors'], ['Dave Keenan'])
def test_programme_xml_with_actors(self):
p = self.parser.programmes[2]
self.assertEqual(p['actors'],
["Marvin O'Gravel Balloon-Face",
'Oliver Boliver Butt',
'Zanzibar Buck-Buck McFate'])
def test_no_director_means_no_text_for_description(self):
p = self.parser.programmes[0]
text = self.parser.tgd_director_text(p)
self.assertEqual(text, None)
def test_single_director_text_for_description(self):
p = self.parser.programmes[2]
text = self.parser.tgd_director_text(p)
self.assertEqual(text, 'Dave Keenan')
def test_no_actors_means_no_text_for_description(self):
p = self.parser.programmes[0]
text = self.parser.tgd_cast_text(p)
self.assertEqual(text, None)
def test_three_actors_means_some_text_for_description(self):
p = self.parser.programmes[2]
text = self.parser.tgd_cast_text(p)
self.assertEqual(text, "Marvin O'Gravel Balloon-Face, Oliver Boliver Butt, Zanzibar Buck-Buck McFate")
def test_tgd_description_has_cast_and_crew(self):
p = self.parser.programmes[2]
text = self.parser.tgd_description(p)
self.assertTrue(text.index("Marvin O'Gravel Balloon-Face"))
self.assertTrue(text.index("Dave Keenan"))
if __name__=='__main__':
unittest.main()
|
|
'''
Created on 28 Mar 2013
@author: Jonathan Custance
'''
import sys
import xml.etree.ElementTree as ET
import Category
import Attribute
import Action
import Link
import Mixin
import Collection
import Model
import logging
class Parser(object):
'''
Parse a set of XML model files and return a Models object containing all the
models that have been successfully parsed.
'''
def __init__(self, files):
'''
Constructor
@param files: list of files to parse
'''
self.files = files
self.cats = Category.Categories()
self.models = Model.Models()
# Add the standard categories from OCCI Core
self._built_in_model = Model.Model("core", "OCCI Core categories", "1.0.0")
self.models.add(self._built_in_model)
# Entity
entity = Category.Category("entity", "http://schemas.ogf.org/occi/core#", "/entity/", None, "kind", self._built_in_model)
entity.addattr(Attribute.Attribute("id", "string", "true", "true", None, "true"))
entity.addattr(Attribute.Attribute("title", "string", "false", "false", None, "false"))
self.cats.add(entity)
# Resource
resource = Category.Category("resource", "http://schemas.ogf.org/occi/core#", "/resource/",
"http://schemas.ogf.org/occi/core#entity", "kind", self._built_in_model)
resource.addattr(Attribute.Attribute("summary", "string", "false", "false", None, "false"))
self.cats.add(resource)
# Link
link = Category.Category("link", "http://schemas.ogf.org/occi/core#", "/link/",
"http://schemas.ogf.org/occi/core#entity", "link", self._built_in_model)
link.addattr(Attribute.Attribute("source", "string", "true", "false", None, "false"))
link.addattr(Attribute.Attribute("target", "string", "true", "false", None, "false"))
self.cats.add(link)
def parse(self):
'''
Do the work of the Parser class
'''
# Parse each file / model
for f in self.files:
self._parse(f)
# Resolve rels and collections/instances
self.cats.resolve()
return self.models
def _addcoll(self, colls, cat):
'''
Parse and add collection attributes to a category
@param colls: a collection of XML Elements
@param cat: the Category to add the collections to
'''
for coll in colls:
name = coll.get("name")
if name == None:
logging.warn("Category "+cat.term+" - invalid collection")
continue
logging.info("Category "+cat.term+" collection "+name)
try:
cat.addcoll(Collection.Collection(name, coll.get("category"), coll.get("multiplicity"),
coll.get("legacytype"), coll.get("scope"), coll.get("script")))
except:
logging.error("Category "+cat.term+"Problem processing collection "+id)
logging.error(sys.exc_info())
def _addactions(self, category, cat):
'''
Parse and add all actions to a category
@param category: a collection of XML Elements
@param cat: the Category to add the collections to
'''
for action in category.findall("actions/action"):
actionid = action.get("id")
if actionid == None:
logging.warn("Category "+cat.term+" - invalid action")
continue
logging.info("Category "+cat.term+" action "+actionid)
try:
cat.addaction(Action.Action(actionid))
except:
logging.error("Category "+cat.term+" Problem processing action "+actionid)
logging.error(sys.exc_info())
def _addlinks(self, category, cat):
'''
Parse and add all links to a category
@param category: a collection of XML Elements
@param cat: the Category to add the collections to
'''
for link in category.findall("links/link"):
linkid = link.get("id")
if linkid == None:
logging.warn("Category "+cat.term+" - invalid link")
continue
logging.info("Category "+cat.term+" link "+linkid)
try:
cat.addlink(Link.Link(linkid))
except:
logging.error("Category "+cat.term+" Problem processing link "+linkid)
logging.error(sys.exc_info())
def _addmixins(self, category, cat):
'''
Parse and add all mixins to a category
@param category: a collection of XML Elements
@param cat: the Category to add the collections to
'''
for mixin in category.findall("mixins/mixin"):
mixinid = mixin.get("id")
if mixinid == None:
logging.warn("Category "+cat.term+" - invalid mixin")
continue
logging.info("Category "+cat.term+" mixin "+mixinid)
try:
cat.addmixin(Mixin.Mixin(mixinid))
except:
logging.error("Category "+cat.term+" Problem processing mixin "+mixinid)
logging.error(sys.exc_info())
def _addattrs(self, category, cat):
'''
Parse and add all attributes to a category
@param category: a collection of XML Elements
@param cat: the Category to add the collections to
'''
# Parse attributes
for attr in category.findall("attributes/attribute"):
name = attr.get("name")
if name == None:
logging.warn("Category"+cat.term+" - invalid attribute")
continue
logging.info("Category "+cat.term+" attribute "+name)
try:
cat.addattr(Attribute.Attribute(name, attr.get("type"), attr.get("required"),
attr.get("immutable"), attr.get("validation"),
attr.get("index"), attr.get("default"), attr.get("units"), attr.get("legacytype"),
attr.get("scope"), attr.get("script")))
except:
logging.error("Category "+cat.term+"Problem processing attribute "+id)
logging.error(sys.exc_info())
# Parse instances
colls = category.findall("attributes/instance")
self._addcoll(colls, cat)
# Parse collections
colls = category.findall("attributes/collection")
self._addcoll(colls, cat)
def _parse(self, f):
'''
Parse a given XML file
@param f File name
'''
logging.info("Processing "+f)
try:
tree = ET.parse(f)
root = tree.getroot()
# TODO schema version check etc.
# TODO checking for single model
# TODO warn about unknown XML nodes
try:
xmlmodel = root.find("model")
model = Model.Model(xmlmodel.get("name"), xmlmodel.get("description"),
xmlmodel.get("version"), xmlmodel.get("namespace"))
logging.info("Model is "+model.name+" (v. "+model.version+")")
except:
logging.error("Problem processing model")
return
# Find all categories
for category in xmlmodel.findall("category"):
term = category.get("term")
if term == None:
logging.warn("No category provided")
continue
logging.info("Category "+term)
# Add a category
try:
scheme, klass, location, rel, structName, headerFilename = category.get("scheme"), category.get("class"), category.get("location"), category.get("rel"), category.get("structname"), category.get("headerfilename")
cat = Category.Category(term, scheme, location, rel, klass, model, structName, headerFilename)
self._addattrs(category, cat)
self._addactions(category, cat)
self._addlinks(category, cat)
self._addmixins(category, cat)
model.add(cat)
self.cats.add(cat)
except:
logging.error("Problem processing category "+term)
logging.error(sys.exc_info())
# Add this model to the models collection
self.models.add(model)
except:
logging.error("Problem parsing "+f)
logging.error(sys.exc_info())
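if __name__ == '__main__':
    # Minimal usage sketch (not part of the original module): parse the model files
    # named on the command line and log progress, e.g. python parser.py model1.xml model2.xml
    logging.basicConfig(level=logging.INFO)
    models = Parser(sys.argv[1:]).parse()
    logging.info("Finished parsing %d file(s)", len(sys.argv[1:]))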
|
|
# Copyright 2009-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test built in connection-pooling with threads."""
import gc
import random
import sys
import threading
import time
from pymongo import MongoClient
from pymongo.errors import (AutoReconnect,
ConnectionFailure,
DuplicateKeyError,
ExceededMaxWaiters)
sys.path[0:0] = [""]
from pymongo.network import socket_closed
from pymongo.pool import Pool, PoolOptions
from test import host, port, SkipTest, unittest, client_context
from test.utils import (get_pool,
joinall,
delay,
one,
rs_or_single_client)
@client_context.require_connection
def setUpModule():
pass
N = 10
DB = "pymongo-pooling-tests"
def gc_collect_until_done(threads, timeout=60):
start = time.time()
running = list(threads)
while running:
assert (time.time() - start) < timeout, "Threads timed out"
for t in running:
t.join(0.1)
if not t.isAlive():
running.remove(t)
gc.collect()
class MongoThread(threading.Thread):
"""A thread that uses a MongoClient."""
def __init__(self, client):
super(MongoThread, self).__init__()
self.daemon = True # Don't hang whole test if thread hangs.
self.client = client
self.db = self.client[DB]
self.passed = False
def run(self):
self.run_mongo_thread()
self.passed = True
def run_mongo_thread(self):
raise NotImplementedError
class InsertOneAndFind(MongoThread):
def run_mongo_thread(self):
for _ in range(N):
rand = random.randint(0, N)
_id = self.db.sf.insert_one({"x": rand}).inserted_id
assert rand == self.db.sf.find_one(_id)["x"]
class Unique(MongoThread):
def run_mongo_thread(self):
for _ in range(N):
self.db.unique.insert_one({}) # no error
class NonUnique(MongoThread):
def run_mongo_thread(self):
for _ in range(N):
try:
self.db.unique.insert_one({"_id": "jesse"})
except DuplicateKeyError:
pass
else:
raise AssertionError("Should have raised DuplicateKeyError")
class Disconnect(MongoThread):
def run_mongo_thread(self):
for _ in range(N):
self.client.close()
class SocketGetter(MongoThread):
"""Utility for TestPooling.
Checks out a socket and holds it forever. Used in
test_no_wait_queue_timeout, test_wait_queue_multiple, and
test_no_wait_queue_multiple.
"""
def __init__(self, client, pool):
super(SocketGetter, self).__init__(client)
self.state = 'init'
self.pool = pool
self.sock = None
def run_mongo_thread(self):
self.state = 'get_socket'
# Pass 'checkout' so we can hold the socket.
with self.pool.get_socket({}, checkout=True) as sock:
self.sock = sock
self.state = 'sock'
def __del__(self):
if self.sock:
self.sock.close()
def run_cases(client, cases):
threads = []
n_runs = 5
for case in cases:
for i in range(n_runs):
t = case(client)
t.start()
threads.append(t)
for t in threads:
t.join()
for t in threads:
assert t.passed, "%s.run() threw an exception" % repr(t)
class _TestPoolingBase(unittest.TestCase):
"""Base class for all connection-pool tests."""
def setUp(self):
self.c = rs_or_single_client()
db = self.c[DB]
db.unique.drop()
db.test.drop()
db.unique.insert_one({"_id": "jesse"})
db.test.insert_many([{} for _ in range(10)])
def create_pool(self, pair=(host, port), *args, **kwargs):
return Pool(pair, PoolOptions(*args, **kwargs))
class TestPooling(_TestPoolingBase):
def test_max_pool_size_validation(self):
self.assertRaises(
ValueError, MongoClient, host=host, port=port,
maxPoolSize=-1)
self.assertRaises(
ValueError, MongoClient, host=host, port=port,
maxPoolSize='foo')
c = MongoClient(host=host, port=port, maxPoolSize=100)
self.assertEqual(c.max_pool_size, 100)
def test_no_disconnect(self):
run_cases(self.c, [NonUnique, Unique, InsertOneAndFind])
def test_disconnect(self):
run_cases(self.c, [InsertOneAndFind, Disconnect, Unique])
def test_pool_reuses_open_socket(self):
# Test Pool's _check_closed() method doesn't close a healthy socket.
cx_pool = self.create_pool(max_pool_size=10)
cx_pool._check_interval_seconds = 0 # Always check.
with cx_pool.get_socket({}) as sock_info:
pass
with cx_pool.get_socket({}) as new_sock_info:
self.assertEqual(sock_info, new_sock_info)
self.assertEqual(1, len(cx_pool.sockets))
def test_get_socket_and_exception(self):
# get_socket() returns socket after a non-network error.
cx_pool = self.create_pool(max_pool_size=1, wait_queue_timeout=1)
with self.assertRaises(ZeroDivisionError):
with cx_pool.get_socket({}) as sock_info:
1 / 0
# Socket was returned, not closed.
with cx_pool.get_socket({}) as new_sock_info:
self.assertEqual(sock_info, new_sock_info)
self.assertEqual(1, len(cx_pool.sockets))
def test_pool_removes_closed_socket(self):
# Test that Pool removes explicitly closed socket.
cx_pool = self.create_pool()
with cx_pool.get_socket({}) as sock_info:
# Use SocketInfo's API to close the socket.
sock_info.close()
self.assertEqual(0, len(cx_pool.sockets))
def test_pool_removes_dead_socket(self):
# Test that Pool removes dead socket and the socket doesn't return
# itself PYTHON-344
cx_pool = self.create_pool(max_pool_size=1, wait_queue_timeout=1)
cx_pool._check_interval_seconds = 0 # Always check.
with cx_pool.get_socket({}) as sock_info:
# Simulate a closed socket without telling the SocketInfo it's
# closed.
sock_info.sock.close()
self.assertTrue(socket_closed(sock_info.sock))
with cx_pool.get_socket({}) as new_sock_info:
self.assertEqual(0, len(cx_pool.sockets))
self.assertNotEqual(sock_info, new_sock_info)
self.assertEqual(1, len(cx_pool.sockets))
# Semaphore was released.
with cx_pool.get_socket({}):
pass
def test_return_socket_after_reset(self):
pool = self.create_pool()
with pool.get_socket({}) as sock:
pool.reset()
self.assertTrue(sock.closed)
self.assertEqual(0, len(pool.sockets))
def test_pool_check(self):
# Test that Pool recovers from two connection failures in a row.
# This exercises code at the end of Pool._check().
cx_pool = self.create_pool(max_pool_size=1,
connect_timeout=1,
wait_queue_timeout=1)
cx_pool._check_interval_seconds = 0 # Always check.
with cx_pool.get_socket({}) as sock_info:
# Simulate a closed socket without telling the SocketInfo it's
# closed.
sock_info.sock.close()
# Swap pool's address with a bad one.
address, cx_pool.address = cx_pool.address, ('foo.com', 1234)
with self.assertRaises(AutoReconnect):
with cx_pool.get_socket({}):
pass
# Back to normal, semaphore was correctly released.
cx_pool.address = address
with cx_pool.get_socket({}, checkout=True):
pass
def test_pool_with_fork(self):
# Test that separate MongoClients have separate Pools, and that the
# driver can create a new MongoClient after forking
if sys.platform == "win32":
raise SkipTest("Can't test forking on Windows")
try:
from multiprocessing import Process, Pipe
except ImportError:
raise SkipTest("No multiprocessing module")
a = rs_or_single_client()
a.pymongo_test.test.drop()
a.pymongo_test.test.insert_one({'_id':1})
a.pymongo_test.test.find_one()
self.assertEqual(1, len(get_pool(a).sockets))
a_sock = one(get_pool(a).sockets)
def loop(pipe):
c = rs_or_single_client()
c.pymongo_test.test.find_one()
self.assertEqual(1, len(get_pool(c).sockets))
pipe.send(one(get_pool(c).sockets).sock.getsockname())
cp1, cc1 = Pipe()
cp2, cc2 = Pipe()
p1 = Process(target=loop, args=(cc1,))
p2 = Process(target=loop, args=(cc2,))
p1.start()
p2.start()
p1.join(1)
p2.join(1)
p1.terminate()
p2.terminate()
p1.join()
p2.join()
cc1.close()
cc2.close()
b_sock = cp1.recv()
c_sock = cp2.recv()
self.assertTrue(a_sock.sock.getsockname() != b_sock)
self.assertTrue(a_sock.sock.getsockname() != c_sock)
self.assertTrue(b_sock != c_sock)
# a_sock, created by parent process, is still in the pool
with get_pool(a).get_socket({}) as d_sock:
self.assertEqual(a_sock, d_sock)
def test_wait_queue_timeout(self):
wait_queue_timeout = 2 # Seconds
pool = self.create_pool(
max_pool_size=1, wait_queue_timeout=wait_queue_timeout)
with pool.get_socket({}) as sock_info:
start = time.time()
with self.assertRaises(ConnectionFailure):
with pool.get_socket({}):
pass
duration = time.time() - start
self.assertTrue(
abs(wait_queue_timeout - duration) < 1,
"Waited %.2f seconds for a socket, expected %f" % (
duration, wait_queue_timeout))
sock_info.close()
def test_no_wait_queue_timeout(self):
# Verify get_socket() with no wait_queue_timeout blocks forever.
pool = self.create_pool(max_pool_size=1)
# Reach max_size.
with pool.get_socket({}) as s1:
t = SocketGetter(self.c, pool)
t.start()
while t.state != 'get_socket':
time.sleep(0.1)
time.sleep(1)
self.assertEqual(t.state, 'get_socket')
while t.state != 'sock':
time.sleep(0.1)
self.assertEqual(t.state, 'sock')
self.assertEqual(t.sock, s1)
s1.close()
def test_wait_queue_multiple(self):
wait_queue_multiple = 3
pool = self.create_pool(
max_pool_size=2, wait_queue_multiple=wait_queue_multiple)
# Reach max_size sockets.
with pool.get_socket({}):
with pool.get_socket({}):
# Reach max_size * wait_queue_multiple waiters.
threads = []
for _ in range(6):
t = SocketGetter(self.c, pool)
t.start()
threads.append(t)
time.sleep(1)
for t in threads:
self.assertEqual(t.state, 'get_socket')
with self.assertRaises(ExceededMaxWaiters):
with pool.get_socket({}):
pass
def test_no_wait_queue_multiple(self):
pool = self.create_pool(max_pool_size=2)
socks = []
for _ in range(2):
# Pass 'checkout' so we can hold the socket.
with pool.get_socket({}, checkout=True) as sock:
socks.append(sock)
threads = []
for _ in range(30):
t = SocketGetter(self.c, pool)
t.start()
threads.append(t)
time.sleep(1)
for t in threads:
self.assertEqual(t.state, 'get_socket')
for socket_info in socks:
socket_info.close()
class TestPoolMaxSize(_TestPoolingBase):
def test_max_pool_size(self):
max_pool_size = 4
c = rs_or_single_client(maxPoolSize=max_pool_size)
collection = c[DB].test
# Need one document.
collection.drop()
collection.insert_one({})
# nthreads had better be much larger than max_pool_size to ensure that
# max_pool_size sockets are actually required at some point in this
# test's execution.
cx_pool = get_pool(c)
nthreads = 10
threads = []
lock = threading.Lock()
self.n_passed = 0
def f():
for _ in range(5):
collection.find_one({'$where': delay(0.1)})
assert len(cx_pool.sockets) <= max_pool_size
with lock:
self.n_passed += 1
for i in range(nthreads):
t = threading.Thread(target=f)
threads.append(t)
t.start()
joinall(threads)
self.assertEqual(nthreads, self.n_passed)
self.assertTrue(len(cx_pool.sockets) > 1)
self.assertEqual(max_pool_size, cx_pool._socket_semaphore.counter)
def test_max_pool_size_none(self):
c = rs_or_single_client(maxPoolSize=None)
collection = c[DB].test
# Need one document.
collection.drop()
collection.insert_one({})
cx_pool = get_pool(c)
nthreads = 10
threads = []
lock = threading.Lock()
self.n_passed = 0
def f():
for _ in range(5):
collection.find_one({'$where': delay(0.1)})
with lock:
self.n_passed += 1
for i in range(nthreads):
t = threading.Thread(target=f)
threads.append(t)
t.start()
joinall(threads)
self.assertEqual(nthreads, self.n_passed)
self.assertTrue(len(cx_pool.sockets) > 1)
def test_max_pool_size_with_connection_failure(self):
# The pool acquires its semaphore before attempting to connect; ensure
# it releases the semaphore on connection failure.
test_pool = Pool(
('example.com', 27017),
PoolOptions(
max_pool_size=1,
connect_timeout=1,
socket_timeout=1,
wait_queue_timeout=1))
# First call to get_socket fails; if pool doesn't release its semaphore
# then the second call raises "ConnectionFailure: Timed out waiting for
# socket from pool" instead of AutoReconnect.
for i in range(2):
with self.assertRaises(AutoReconnect) as context:
with test_pool.get_socket({}, checkout=True):
pass
# Testing for AutoReconnect instead of ConnectionFailure, above,
# is sufficient right *now* to catch a semaphore leak. But that
# seems error-prone, so check the message too.
self.assertNotIn('waiting for socket from pool',
str(context.exception))
if __name__ == "__main__":
unittest.main()
|
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [
'ModelBase',
]
import inspect
from collections import abc, OrderedDict
from typing import Any, Callable, Dict, List, Mapping, MutableMapping, MutableSequence, Optional, Sequence, Tuple, Type, TypeVar, Union, cast, get_type_hints
T = TypeVar('T')
def verify_object_against_type(x: Any, typ: Type[T]) -> T:
'''Verifies that the object is compatible with the specified type (types from the typing package can be used).'''
#TODO: Merge with parse_object_from_struct_based_on_type which has almost the same code
if typ is type(None):
if x is None:
return x
else:
raise TypeError('Error: Object "{}" is not None.'.format(x))
if typ is Any or type(typ) is TypeVar:
return x
try: #isinstance can fail for generics
if isinstance(x, typ):
return cast(typ, x)
except:
pass
if hasattr(typ, '__origin__'): #Handling generic types
if typ.__origin__ is Union: #Optional == Union
exception_map = {}
possible_types = typ.__args__
if type(None) in possible_types and x is None: #Shortcut for Optional[] tests. Can be removed, but the exceptions will be more noisy.
return x
for possible_type in possible_types:
try:
verify_object_against_type(x, possible_type)
return x
except Exception as ex:
exception_map[possible_type] = ex
pass
#exception_lines = ['Exception for type {}: {}.'.format(t, e) for t, e in exception_map.items()]
exception_lines = [str(e) for t, e in exception_map.items()]
exception_lines.append('Error: Object "{}" is incompatible with type "{}".'.format(x, typ))
raise TypeError('\n'.join(exception_lines))
#not Union => not None
if x is None:
raise TypeError('Error: None object is incompatible with type {}'.format(typ))
#assert isinstance(x, typ.__origin__)
generic_type = typ.__origin__ or getattr(typ, '__extra__', None) #In python <3.7 typing.List.__origin__ == None; Python 3.7 has working __origin__, but no __extra__ TODO: Remove the __extra__ once we move to Python 3.7
if generic_type in [list, List, abc.Sequence, abc.MutableSequence, Sequence, MutableSequence] and type(x) is not str: #! str is also Sequence
if not isinstance(x, generic_type):
raise TypeError('Error: Object "{}" is incompatible with type "{}"'.format(x, typ))
type_args = typ.__args__ if typ.__args__ is not None else (Any, Any) #Workaround for Python <3.7 (where Mapping.__args__ is None)
inner_type = type_args[0]
for item in x:
verify_object_against_type(item, inner_type)
return x
elif generic_type in [dict, Dict, abc.Mapping, abc.MutableMapping, Mapping, MutableMapping, OrderedDict]:
if not isinstance(x, generic_type):
raise TypeError('Error: Object "{}" is incompatible with type "{}"'.format(x, typ))
type_args = typ.__args__ if typ.__args__ is not None else (Any, Any) #Workaround for Python <3.7 (where Mapping.__args__ is None)
inner_key_type = type_args[0]
inner_value_type = type_args[1]
for k, v in x.items():
verify_object_against_type(k, inner_key_type)
verify_object_against_type(v, inner_value_type)
return x
else:
raise TypeError('Error: Unsupported generic type "{}". type.__origin__ or type.__extra__ == "{}"'.format(typ, generic_type))
raise TypeError('Error: Object "{}" is incompatible with type "{}"'.format(x, typ))
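# Minimal self-checking sketch for verify_object_against_type (inputs invented for
# illustration): compatible objects are returned unchanged, incompatible ones raise TypeError.
def _verify_object_examples():
    assert verify_object_against_type([1, 2, 3], List[int]) == [1, 2, 3]
    assert verify_object_against_type({'a': 1}, Mapping[str, int]) == {'a': 1}
    try:
        verify_object_against_type('not an int', Optional[int])
    except TypeError:
        pass  # expected: the string matches neither int nor NoneType
    else:
        raise AssertionError('TypeError was expected')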
def parse_object_from_struct_based_on_type(struct: Any, typ: Type[T]) -> T:
'''Constructs an object from structure (usually dict) based on type. Supports list and dict types from the typing package plus Optional[] and Union[] types.
If some type is a class that has .from_dict class method, that method is used for object construction.
'''
if typ is type(None):
if struct is None:
return None
else:
raise TypeError('Error: Structure "{}" is not None.'.format(struct))
if typ is Any or type(typ) is TypeVar:
return struct
try: #isinstance can fail for generics
#if (isinstance(struct, typ)
# and not (typ is Sequence and type(struct) is str) #! str is also Sequence
# and not (typ is int and type(struct) is bool) #! bool is int
#):
if type(struct) is typ:
return struct
except:
pass
if hasattr(typ, 'from_dict'):
try: #More informative errors
return typ.from_dict(struct)
except Exception as ex:
raise TypeError('Error: {}.from_dict(struct={}) failed with exception:\n{}'.format(typ.__name__, struct, str(ex)))
if hasattr(typ, '__origin__'): #Handling generic types
if typ.__origin__ is Union: #Optional == Union
results = {}
exception_map = {}
possible_types = list(typ.__args__)
#if type(None) in possible_types and struct is None: #Shortcut for Optional[] tests. Can be removed, but the exceptions will be more noisy.
# return None
#Hack for Python <3.7 which for some reason "simplifies" Union[bool, int, ...] to just Union[int, ...]
if int in possible_types:
possible_types = possible_types + [bool]
for possible_type in possible_types:
try:
obj = parse_object_from_struct_based_on_type(struct, possible_type)
results[possible_type] = obj
except Exception as ex:
exception_map[possible_type] = ex
pass
#Single successful parsing.
if len(results) == 1:
return list(results.values())[0]
if len(results) > 1:
raise TypeError('Error: Structure "{}" is ambiguous. It can be parsed to multiple types: {}.'.format(struct, list(results.keys())))
exception_lines = [str(e) for t, e in exception_map.items()]
exception_lines.append('Error: Structure "{}" is incompatible with type "{}" - none of the types in Union are compatible.'.format(struct, typ))
raise TypeError('\n'.join(exception_lines))
#not Union => not None
if struct is None:
raise TypeError('Error: None structure is incompatible with type {}'.format(typ))
#assert isinstance(x, typ.__origin__)
generic_type = typ.__origin__ or getattr(typ, '__extra__', None) #In python <3.7 typing.List.__origin__ == None; Python 3.7 has working __origin__, but no __extra__ TODO: Remove the __extra__ once we move to Python 3.7
if generic_type in [list, List, abc.Sequence, abc.MutableSequence, Sequence, MutableSequence] and type(struct) is not str: #! str is also Sequence
if not isinstance(struct, generic_type):
raise TypeError('Error: Structure "{}" is incompatible with type "{}" - it does not have list type.'.format(struct, typ))
type_args = typ.__args__ if typ.__args__ is not None else (Any, Any) #Workaround for Python <3.7 (where Mapping.__args__ is None)
inner_type = type_args[0]
return [parse_object_from_struct_based_on_type(item, inner_type) for item in struct]
elif generic_type in [dict, Dict, abc.Mapping, abc.MutableMapping, Mapping, MutableMapping, OrderedDict]: #in Python <3.7 there is a difference between abc.Mapping and typing.Mapping
if not isinstance(struct, generic_type):
raise TypeError('Error: Structure "{}" is incompatible with type "{}" - it does not have dict type.'.format(struct, typ))
type_args = typ.__args__ if typ.__args__ is not None else (Any, Any) #Workaround for Python <3.7 (where Mapping.__args__ is None)
inner_key_type = type_args[0]
inner_value_type = type_args[1]
return {parse_object_from_struct_based_on_type(k, inner_key_type): parse_object_from_struct_based_on_type(v, inner_value_type) for k, v in struct.items()}
else:
raise TypeError('Error: Unsupported generic type "{}". type.__origin__ or type.__extra__ == "{}"'.format(typ, generic_type))
raise TypeError('Error: Structure "{}" is incompatible with type "{}". Structure is not the instance of the type, the type does not have .from_dict method and is not generic.'.format(struct, typ))
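# Hedged sketch for parse_object_from_struct_based_on_type (values invented for
# illustration): plain structures are recursed into according to the generic type,
# while classes exposing .from_dict would be constructed through that method instead.
def _parse_struct_examples():
    assert parse_object_from_struct_based_on_type([1, 2], List[int]) == [1, 2]
    assert parse_object_from_struct_based_on_type(
        {'a': [1, 2]}, Dict[str, List[int]]) == {'a': [1, 2]}
    assert parse_object_from_struct_based_on_type(None, Optional[int]) is None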
def convert_object_to_struct(obj, serialized_names: Mapping[str, str] = {}):
'''Converts an object to structure (usually a dict).
Serializes all properties that do not start with underscores.
If the type of some property is a class that has .to_dict class method, that method is used for conversion.
Used by the ModelBase class.
'''
signature = inspect.signature(obj.__init__) #Needed for default values
result = {}
for python_name in signature.parameters: #TODO: Make it possible to specify the field ordering regardless of the presence of default values
value = getattr(obj, python_name)
if python_name.startswith('_'):
continue
attr_name = serialized_names.get(python_name, python_name)
if hasattr(value, "to_dict"):
result[attr_name] = value.to_dict()
elif isinstance(value, list):
result[attr_name] = [(x.to_dict() if hasattr(x, 'to_dict') else x) for x in value]
elif isinstance(value, dict):
result[attr_name] = {k: (v.to_dict() if hasattr(v, 'to_dict') else v) for k, v in value.items()}
else:
param = signature.parameters.get(python_name, None)
if param is None or param.default == inspect.Parameter.empty or value != param.default:
result[attr_name] = value
return result
def parse_object_from_struct_based_on_class_init(cls : Type[T], struct: Mapping, serialized_names: Mapping[str, str] = {}) -> T:
'''Constructs an object of specified class from structure (usually dict) using the class.__init__ method.
Converts all constructor arguments to appropriate types based on the __init__ type hints.
Used by the ModelBase class.
Arguments:
serialized_names: specifies the mapping between __init__ parameter names and the structure key names for cases where these names are different (due to language syntax clashes or style differences).
'''
parameter_types = get_type_hints(cls.__init__) #Properly resolves forward references
serialized_names_to_pythonic = {v: k for k, v in serialized_names.items()}
#If a pythonic name has a different original name, we forbid the pythonic name in the structure. Otherwise, this function would accept "python-styled" structures that should be invalid
forbidden_struct_keys = set(serialized_names_to_pythonic.values()).difference(serialized_names_to_pythonic.keys())
args = {}
for original_name, value in struct.items():
if original_name in forbidden_struct_keys:
raise ValueError('Use "{}" key instead of pythonic key "{}" in the structure: {}.'.format(serialized_names[original_name], original_name, struct))
python_name = serialized_names_to_pythonic.get(original_name, original_name)
param_type = parameter_types.get(python_name, None)
if param_type is not None:
args[python_name] = parse_object_from_struct_based_on_type(value, param_type)
else:
args[python_name] = value
return cls(**args)
class ModelBase:
'''Base class for types that can be converted to JSON-like dict structures or constructed from such structures.
The object fields, their types and default values are taken from the __init__ method arguments.
Override the _serialized_names mapping to control the key names of the serialized structures.
The derived class objects will have the .from_dict and .to_dict methods for conversion to or from structure. The base class constructor accepts the arguments map, checks the argument types and sets the object field values.
Example derived class:
class TaskSpec(ModelBase):
_serialized_names = {
'component_ref': 'componentRef',
'is_enabled': 'isEnabled',
}
def __init__(self,
component_ref: ComponentReference,
arguments: Optional[Mapping[str, ArgumentType]] = None,
is_enabled: Optional[Union[ArgumentType, EqualsPredicate, NotEqualsPredicate]] = None, #Optional property with default value
):
super().__init__(locals()) #Calling the ModelBase constructor to check the argument types and set the object field values.
task_spec = TaskSpec.from_dict({'componentRef': {...}, 'isEnabled': {'and': {...}}}) # = instance of TaskSpec
task_struct = task_spec.to_dict() # = {'componentRef': {...}, 'isEnabled': {'and': {...}}}
'''
_serialized_names = {}
def __init__(self, args):
parameter_types = get_type_hints(self.__class__.__init__)
field_values = {k: v for k, v in args.items() if k != 'self' and not k.startswith('_')}
for k, v in field_values.items():
parameter_type = parameter_types.get(k, None)
if parameter_type is not None:
try:
verify_object_against_type(v, parameter_type)
except Exception as e:
raise TypeError('Argument for {} is not compatible with type "{}". Exception: {}'.format(k, parameter_type, e))
self.__dict__.update(field_values)
@classmethod
def from_dict(cls: Type[T], struct: Mapping) -> T:
return parse_object_from_struct_based_on_class_init(cls, struct, serialized_names=cls._serialized_names)
def to_dict(self) -> Mapping:
return convert_object_to_struct(self, serialized_names=self._serialized_names)
def _get_field_names(self):
return list(inspect.signature(self.__init__).parameters)
def __repr__(self):
return self.__class__.__name__ + '(' + ', '.join(param + '=' + repr(getattr(self, param)) for param in self._get_field_names()) + ')'
def __eq__(self, other):
return self.__class__ == other.__class__ and {k: getattr(self, k) for k in self._get_field_names()} == {k: getattr(other, k) for k in other._get_field_names()}
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash(repr(self))
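# A minimal usage sketch for ModelBase (the Address class below is hypothetical):
#
#   class Address(ModelBase):
#       _serialized_names = {'zip_code': 'zipCode'}
#       def __init__(self, city: str, zip_code: Optional[str] = None):
#           super().__init__(locals())
#
#   addr = Address.from_dict({'city': 'Oslo', 'zipCode': '0150'})
#   assert addr.to_dict() == {'city': 'Oslo', 'zipCode': '0150'}  # round-trips via the serialized names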
|
|
import sys
import threading
import cmd
import chess
from chess import polyglot
import tables
import os
__location__ = os.path.realpath(
os.path.join(os.getcwd(), os.path.dirname(__file__)))
logfile = open(os.path.join(__location__, 'input.log'), 'w')
ENGINE_NAME = 'simple UCI chess engine'
AUTHOR_NAME = 'Alexey Syromyatnikov'
class Analyzer(threading.Thread):
MIN_VALUE = -10 * tables.piece[chess.KING]
BETA = tables.piece[chess.ROOK]
ALPHA = -BETA
MAX_ITER = 2
MULTIPLIER = 4
MAX_NEGAMAX_ITER = 2
NEGAMAX_DIVISOR = 3
def set_default_values(self):
self.infinite = False
self.possible_first_moves = set()
self.max_depth = 3
self.number_of_nodes = 100
def __init__(self, call_if_ready, call_to_inform, opening_book):
super(Analyzer, self).__init__()
if opening_book:
self.opening_book = polyglot.open_reader(opening_book)
else:
self.opening_book = None
self.debug = False
self.set_default_values()
self.board = chess.Board()
self.is_working = threading.Event()
self.is_working.clear()
self.is_conscious = threading.Condition()
self.termination = threading.Event()
self.termination.clear()
self._call_if_ready = call_if_ready
self._call_to_inform = call_to_inform
self._bestmove = chess.Move.null()
@property
def bestmove(self):
return self._bestmove
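# The Communicant decorator below wraps Analyzer methods so that a wrapped call
# exits the worker thread as soon as `termination` is set (checked before and after
# the call) and notifies the `is_conscious` condition around the call, which lets
# do_isready wait for the analyzer to reach a safe point before answering 'readyok'.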
class Communicant:
def __call__(self, func):
def wrap(instance, *args, **kwargs):
if instance.termination.is_set():
sys.exit()
with instance.is_conscious:
instance.is_conscious.notify()
result = func(instance, *args, **kwargs)
with instance.is_conscious:
instance.is_conscious.notify()
if instance.termination.is_set():
sys.exit()
return result
return wrap
@property
def number_of_pieces(self):
number = sum(1 for square in chess.SQUARES
if self.board.piece_at(square))
return number
def evaluate_material_position(self, phase, color, pieces):
value = 0
for piece in pieces:
squares = self.board.pieces(piece, color)
for square in squares:
value += tables.piece_square[phase][color][piece][square]
return value
def evaluate_material(self, color):
value = 0
for piece in chess.PIECE_TYPES:
squares = self.board.pieces(piece, color)
value += len(squares) * tables.piece[piece]
return value
def evaluate(self):
if self.board.is_checkmate():
return self.MIN_VALUE
if self.board.is_stalemate():
return 0
colors = list(map(int, chess.COLORS))
values = [0 for i in tables.PHASES]
phase = tables.OPENING
pieces = list(range(1, 6)) # pieces without king
for color in colors:
values[phase] += (self.evaluate_material_position(phase, color, pieces)
* (-1 + 2 * color))
values[tables.ENDING] = values[tables.OPENING]
for phase in tables.PHASES:
for color in colors:
values[phase] += (self.evaluate_material_position(phase, color, (chess.KING,))
* (-1 + 2 * color))
material = [0 for i in colors]
for color in colors:
material[color] = self.evaluate_material(color)
material_sum = sum(material)
for color in colors:
for phase in tables.PHASES:
values[phase] += material[color] * (-1 + 2 * color)
value = ((values[tables.OPENING] * material_sum +
values[tables.ENDING] * (tables.PIECE_SUM - material_sum))
// tables.PIECE_SUM)
if self.board.turn == chess.BLACK:
value *= -1
return value
def moves(self, depth):
if depth == 0 and self.possible_first_moves:
for move in self.board.legal_moves:
if move in self.possible_first_moves:
yield move
else:
for move in self.board.legal_moves:
yield move
def inner_negamax(self, depth, alpha, beta):
best_value = alpha
for move in self.moves(depth):
if self.debug:
self._call_to_inform('currmove {}'.format(move.uci()))
self.board.push(move)
value = -self.negamax(depth+1, -beta, -best_value)
if self.debug:
self._call_to_inform('string value {}'.format(value))
self.board.pop()
if value >= beta:
if depth == 0:
self._bestmove = move
return beta
elif value > best_value:
best_value = value
if depth == 0:
self._bestmove = move
elif depth == 0 and not bool(self._bestmove):
self._bestmove = move
return best_value
@Communicant()
def negamax(self, depth, alpha, beta):
if depth == self.max_depth or not self.is_working.is_set():
return self.evaluate()
if self.debug:
self._call_to_inform('depth {}'.format(depth))
self._call_to_inform('string alpha {} beta {}'.format(alpha, beta))
value = alpha
left_borders = [beta - (beta - alpha) // self.NEGAMAX_DIVISOR ** i
for i in range(self.MAX_NEGAMAX_ITER, -1, -1)]
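# Probe with progressively wider windows: the left border starts close to beta and
# moves down towards alpha (full window on the last iteration); stop as soon as the
# result exceeds the current left border, i.e. the narrowed search did not fail low.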
for left in left_borders:
value = self.inner_negamax(depth, left, beta)
if value > left:
break
return value
def run(self):
while self.is_working.wait():
if self.termination.is_set():
sys.exit()
self._bestmove = chess.Move.null()
try:
if not self.possible_first_moves:
entry = self.opening_book.find(self.board)
self._bestmove = entry.move()
else:
for entry in self.opening_book.find_all(self.board):
move = entry.move()
if move in self.possible_first_moves:
self._bestmove = move
break
except:
pass
if not bool(self._bestmove):
middle = self.evaluate()
alpha = self.ALPHA
beta = self.BETA
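# Aspiration window around the static evaluation: search [middle+alpha, middle+beta]
# and, on a fail-high or fail-low, widen the offending bound by MULTIPLIER and retry
# (at most MAX_ITER times).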
for i in range(self.MAX_ITER):
value = self.negamax(depth=0,
alpha=middle+alpha,
beta=middle+beta)
if value >= middle + beta:
beta *= self.MULTIPLIER
elif value <= middle + alpha:
alpha *= self.MULTIPLIER
else:
break
self._call_to_inform('pv score cp {}'.format(value))
else:
self._call_to_inform('string opening')
if not self.infinite:
self._call_if_ready()
self.set_default_values()
self.is_working.clear()
class EngineShell(cmd.Cmd):
intro = ''
prompt = ''
file = None
opening_book_list = ['gm2001',
'komodo',
'Human']
opening_book = 'Human'
opening_dir = 'opening'
opening_book_extension = '.bin'
go_parameter_list = ['infinite', 'searchmoves', 'depth', 'nodes']
def __init__(self):
super(EngineShell, self).__init__()
self.postinitialized = False
def postinit(self):
opening_book = self.opening_book + self.opening_book_extension
opening_book = os.path.join(self.opening_dir, opening_book)
self.analyzer = Analyzer(
self.output_bestmove,
self.output_info,
os.path.join(__location__, opening_book))
self.analyzer.start()
self.postinitialized = True
def do_uci(self, arg):
print('id name', ENGINE_NAME)
print('id author', AUTHOR_NAME)
print('option name OpeningBook type combo', end=' ')
print('default', self.opening_book, end=' ')
for book in self.opening_book_list:
print('var', book, end=' ')
print()
print('uciok')
def do_debug(self, arg):
arg = arg.split()
if arg:
arg = arg[0]
else:
return
if arg == 'on':
self.analyzer.debug = True
elif arg == 'off':
self.analyzer.debug = False
def do_isready(self, arg):
if not self.postinitialized:
self.postinit()
if self.analyzer.is_working.is_set():
with self.analyzer.is_conscious:
self.analyzer.is_conscious.wait()
print('readyok')
def do_setoption(self, arg):
arg = arg.split()
try:
if arg[0] != 'name':
return
arg.pop(0)
if (arg[0] == 'OpeningBook' and
arg[1] == 'value' and
arg[2] in self.opening_book_list):
self.opening_book = arg[2]
except:
pass
def do_ucinewgame(self, arg):
pass
def do_position(self, arg):
arg = arg.split()
if not arg:
return
if self.analyzer.is_working.is_set():
'''
something strange
according to the protocol I should ignore it
*if I ignore it, maybe it will go away*
'''
return
if arg[0] == 'fen' and len(arg) >= 7:
self.analyzer.board.set_fen(' '.join(arg[1:7]))
del arg[:7]
else:
if arg[0] == 'startpos':
arg.pop(0)
self.analyzer.board.reset()
if arg and arg[0] == 'moves':
for move in arg[1:]:
self.analyzer.board.push_uci(move)
def do_go(self, arg):
arg = arg.split()
for parameter in self.go_parameter_list:
try:
index = arg.index(parameter)
except:
pass
else:
getattr(self, 'go_' + arg[index])(arg[index + 1:])
try:
index = arg.index('movetime')
time = float(arg[index + 1]) / 1000
except:
pass
else:
self.stop_timer = threading.Timer(time, self.do_stop)
self.stop_timer.start()
self.analyzer.is_working.set()
def do_stop(self, arg=None):
if hasattr(self, 'stop_timer'):
self.stop_timer.cancel()
if self.analyzer.is_working.is_set():
self.analyzer.is_working.clear()
else:
self.output_bestmove()
def do_quit(self, arg):
if hasattr(self, 'analyzer'):
self.analyzer.termination.set()
self.analyzer.is_working.set()
self.analyzer.join()
sys.exit()
def output_bestmove(self):
print('bestmove', self.analyzer.bestmove.uci(),
file=self.stdout, flush=True)
def output_info(self, info_string):
print('info', info_string,
file=self.stdout, flush=True)
def go_infinite(self, arg):
self.analyzer.infinite = True
def go_searchmoves(self, arg):
self.analyzer.possible_first_moves = set()
for uci_move in arg:
try:
move = chess.Move.from_uci(uci_move)
except:
break
else:
self.analyzer.possible_first_moves.add(move)
def go_depth(self, arg):
if not self.analyzer.debug:
return
try:
depth = int(arg[0])
except:
pass
else:
self.analyzer.max_depth = depth
def go_nodes(self, arg):
try:
number_of_nodes = int(arg[0])
except:
pass
else:
self.analyzer.number_of_nodes = number_of_nodes
def default(self, arg):
pass
def precmd(self, line):
print(line, file=logfile, flush=True)
return line
def postcmd(self, stop, line):
self.stdout.flush()
return stop
if __name__ == '__main__':
print('new start', file=logfile, flush=True)
EngineShell().cmdloop()
|
|
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""The application's Globals object"""
__all__ = ['Globals']
import logging
import cgi
import hashlib
import json
import datetime
from urllib import urlencode
from subprocess import Popen, PIPE
import os
import time
import traceback
import activitystream
import pkg_resources
import markdown
import pygments
import pygments.lexers
import pygments.formatters
import pygments.util
from tg import config, session
from pylons import request
from pylons import tmpl_context as c
from paste.deploy.converters import asbool, asint, aslist
from pypeline.markup import markup as pypeline_markup
import ew as ew_core
import ew.jinja2_ew as ew
from ming.utils import LazyProperty
from jinja2 import Markup
import allura.tasks.event_tasks
from allura import model as M
from allura.lib.markdown_extensions import (
ForgeExtension,
CommitMessageExtension,
)
from allura.eventslistener import PostEvent
from allura.lib import gravatar, plugin, utils
from allura.lib import helpers as h
from allura.lib.widgets import analytics
from allura.lib.security import Credentials
from allura.lib.solr import MockSOLR, make_solr_from_config
from allura.model.session import artifact_orm_session
log = logging.getLogger(__name__)
class ForgeMarkdown(markdown.Markdown):
def convert(self, source, render_limit=True):
if render_limit and len(source) > asint(config.get('markdown_render_max_length', 40000)):
# if text is too big, markdown can take a long time to process it,
# so we return it as plain text
log.info('Text is too big. Skipping markdown processing')
escaped = cgi.escape(h.really_unicode(source))
return h.html.literal(u'<pre>%s</pre>' % escaped)
try:
return markdown.Markdown.convert(self, source)
except Exception:
log.info('Invalid markdown: %s Upwards trace is %s', source,
''.join(traceback.format_stack()), exc_info=True)
escaped = h.really_unicode(source)
escaped = cgi.escape(escaped)
return h.html.literal(u"""<p><strong>ERROR!</strong> The markdown supplied could not be parsed correctly.
Did you forget to surround a code snippet with "~~~~"?</p><pre>%s</pre>""" % escaped)
def cached_convert(self, artifact, field_name):
"""Convert ``artifact.field_name`` markdown source to html, caching
the result if the render time is greater than the defined threshold.
"""
source_text = getattr(artifact, field_name)
# If the text contains a macro ("[["), never cache it
if "[[" in source_text:
return self.convert(source_text)
cache_field_name = field_name + '_cache'
cache = getattr(artifact, cache_field_name, None)
if not cache:
log.warn(
'Skipping Markdown caching - Missing cache field "%s" on class %s',
field_name, artifact.__class__.__name__)
return self.convert(source_text)
bugfix_rev = 2 # increment this if we need all caches to be invalidated (e.g. xss in markdown rendering fixed)
md5 = None
# If a cached version exists and it is valid, return it.
if cache.md5 is not None:
md5 = hashlib.md5(source_text.encode('utf-8')).hexdigest()
if cache.md5 == md5 and getattr(cache, 'fix7528', False) == bugfix_rev:
return h.html.literal(cache.html)
# Convert the markdown and time the result.
start = time.time()
html = self.convert(source_text, render_limit=False)
render_time = time.time() - start
threshold = config.get('markdown_cache_threshold')
try:
threshold = float(threshold) if threshold else None
except ValueError:
threshold = None
log.warn('Skipping Markdown caching - The value for config param '
'"markdown_cache_threshold" must be a float.')
if threshold is not None and render_time > threshold:
# Save the cache
if md5 is None:
md5 = hashlib.md5(source_text.encode('utf-8')).hexdigest()
cache.md5, cache.html, cache.render_time = md5, html, render_time
cache.fix7528 = bugfix_rev # flag to indicate good caches created after [#7528] and other critical bugs were fixed.
# Prevent cache creation from updating the mod_date timestamp.
_session = artifact_orm_session._get()
_session.skip_mod_date = True
return html
class NeighborhoodCache(object):
"""Cached Neighborhood objects by url_prefix.
For faster RootController.__init__ lookup
"""
def __init__(self, duration):
self.duration = duration
self._data = {}
def _lookup(self, url_prefix):
n = M.Neighborhood.query.get(url_prefix=url_prefix)
self._data[url_prefix] = {
'object': n,
'ts': datetime.datetime.utcnow(),
}
return n
def _expired(self, n):
delta = datetime.datetime.utcnow() - n['ts']
if delta >= datetime.timedelta(seconds=self.duration):
return True
return False
def get(self, url_prefix):
n = self._data.get(url_prefix)
if n and not self._expired(n):
return n['object']
return self._lookup(url_prefix)
class Globals(object):
"""Container for objects available throughout the life of the application.
One instance of Globals is created during application initialization and
is available during requests via the 'app_globals' variable.
"""
__shared_state = {}
def __init__(self):
self.__dict__ = self.__shared_state
if self.__shared_state:
return
self.allura_templates = pkg_resources.resource_filename(
'allura', 'templates')
# Setup SOLR
self.solr_server = aslist(config.get('solr.server'), ',')
# skip empty strings in case of extra commas
self.solr_server = [s for s in self.solr_server if s]
self.solr_query_server = config.get('solr.query_server')
if self.solr_server:
self.solr = make_solr_from_config(
self.solr_server, self.solr_query_server)
self.solr_short_timeout = make_solr_from_config(
self.solr_server, self.solr_query_server,
timeout=int(config.get('solr.short_timeout', 10)))
else: # pragma no cover
log.warning('Solr config not set; using in-memory MockSOLR')
self.solr = self.solr_short_timeout = MockSOLR()
# Load login/logout urls; only used for customized logins
self.login_url = config.get('auth.login_url', '/auth/')
self.logout_url = config.get('auth.logout_url', '/auth/logout')
self.login_fragment_url = config.get(
'auth.login_fragment_url', '/auth/login_fragment/')
# Setup Gravatar
self.gravatar = gravatar.url
# Setup pygments
self.pygments_formatter = utils.LineAnchorCodeHtmlFormatter(
cssclass='codehilite',
linenos='table')
# Setup Pypeline
self.pypeline_markup = pypeline_markup
# Setup analytics
accounts = config.get('ga.account', 'UA-XXXXX-X')
accounts = accounts.split(' ')
self.analytics = analytics.GoogleAnalytics(accounts=accounts)
self.icons = dict(
move=Icon('fa fa-arrows', 'Move'),
edit=Icon('fa fa-edit', 'Edit'),
admin=Icon('fa fa-gear', 'Admin'),
send=Icon('fa fa-send-o', 'Send'),
add=Icon('fa fa-plus-circle', 'Add'),
moderate=Icon('fa fa-hand-stop-o', 'Moderate'),
pencil=Icon('fa fa-pencil', 'Edit'),
help=Icon('fa fa-question-circle', 'Help'),
eye=Icon('fa fa-eye', 'View'),
search=Icon('fa fa-search', 'Search'),
history=Icon('fa fa-calendar', 'History'),
feed=Icon('fa fa-rss', 'Feed'),
mail=Icon('fa fa-envelope-o', 'Subscribe'),
reply=Icon('fa fa-reply', 'Reply'),
tag=Icon('fa fa-tag', 'Tag'),
flag=Icon('fa fa-flag-o', 'Flag'),
undelete=Icon('fa fa-undo', 'Undelete'),
delete=Icon('fa fa-trash-o', 'Delete'),
close=Icon('fa fa-close', 'Close'),
table=Icon('fa fa-table', 'Table'),
stats=Icon('fa fa-line-chart', 'Stats'),
pin=Icon('fa fa-mail-pin', 'Pin'),
folder=Icon('fa fa-folder', 'Folder'),
fork=Icon('fa fa-code-fork', 'Fork'),
merge=Icon('fa fa-code-fork upside-down', 'Merge'),
conversation=Icon('fa fa-comments', 'Conversation'),
group=Icon('fa fa-group', 'Group'),
user=Icon('fa fa-user', 'User'),
secure=Icon('fa fa-lock', 'Lock'),
unsecure=Icon('fa fa-unlock', 'Unlock'),
star=Icon('fa fa-star', 'Star'),
expand=Icon('fa fa-expand', 'Maximize'),
restore=Icon('fa fa-compress', 'Restore'),
check=Icon('fa fa-check-circle', 'Check'),
caution=Icon('fa fa-ban', 'Caution'),
vote_up=Icon('fa fa-plus', 'Vote Up'),
vote_down=Icon('fa fa-minus', 'Vote Down'),
download=Icon('fa fa-download', 'Download'),
revert=Icon('fa fa-history', 'Revert'),
browse_commits=Icon('fa fa-list', 'Browse Commits'),
file=Icon('fa fa-file-o', 'File'),
# Permissions
perm_read=Icon('fa fa-eye', 'Read'),
perm_update=Icon('fa fa-rotate-left', 'Update'),
perm_create=Icon('fa fa-flash', 'Create'),
perm_register=Icon('fa fa-gear', 'Config'),
perm_delete=Icon('fa fa-minus-circle', 'Remove'),
perm_tool=Icon('fa fa-gear', 'Tool'),
perm_admin=Icon('fa fa-gear', 'Admin'),
perm_has_yes=Icon('fa fa-check', 'Check'),
perm_has_no=Icon('fa fa-ban', 'No entry'),
perm_has_inherit=Icon('fa fa-check-circle', 'Has inherit'),
)
# Cache some loaded entry points
def _cache_eps(section_name, dict_cls=dict):
d = dict_cls()
for ep in h.iter_entry_points(section_name):
value = ep.load()
d[ep.name] = value
return d
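# Sections not preloaded below are loaded lazily on first access via __missing__
# and then cached on the dict instance.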
class entry_point_loading_dict(dict):
def __missing__(self, key):
self[key] = _cache_eps(key)
return self[key]
self.entry_points = entry_point_loading_dict(
tool=_cache_eps('allura', dict_cls=utils.CaseInsensitiveDict),
auth=_cache_eps('allura.auth'),
registration=_cache_eps('allura.project_registration'),
theme=_cache_eps('allura.theme'),
user_prefs=_cache_eps('allura.user_prefs'),
spam=_cache_eps('allura.spam'),
phone=_cache_eps('allura.phone'),
stats=_cache_eps('allura.stats'),
site_stats=_cache_eps('allura.site_stats'),
admin=_cache_eps('allura.admin'),
site_admin=_cache_eps('allura.site_admin'),
# macro eps are used solely for ensuring that external macros are
# imported (after load, the ep itself is not used)
macros=_cache_eps('allura.macros'),
webhooks=_cache_eps('allura.webhooks'),
)
# Neighborhood cache
duration = asint(config.get('neighborhood.cache.duration', 0))
self.neighborhood_cache = NeighborhoodCache(duration)
# Set listeners to update stats
statslisteners = []
for name, ep in self.entry_points['stats'].iteritems():
statslisteners.append(ep())
self.statsUpdater = PostEvent(statslisteners)
self.tmpdir = os.getenv('TMPDIR', '/tmp')
@LazyProperty
def spam_checker(self):
"""Return a SpamFilter implementation.
"""
from allura.lib import spam
return spam.SpamFilter.get(config, self.entry_points['spam'])
@LazyProperty
def phone_service(self):
"""Return a :class:`allura.lib.phone.PhoneService` implementation"""
from allura.lib import phone
return phone.PhoneService.get(config, self.entry_points['phone'])
@LazyProperty
def director(self):
"""Return activitystream director"""
if asbool(config.get('activitystream.recording.enabled', False)):
return activitystream.director()
else:
class NullActivityStreamDirector(object):
def connect(self, *a, **kw):
pass
def disconnect(self, *a, **kw):
pass
def is_connected(self, *a, **kw):
return False
def create_activity(self, *a, **kw):
pass
def create_timeline(self, *a, **kw):
pass
def create_timelines(self, *a, **kw):
pass
def get_timeline(self, *a, **kw):
return []
return NullActivityStreamDirector()
def post_event(self, topic, *args, **kwargs):
allura.tasks.event_tasks.event.post(topic, *args, **kwargs)
@LazyProperty
def theme(self):
return plugin.ThemeProvider.get()
@property
def antispam(self):
a = request.environ.get('allura.antispam')
if a is None:
a = request.environ['allura.antispam'] = utils.AntiSpam()
return a
@property
def credentials(self):
return Credentials.get()
def handle_paging(self, limit, page, default=25):
limit = self.manage_paging_preference(limit, default)
limit = max(int(limit), 1)
limit = min(limit, asint(config.get('limit_param_max', 500)))
page = max(int(page), 0)
start = page * int(limit)
return (limit, page, start)
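# For example, with the default 'limit_param_max' of 500, handle_paging('100', 2)
# returns (100, 2, 200): the limit is clamped to [1, 500] and start = page * limit.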
def manage_paging_preference(self, limit, default=25):
if not limit:
if c.user in (None, M.User.anonymous()):
limit = default
else:
limit = c.user.get_pref('results_per_page') or default
return int(limit)
def document_class(self, neighborhood):
classes = ''
if neighborhood:
classes += ' neighborhood-%s' % neighborhood.name
if not neighborhood and c.project:
classes += ' neighborhood-%s' % c.project.neighborhood.name
if c.project:
classes += ' project-%s' % c.project.shortname
if c.app:
classes += ' mountpoint-%s' % c.app.config.options.mount_point
return classes
def highlight(self, text, lexer=None, filename=None):
if not text:
if lexer == 'diff':
return h.html.literal('<em>File contents unchanged</em>')
return h.html.literal('<em>Empty file</em>')
# Don't use line numbers for diff highlights, as per [#1484]
if lexer == 'diff':
formatter = pygments.formatters.HtmlFormatter(
cssclass='codehilite', linenos=False)
else:
formatter = self.pygments_formatter
if lexer is None:
try:
lexer = pygments.lexers.get_lexer_for_filename(
filename, encoding='chardet')
except pygments.util.ClassNotFound:
# no highlighting, but we should escape, encode, and wrap it in
# a <pre>
text = h.really_unicode(text)
text = cgi.escape(text)
return h.html.literal(u'<pre>' + text + u'</pre>')
else:
lexer = pygments.lexers.get_lexer_by_name(
lexer, encoding='chardet')
return h.html.literal(pygments.highlight(text, lexer, formatter))
def forge_markdown(self, **kwargs):
'''return a markdown.Markdown object on which you can call convert'''
return ForgeMarkdown(
# 'fenced_code'
extensions=['codehilite',
ForgeExtension(
**kwargs), 'tables', 'toc', 'nl2br'],
output_format='html4')
@property
def markdown(self):
return self.forge_markdown()
@property
def markdown_wiki(self):
if c.project.is_nbhd_project:
return self.forge_markdown(wiki=True, macro_context='neighborhood-wiki')
elif c.project.is_user_project:
return self.forge_markdown(wiki=True, macro_context='userproject-wiki')
else:
return self.forge_markdown(wiki=True)
@property
def markdown_commit(self):
"""Return a Markdown parser configured for rendering commit messages.
"""
app = getattr(c, 'app', None)
return ForgeMarkdown(extensions=[CommitMessageExtension(app), 'nl2br'],
output_format='html4')
@property
def production_mode(self):
return not asbool(config.get('debug'))
@LazyProperty
def user_message_time_interval(self):
"""The rolling window of time (in seconds) during which no more than
:meth:`user_message_max_messages` may be sent by any one user.
"""
return int(config.get('user_message.time_interval', 3600))
@LazyProperty
def user_message_max_messages(self):
"""The number of user messages that can be sent within
:meth:`user_message_time_interval` before rate-limiting is enforced.
"""
return int(config.get('user_message.max_messages', 20))
@LazyProperty
def server_name(self):
p1 = Popen(['hostname', '-s'], stdout=PIPE)
server_name = p1.communicate()[0].strip()
p1.wait()
return server_name
@property
def tool_icon_css(self):
"""Return a (css, md5) tuple, where ``css`` is a string of CSS
containing class names and icon urls for every installed tool, and
``md5`` is the md5 hexdigest of ``css``.
"""
css = ''
for tool_name in self.entry_points['tool']:
for size in (24, 32, 48):
url = self.theme.app_icon_url(tool_name.lower(), size)
css += '.ui-icon-tool-%s-%i {background: url(%s) no-repeat;}\n' % (
tool_name, size, url)
return css, hashlib.md5(css).hexdigest()
@property
def resource_manager(self):
return ew_core.widget_context.resource_manager
def register_css(self, href, **kw):
self.resource_manager.register(ew.CSSLink(href, **kw))
def register_js(self, href, **kw):
self.resource_manager.register(ew.JSLink(href, **kw))
def register_forge_css(self, href, **kw):
self.resource_manager.register(ew.CSSLink('allura/' + href, **kw))
def register_forge_js(self, href, **kw):
self.resource_manager.register(ew.JSLink('allura/' + href, **kw))
def register_app_css(self, href, **kw):
app = kw.pop('app', c.app)
self.resource_manager.register(
ew.CSSLink('tool/%s/%s' % (app.config.tool_name.lower(), href), **kw))
def register_app_js(self, href, **kw):
app = kw.pop('app', c.app)
self.resource_manager.register(
ew.JSLink('tool/%s/%s' % (app.config.tool_name.lower(), href), **kw))
def register_theme_css(self, href, **kw):
self.resource_manager.register(ew.CSSLink(self.theme_href(href), **kw))
def register_theme_js(self, href, **kw):
self.resource_manager.register(ew.JSLink(self.theme_href(href), **kw))
def register_js_snippet(self, text, **kw):
self.resource_manager.register(ew.JSScript(text, **kw))
def theme_href(self, href):
return self.theme.href(href)
def forge_static(self, resource):
base = config['static.url_base']
if base.startswith(':'):
base = request.scheme + base
return base + resource
def app_static(self, resource, app=None):
base = config['static.url_base']
app = app or c.app
if base.startswith(':'):
base = request.scheme + base
return (base + app.config.tool_name.lower() + '/' + resource)
def set_project(self, pid_or_project):
'h.set_context() is preferred over this method'
if isinstance(pid_or_project, M.Project):
c.project = pid_or_project
elif isinstance(pid_or_project, basestring):
raise TypeError('need a Project instance, got %r' % pid_or_project)
elif pid_or_project is None:
c.project = None
else:
c.project = None
log.error('Trying g.set_project(%r)', pid_or_project)
def set_app(self, name):
'h.set_context() is preferred over this method'
c.app = c.project.app_instance(name)
def url(self, base, **kw):
params = urlencode(kw)
if params:
return '%s%s?%s' % (request.host_url, base, params)
else:
return '%s%s' % (request.host_url, base)
def postload_contents(self):
text = '''
'''
return json.dumps(dict(text=text))
def year(self):
return datetime.datetime.utcnow().year
@LazyProperty
def noreply(self):
return unicode(config.get('noreply', 'noreply@%s' % config['domain']))
@property
def build_key(self):
return config.get('build_key', '')
class Icon(object):
def __init__(self, css, title=None):
self.css = css
self.title = title or u''
def render(self, show_title=False, extra_css=None, closing_tag=True, tag='a', **kw):
title = kw.get('title') or self.title
attrs = {
'title': title,
'class': ' '.join(['icon', extra_css or '']).strip(),
}
if tag == 'a':
attrs['href'] = '#'
attrs.update(kw)
attrs = ew._Jinja2Widget().j2_attrs(attrs)
visible_title = u''
if show_title:
visible_title = u' {}'.format(Markup.escape(title))
closing_tag = u'</{}>'.format(tag) if closing_tag else u''
icon = u'<{} {}><i class="{}"></i>{}{}'.format(tag, attrs, self.css, visible_title, closing_tag)
return Markup(icon)
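# A rough usage sketch: g.icons['edit'].render(show_title=True) produces markup along
# the lines of '<a href="#" title="Edit" class="icon"><i class="fa fa-edit"></i> Edit</a>'
# (exact attribute order depends on j2_attrs).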
|
|
import datetime
import os.path
import zipfile
from django.core import mail
from django.core.files import temp
from django.core.files.base import File as DjangoFile
from django.urls import reverse
from unittest import mock
from pyquery import PyQuery as pq
from waffle.testutils import override_switch
from olympia import amo
from olympia.activity.models import ActivityLog
from olympia.activity.utils import ACTIVITY_MAIL_GROUP
from olympia.addons.models import Addon, AddonReviewerFlags
from olympia.amo.templatetags.jinja_helpers import absolutify
from olympia.amo.tests import (
TestCase,
formset,
initial,
reverse_ns,
version_factory,
user_factory,
)
from olympia.applications.models import AppVersion
from olympia.constants.promoted import RECOMMENDED
from olympia.users.models import Group, UserProfile
from olympia.versions.models import ApplicationsVersions, Version
class TestVersion(TestCase):
fixtures = ['base/users', 'base/addon_3615']
def setUp(self):
super().setUp()
self.client.login(email='del@icio.us')
self.user = UserProfile.objects.get(email='del@icio.us')
self.addon = self.get_addon()
self.version = Version.objects.get(id=81551)
self.url = self.addon.get_dev_url('versions')
self.disable_url = self.addon.get_dev_url('disable')
self.enable_url = self.addon.get_dev_url('enable')
self.delete_url = reverse('devhub.versions.delete', args=['a3615'])
self.delete_data = {'addon_id': self.addon.pk, 'version_id': self.version.pk}
def get_addon(self):
return Addon.objects.get(id=3615)
def get_doc(self):
response = self.client.get(self.url)
assert response.status_code == 200
return pq(response.content)
def test_version_status_public(self):
doc = self.get_doc()
assert doc('.addon-status')
self.addon.update(status=amo.STATUS_DISABLED, disabled_by_user=True)
doc = self.get_doc()
assert doc('.addon-status .status-admin-disabled')
assert doc('.addon-status .status-admin-disabled').text() == (
'Disabled by Mozilla'
)
self.addon.update(disabled_by_user=False)
doc = self.get_doc()
assert doc('.addon-status .status-admin-disabled').text() == (
'Disabled by Mozilla'
)
self.addon.update(status=amo.STATUS_APPROVED, disabled_by_user=True)
doc = self.get_doc()
assert doc('.addon-status .status-disabled').text() == ('Invisible')
def test_label_open_marked_safe(self):
doc = self.get_doc()
assert '<strong>Visible:</strong>' in doc.html()
self.addon.update(status=amo.STATUS_APPROVED, disabled_by_user=True)
doc = self.get_doc()
assert '<strong>Invisible:</strong>' in doc.html()
def test_upload_link_label_in_edit_nav(self):
url = reverse('devhub.versions.edit', args=(self.addon.slug, self.version.pk))
response = self.client.get(url)
link = pq(response.content)('.addon-status>.addon-upload>strong>a')
assert link.text() == 'Upload New Version'
assert link.attr('href') == (
reverse('devhub.submit.version', args=[self.addon.slug])
)
# Don't show for STATUS_DISABLED addons.
self.addon.update(status=amo.STATUS_DISABLED)
response = self.client.get(url)
assert not pq(response.content)('.addon-status>.addon-upload>strong>a')
def test_delete_message(self):
"""Make sure we warn our users of the pain they will feel."""
response = self.client.get(self.url)
doc = pq(response.content)
assert doc('#modal-delete p').eq(0).text() == (
'Deleting your add-on will permanently delete all versions and '
'files you have submitted for this add-on, listed or not. '
'The add-on ID cannot be restored and will forever be unusable '
'for submission.'
)
@override_switch('allow-deleted-guid-reuse', active=True)
def test_delete_message_if_allow_deleted_guid_reuse_is_on(self):
response = self.client.get(self.url)
doc = pq(response.content)
assert doc('#modal-delete p').eq(0).text() == (
'Deleting your add-on will permanently delete all versions and '
'files you have submitted for this add-on, listed or not. '
'The add-on ID will continue to be linked to your account, so '
"others won't be able to submit versions using the same ID."
)
def test_delete_message_incomplete(self):
"""
If an addon has status = 0, they shouldn't be bothered with a
deny list threat if they hit delete.
"""
# Need to hard delete the version or add-on will be soft-deleted.
self.addon.current_version.delete(hard=True)
self.addon.reload()
assert self.addon.status == amo.STATUS_NULL
response = self.client.get(self.url)
doc = pq(response.content)
# Normally 2 paragraphs, one is the warning which we should take out.
assert doc('#modal-delete p.warning').length == 0
def test_delete_version(self):
self.client.post(self.delete_url, self.delete_data)
assert not Version.objects.filter(pk=81551).exists()
assert ActivityLog.objects.filter(action=amo.LOG.DELETE_VERSION.id).count() == 1
def test_version_delete_version_deleted(self):
self.version.delete()
response = self.client.post(self.delete_url, self.delete_data)
assert response.status_code == 404
def test_cant_delete_version(self):
self.client.logout()
response = self.client.post(self.delete_url, self.delete_data)
assert response.status_code == 302
assert Version.objects.filter(pk=81551).exists()
def test_version_delete_status_null(self):
response = self.client.post(self.delete_url, self.delete_data)
assert response.status_code == 302
assert self.addon.versions.count() == 0
assert Addon.objects.get(id=3615).status == amo.STATUS_NULL
def test_disable_version(self):
self.delete_data['disable_version'] = ''
self.client.post(self.delete_url, self.delete_data)
assert Version.objects.get(pk=81551).is_user_disabled
assert ActivityLog.objects.filter(action=amo.LOG.DELETE_VERSION.id).count() == 0
assert (
ActivityLog.objects.filter(action=amo.LOG.DISABLE_VERSION.id).count() == 1
)
def test_cant_disable_or_delete_current_version_recommended(self):
# If the add-on is recommended you can't disable or delete the current
# version.
self.make_addon_promoted(self.addon, RECOMMENDED, approve_version=True)
assert self.version == self.addon.current_version
self.client.post(self.delete_url, self.delete_data)
assert Version.objects.filter(pk=81551).exists()
assert not Version.objects.get(pk=81551).is_user_disabled
assert ActivityLog.objects.filter(action=amo.LOG.DELETE_VERSION.id).count() == 0
assert (
ActivityLog.objects.filter(action=amo.LOG.DISABLE_VERSION.id).count() == 0
)
self.delete_data['disable_version'] = ''
self.client.post(self.delete_url, self.delete_data)
assert Version.objects.filter(pk=81551).exists()
assert not Version.objects.get(pk=81551).is_user_disabled
assert ActivityLog.objects.filter(action=amo.LOG.DELETE_VERSION.id).count() == 0
assert (
ActivityLog.objects.filter(action=amo.LOG.DISABLE_VERSION.id).count() == 0
)
def test_can_disable_or_delete_current_ver_if_previous_recommended(self):
# If the add-on is recommended you *can* disable or delete the current
# version if the previous version is approved for recommendation too.
self.make_addon_promoted(self.addon, RECOMMENDED, approve_version=True)
previous_version = self.version
self.version = version_factory(addon=self.addon, promotion_approved=True)
self.addon.reload()
assert self.version == self.addon.current_version
assert previous_version != self.version
self.delete_data['version_id'] = self.version.id
self.delete_data['disable_version'] = ''
self.client.post(self.delete_url, self.delete_data)
assert Version.objects.filter(pk=self.version.id).exists()
assert Version.objects.get(pk=self.version.id).is_user_disabled
assert ActivityLog.objects.filter(action=amo.LOG.DELETE_VERSION.id).count() == 0
assert (
ActivityLog.objects.filter(action=amo.LOG.DISABLE_VERSION.id).count() == 1
)
del self.delete_data['disable_version']
self.client.post(self.delete_url, self.delete_data)
assert not Version.objects.filter(pk=self.version.id).exists()
assert ActivityLog.objects.filter(action=amo.LOG.DELETE_VERSION.id).count() == 1
assert (
ActivityLog.objects.filter(action=amo.LOG.DISABLE_VERSION.id).count() == 1
)
self.addon.reload()
assert self.addon.current_version == previous_version
# It's still recommended.
assert self.addon.promoted_group() == RECOMMENDED
def test_can_still_disable_or_delete_old_version_recommended(self):
# If the add-on is recommended, you can still disable or delete older
# versions than the current one.
self.make_addon_promoted(self.addon, RECOMMENDED, approve_version=True)
version_factory(addon=self.addon, promotion_approved=True)
self.addon.reload()
assert self.version != self.addon.current_version
self.delete_data['disable_version'] = ''
self.client.post(self.delete_url, self.delete_data)
assert Version.objects.filter(pk=81551).exists()
assert Version.objects.get(pk=81551).is_user_disabled
assert ActivityLog.objects.filter(action=amo.LOG.DELETE_VERSION.id).count() == 0
assert (
ActivityLog.objects.filter(action=amo.LOG.DISABLE_VERSION.id).count() == 1
)
del self.delete_data['disable_version']
self.client.post(self.delete_url, self.delete_data)
assert not Version.objects.filter(pk=81551).exists()
assert ActivityLog.objects.filter(action=amo.LOG.DELETE_VERSION.id).count() == 1
assert (
ActivityLog.objects.filter(action=amo.LOG.DISABLE_VERSION.id).count() == 1
)
def test_can_still_disable_or_delete_current_version_unapproved(self):
# If the add-on is in recommended group but hasn't got approval yet,
# then deleting the current version is fine.
self.make_addon_promoted(self.addon, RECOMMENDED)
assert self.version == self.addon.current_version
self.delete_data['disable_version'] = ''
self.client.post(self.delete_url, self.delete_data)
assert Version.objects.filter(pk=81551).exists()
assert Version.objects.get(pk=81551).is_user_disabled
assert ActivityLog.objects.filter(action=amo.LOG.DELETE_VERSION.id).count() == 0
assert (
ActivityLog.objects.filter(action=amo.LOG.DISABLE_VERSION.id).count() == 1
)
del self.delete_data['disable_version']
self.client.post(self.delete_url, self.delete_data)
assert not Version.objects.filter(pk=81551).exists()
assert ActivityLog.objects.filter(action=amo.LOG.DELETE_VERSION.id).count() == 1
assert (
ActivityLog.objects.filter(action=amo.LOG.DISABLE_VERSION.id).count() == 1
)
def test_reenable_version(self):
Version.objects.get(pk=81551).file.update(
status=amo.STATUS_DISABLED, original_status=amo.STATUS_APPROVED
)
self.reenable_url = reverse('devhub.versions.reenable', args=['a3615'])
response = self.client.post(self.reenable_url, self.delete_data, follow=True)
assert response.status_code == 200
assert not Version.objects.get(pk=81551).is_user_disabled
assert ActivityLog.objects.filter(action=amo.LOG.ENABLE_VERSION.id).count() == 1
def test_reenable_deleted_version(self):
Version.objects.get(pk=81551).delete()
self.delete_url = reverse('devhub.versions.reenable', args=['a3615'])
response = self.client.post(self.delete_url, self.delete_data)
assert response.status_code == 404
assert ActivityLog.objects.filter(action=amo.LOG.ENABLE_VERSION.id).count() == 0
def _extra_version_and_file(self, status):
version = Version.objects.get(id=81551)
version_two = version_factory(
addon=self.addon,
license=version.license,
version='1.2.3',
file_kw={'status': status},
)
return version_two, version_two.file
def test_version_delete_status(self):
self._extra_version_and_file(amo.STATUS_APPROVED)
response = self.client.post(self.delete_url, self.delete_data)
assert response.status_code == 302
assert self.addon.versions.count() == 1
assert Addon.objects.get(id=3615).status == amo.STATUS_APPROVED
def test_version_delete_status_unreviewed(self):
self._extra_version_and_file(amo.STATUS_AWAITING_REVIEW)
response = self.client.post(self.delete_url, self.delete_data)
assert response.status_code == 302
assert self.addon.versions.count() == 1
assert Addon.objects.get(id=3615).status == amo.STATUS_NOMINATED
@mock.patch('olympia.files.models.File.hide_disabled_file')
def test_user_can_disable_addon(self, hide_mock):
version = self.addon.current_version
self.addon.update(status=amo.STATUS_APPROVED, disabled_by_user=False)
response = self.client.post(self.disable_url)
assert response.status_code == 302
addon = Addon.objects.get(id=3615)
version.reload()
assert addon.disabled_by_user
assert addon.status == amo.STATUS_APPROVED
assert hide_mock.called
# Check we didn't change the status of the files.
assert version.file.status == amo.STATUS_APPROVED
entry = ActivityLog.objects.get()
assert entry.action == amo.LOG.USER_DISABLE.id
msg = entry.to_string()
assert str(self.addon.name) in msg, 'Unexpected: %r' % msg
@mock.patch('olympia.files.models.File.hide_disabled_file')
def test_user_can_disable_addon_pending_version(self, hide_mock):
self.addon.update(status=amo.STATUS_APPROVED, disabled_by_user=False)
(new_version, _) = self._extra_version_and_file(amo.STATUS_AWAITING_REVIEW)
assert (
self.addon.find_latest_version(channel=amo.RELEASE_CHANNEL_LISTED)
== new_version
)
response = self.client.post(self.disable_url)
assert response.status_code == 302
addon = Addon.objects.get(id=3615)
assert addon.disabled_by_user
assert addon.status == amo.STATUS_APPROVED
assert hide_mock.called
# Check we disabled the file pending review.
new_version.file.reload()
assert new_version.file.status == amo.STATUS_DISABLED
# latest version should be reset when the file/version was disabled.
assert (
self.addon.find_latest_version(channel=amo.RELEASE_CHANNEL_LISTED)
!= new_version
)
entry = ActivityLog.objects.latest('pk')
assert entry.action == amo.LOG.USER_DISABLE.id
msg = entry.to_string()
assert str(self.addon.name) in msg, 'Unexpected: %r' % msg
@mock.patch('olympia.files.models.File.hide_disabled_file')
def test_disabling_addon_awaiting_review_disables_version(self, hide_mock):
self.addon.update(status=amo.STATUS_AWAITING_REVIEW, disabled_by_user=False)
self.version.file.update(status=amo.STATUS_AWAITING_REVIEW)
res = self.client.post(self.disable_url)
assert res.status_code == 302
addon = Addon.objects.get(id=3615)
assert addon.disabled_by_user
assert addon.status == amo.STATUS_NULL
assert hide_mock.called
# Check we disabled the file pending review.
self.version = Version.objects.get(id=self.version.id)
assert self.version.file.status == amo.STATUS_DISABLED
def test_user_get(self):
assert self.client.get(self.enable_url).status_code == 405
def test_user_can_enable_addon(self):
self.addon.update(status=amo.STATUS_APPROVED, disabled_by_user=True)
response = self.client.post(self.enable_url)
self.assert3xx(response, self.url, 302)
addon = self.get_addon()
assert not addon.disabled_by_user
assert addon.status == amo.STATUS_APPROVED
entry = ActivityLog.objects.get()
assert entry.action == amo.LOG.USER_ENABLE.id
msg = entry.to_string()
assert str(self.addon.name) in msg, 'Unexpected: %r' % msg
def test_unprivileged_user_cant_disable_addon(self):
self.addon.update(disabled_by_user=False)
self.client.logout()
response = self.client.post(self.disable_url)
assert response.status_code == 302
assert not Addon.objects.get(id=3615).disabled_by_user
def test_non_owner_cant_disable_addon(self):
self.addon.update(disabled_by_user=False)
self.client.logout()
assert self.client.login(email='regular@mozilla.com')
response = self.client.post(self.disable_url)
assert response.status_code == 403
assert not Addon.objects.get(id=3615).disabled_by_user
def test_non_owner_cant_enable_addon(self):
self.addon.update(disabled_by_user=False)
self.client.logout()
assert self.client.login(email='regular@mozilla.com')
response = self.client.get(self.enable_url)
assert response.status_code == 403
assert not Addon.objects.get(id=3615).disabled_by_user
def test_non_owner_cant_change_status(self):
"""A non-owner can't use the radio buttons."""
self.addon.update(disabled_by_user=False)
self.client.logout()
assert self.client.login(email='regular@mozilla.com')
response = self.client.get(self.url)
assert response.status_code == 403
def test_published_addon_radio(self):
"""Published (listed) addon is selected: can hide or publish."""
self.addon.update(disabled_by_user=False)
response = self.client.get(self.url)
doc = pq(response.content)
assert doc('.enable-addon').attr('checked') == 'checked'
enable_url = self.addon.get_dev_url('enable')
assert doc('.enable-addon').attr('data-url') == enable_url
assert not doc('.enable-addon').attr('disabled')
assert doc('#modal-disable')
assert not doc('.disable-addon').attr('checked')
assert not doc('.disable-addon').attr('disabled')
def test_hidden_addon_radio(self):
"""Hidden (disabled) addon is selected: can hide or publish."""
self.addon.update(disabled_by_user=True)
response = self.client.get(self.url)
doc = pq(response.content)
assert not doc('.enable-addon').attr('checked')
assert not doc('.enable-addon').attr('disabled')
assert doc('.disable-addon').attr('checked') == 'checked'
assert not doc('.disable-addon').attr('disabled')
assert not doc('#modal-disable')
def test_status_disabled_addon_radio(self):
"""Disabled by Mozilla addon: hidden selected, can't change status."""
self.addon.update(status=amo.STATUS_DISABLED, disabled_by_user=False)
response = self.client.get(self.url)
doc = pq(response.content)
assert not doc('.enable-addon').attr('checked')
assert doc('.enable-addon').attr('disabled') == 'disabled'
assert doc('.disable-addon').attr('checked') == 'checked'
assert doc('.disable-addon').attr('disabled') == 'disabled'
def test_no_listed_versions_already_enabled(self):
self.addon.versions.all().delete()
response = self.client.get(self.url)
doc = pq(response.content)
assert not doc('.enable-addon')
assert not doc('.disable-addon')
def test_no_listed_versions_already_disabled(self):
# If somehow the add-on has no listed versions but is invisible, we
# allow them to switch back to visible so that they can submit listed
# versions.
self.addon.versions.all().delete()
self.addon.update(disabled_by_user=True)
response = self.client.get(self.url)
doc = pq(response.content)
assert doc('.enable-addon')
assert doc('.disable-addon')
assert not doc('.enable-addon').attr('checked')
assert not doc('.enable-addon').attr('disabled')
assert doc('.disable-addon').attr('checked') == 'checked'
assert not doc('.disable-addon').attr('disabled')
def test_cancel_get(self):
cancel_url = reverse('devhub.addons.cancel', args=['a3615', 'listed'])
assert self.client.get(cancel_url).status_code == 405
def test_cancel_wrong_status(self):
cancel_url = reverse('devhub.addons.cancel', args=['a3615', 'listed'])
for status in Addon.STATUS_CHOICES:
if status in (amo.STATUS_NOMINATED, amo.STATUS_DELETED):
continue
self.addon.update(status=status)
self.client.post(cancel_url)
assert Addon.objects.get(id=3615).status == status
def test_cancel(self):
cancel_url = reverse('devhub.addons.cancel', args=['a3615', 'listed'])
self.addon.update(status=amo.STATUS_NOMINATED)
self.client.post(cancel_url)
assert Addon.objects.get(id=3615).status == amo.STATUS_NULL
def test_cancel_obey_channel_listed(self):
addon = Addon.objects.get(id=3615)
file_ = addon.current_version.file
file_.update(status=amo.STATUS_AWAITING_REVIEW)
unlisted_file = version_factory(
addon=addon,
channel=amo.RELEASE_CHANNEL_UNLISTED,
file_kw={'status': amo.STATUS_AWAITING_REVIEW},
).file
cancel_url = reverse('devhub.addons.cancel', args=['a3615', 'listed'])
self.client.post(cancel_url)
file_.reload()
assert file_.status == amo.STATUS_DISABLED
unlisted_file.reload()
assert unlisted_file.status == amo.STATUS_AWAITING_REVIEW
addon.reload()
assert addon.status == amo.STATUS_NULL
def test_cancel_obey_channel_unlisted(self):
addon = Addon.objects.get(id=3615)
version = addon.current_version
version.update(channel=amo.RELEASE_CHANNEL_UNLISTED)
file_ = version.file
file_.update(status=amo.STATUS_AWAITING_REVIEW)
listed_file = version_factory(
addon=addon,
file_kw={'status': amo.STATUS_AWAITING_REVIEW},
).file
addon.update(status=amo.STATUS_NOMINATED)
cancel_url = reverse('devhub.addons.cancel', args=['a3615', 'unlisted'])
self.client.post(cancel_url)
file_.reload()
assert file_.status == amo.STATUS_DISABLED
listed_file.reload()
assert listed_file.status == amo.STATUS_AWAITING_REVIEW
addon.reload()
assert addon.status == amo.STATUS_NOMINATED
def test_not_cancel(self):
self.client.logout()
cancel_url = reverse('devhub.addons.cancel', args=['a3615', 'listed'])
assert self.addon.status == amo.STATUS_APPROVED
response = self.client.post(cancel_url)
assert response.status_code == 302
assert Addon.objects.get(id=3615).status == amo.STATUS_APPROVED
def test_cancel_button(self):
for status in Addon.STATUS_CHOICES:
if status != amo.STATUS_NOMINATED:
continue
self.addon.update(status=status)
response = self.client.get(self.url)
doc = pq(response.content)
assert doc('#cancel-review')
assert doc('#modal-cancel')
def test_not_cancel_button(self):
for status in Addon.STATUS_CHOICES:
if status == amo.STATUS_NOMINATED:
continue
self.addon.update(status=status)
response = self.client.get(self.url)
doc = pq(response.content)
assert not doc('#cancel-review'), status
assert not doc('#modal-cancel'), status
def test_incomplete_request_review(self):
self.addon.update(status=amo.STATUS_NULL)
doc = pq(self.client.get(self.url).content)
buttons = doc('.version-status-actions form button').text()
assert buttons == 'Request Review'
def test_in_submission_can_request_review(self):
self.addon.update(status=amo.STATUS_NULL)
latest_version = self.addon.find_latest_version(
channel=amo.RELEASE_CHANNEL_LISTED
)
latest_version.file.update(status=amo.STATUS_DISABLED)
version_factory(addon=self.addon, file_kw={'status': amo.STATUS_DISABLED})
doc = pq(self.client.get(self.url).content)
buttons = doc('.version-status-actions form button')
# We should only show the links for one of the disabled versions.
assert buttons.length == 1
assert buttons.text() == 'Request Review'
def test_reviewed_cannot_request_review(self):
self.addon.update(status=amo.STATUS_NULL)
latest_version = self.addon.find_latest_version(
channel=amo.RELEASE_CHANNEL_LISTED
)
latest_version.file.update(
reviewed=datetime.datetime.now(), status=amo.STATUS_DISABLED
)
version_factory(
addon=self.addon,
file_kw={
'reviewed': datetime.datetime.now(),
'status': amo.STATUS_DISABLED,
},
)
doc = pq(self.client.get(self.url).content)
buttons = doc('.version-status-actions form button')
# We should only show the links for one of the disabled versions.
assert buttons.length == 0
def test_version_history(self):
v1 = self.version
v2, _ = self._extra_version_and_file(amo.STATUS_AWAITING_REVIEW)
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
show_links = doc('.review-history-show')
assert show_links.length == 3
assert show_links[0].attrib['data-div'] == '#%s-review-history' % v1.id
assert not show_links[1].attrib.get('data-div')
assert show_links[2].attrib['data-div'] == '#%s-review-history' % v2.id
# All 3 links will have a 'data-version' attribute.
assert show_links[0].attrib['data-version'] == str(v1.id)
# But the 2nd link will point to the latest version in the channel.
assert show_links[1].attrib['data-version'] == str(v2.id)
assert show_links[2].attrib['data-version'] == str(v2.id)
# Test review history
review_history_td = doc('#%s-review-history' % v1.id)[0]
assert review_history_td.attrib['data-session-id'] == (
self.client.session.session_key
)
api_url = absolutify(
reverse_ns(
'version-reviewnotes-list', args=[self.addon.id, self.version.id]
)
)
assert review_history_td.attrib['data-api-url'] == api_url
assert doc('.review-history-hide').length == 2
pending_activity_count = doc('.review-history-pending-count')
# No counter, because we don't have any pending activity to show.
assert pending_activity_count.length == 0
# Reply box div is there (only one)
assert doc('.dev-review-reply-form').length == 1
review_form = doc('.dev-review-reply-form')[0]
assert review_form.attrib['action'] == api_url
assert review_form.attrib['data-session-id'] == self.client.session.session_key
assert review_form.attrib['data-history'] == '#%s-review-history' % v2.id
def test_version_history_mixed_channels(self):
v1 = self.version
v2, _ = self._extra_version_and_file(amo.STATUS_AWAITING_REVIEW)
v2.update(channel=amo.RELEASE_CHANNEL_UNLISTED)
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
# Should be 2 reply boxes, one for each channel
assert doc('.dev-review-reply-form').length == 2
assert doc('.dev-review-reply-form')[0].attrib['data-history'] == (
'#%s-review-history' % v1.id
)
assert doc('.dev-review-reply-form')[1].attrib['data-history'] == (
'#%s-review-history' % v2.id
)
def test_pending_activity_count(self):
v2, _ = self._extra_version_and_file(amo.STATUS_AWAITING_REVIEW)
# Add some activity log messages
ActivityLog.create(amo.LOG.REVIEWER_REPLY_VERSION, v2.addon, v2, user=self.user)
ActivityLog.create(amo.LOG.REVIEWER_REPLY_VERSION, v2.addon, v2, user=self.user)
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
        # Two versions, but three .review-history-show links, because one of
        # them is the reply link.
assert doc('.review-history-show').length == 3
# Two versions, but only one counter, for the latest/deleted version
pending_activity_count = doc('.review-history-pending-count')
assert pending_activity_count.length == 1
# There are two activity logs pending
assert pending_activity_count.text() == '2'
def test_channel_tag(self):
self.addon.current_version.update(created=self.days_ago(1))
v2, _ = self._extra_version_and_file(amo.STATUS_DISABLED)
self.addon.versions.update(channel=amo.RELEASE_CHANNEL_LISTED)
self.addon.update_version()
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
assert doc('td.file-status').length == 2
# No tag shown because all listed versions
assert doc('span.distribution-tag-listed').length == 0
assert doc('span.distribution-tag-unlisted').length == 0
# Make all the versions unlisted.
self.addon.versions.update(channel=amo.RELEASE_CHANNEL_UNLISTED)
self.addon.update_version()
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
assert doc('td.file-status').length == 2
# No tag shown because all unlisted versions
assert doc('span.distribution-tag-listed').length == 0
assert doc('span.distribution-tag-unlisted').length == 0
# Make one of the versions listed.
v2.update(channel=amo.RELEASE_CHANNEL_LISTED)
v2.file.update(status=amo.STATUS_AWAITING_REVIEW)
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
file_status_tds = doc('td.file-status')
assert file_status_tds.length == 2
        # Channel tags are shown because there are both listed and unlisted versions.
assert file_status_tds('span.distribution-tag-listed').length == 1
assert file_status_tds('span.distribution-tag-unlisted').length == 1
# Extra tags in the headers too
assert doc('h3 span.distribution-tag-listed').length == 2
def test_site_permission(self):
self.addon.update(type=amo.ADDON_SITE_PERMISSION)
# Authors can see the versions page of a site permission add-on.
response = self.client.get(self.url)
assert response.status_code == 200
# They can't delete/disable/enable versions though.
response = self.client.post(self.disable_url)
assert response.status_code == 403
response = self.client.post(self.enable_url)
assert response.status_code == 403
response = self.client.post(self.delete_url, self.delete_data)
assert response.status_code == 403
class TestVersionEditBase(TestCase):
fixtures = ['base/users', 'base/addon_3615']
def setUp(self):
super().setUp()
self.user = UserProfile.objects.get(email='del@icio.us')
self.client.login(email=self.user.email)
self.addon = self.get_addon()
self.version = self.get_version()
self.url = reverse('devhub.versions.edit', args=['a3615', self.version.id])
self.v1, _created = AppVersion.objects.get_or_create(
application=amo.FIREFOX.id, version='1.0'
)
self.v5, _created = AppVersion.objects.get_or_create(
application=amo.FIREFOX.id, version='5.0'
)
def get_addon(self):
return Addon.objects.get(id=3615)
def get_version(self):
return self.get_addon().current_version
def formset(self, *args, **kw):
return formset(*args, **kw)
class TestVersionEditDetails(TestVersionEditBase):
def setUp(self):
super().setUp()
ctx = self.client.get(self.url).context
compat = initial(ctx['compat_form'].forms[0])
self.initial = formset(compat)
def formset(self, *args, **kw):
return super().formset(*args, **{**self.initial, **kw})
def test_edit_notes(self):
data = self.formset(release_notes='xx', approval_notes='yy')
response = self.client.post(self.url, data)
assert response.status_code == 302
version = self.get_version()
assert str(version.release_notes) == 'xx'
assert str(version.approval_notes) == 'yy'
def test_version_number_redirect(self):
url = self.url.replace(str(self.version.id), self.version.version)
response = self.client.get(url, follow=True)
self.assert3xx(response, self.url)
def test_version_deleted(self):
self.version.delete()
response = self.client.get(self.url)
assert response.status_code == 404
data = self.formset(release_notes='xx', approval_notes='yy')
response = self.client.post(self.url, data)
assert response.status_code == 404
def test_cant_upload(self):
response = self.client.get(self.url)
doc = pq(response.content)
assert not doc('a.add-file')
def test_add(self):
response = self.client.get(self.url)
doc = pq(response.content)
assert response.context['compat_form'].extra_forms
assert doc('p.add-app')[0].attrib['class'] == 'add-app'
def test_add_not(self):
for id in [18, 52, 59, 60, 61]:
av = AppVersion(application=id, version='1')
av.save()
ApplicationsVersions(
application=id, min=av, max=av, version=self.version
).save()
response = self.client.get(self.url)
doc = pq(response.content)
assert not response.context['compat_form'].extra_forms
assert doc('p.add-app')[0].attrib['class'] == 'add-app hide'
def test_existing_source_link(self):
with temp.NamedTemporaryFile(
suffix='.zip', dir=temp.gettempdir()
) as source_file:
with zipfile.ZipFile(source_file, 'w') as zip_file:
zip_file.writestr('foo', 'a' * (2**21))
source_file.seek(0)
self.version.source.save(
os.path.basename(source_file.name), DjangoFile(source_file)
)
self.version.save()
response = self.client.get(self.url)
doc = pq(response.content)
link = doc('.current-source-link')
assert link
assert link.text() == 'View current'
assert link[0].attrib['href'] == reverse(
'downloads.source', args=(self.version.pk,)
)
def test_should_accept_zip_source_file(self):
with temp.NamedTemporaryFile(
suffix='.zip', dir=temp.gettempdir()
) as source_file:
with zipfile.ZipFile(source_file, 'w') as zip_file:
zip_file.writestr('foo', 'a' * (2**21))
source_file.seek(0)
data = self.formset(source=source_file)
response = self.client.post(self.url, data)
assert response.status_code == 302
version = Version.objects.get(pk=self.version.pk)
assert version.source
assert version.addon.needs_admin_code_review
# Check that the corresponding automatic activity log has been created.
assert ActivityLog.objects.filter(
action=amo.LOG.SOURCE_CODE_UPLOADED.id
).exists()
log = ActivityLog.objects.get(action=amo.LOG.SOURCE_CODE_UPLOADED.id)
assert log.user == self.user
assert log.details is None
assert log.arguments == [self.addon, self.version]
@mock.patch('olympia.devhub.views.log')
def test_logging(self, log_mock):
with temp.NamedTemporaryFile(
suffix='.zip', dir=temp.gettempdir()
) as source_file:
with zipfile.ZipFile(source_file, 'w') as zip_file:
zip_file.writestr('foo', 'a' * (2**21))
source_file.seek(0)
data = self.formset(source=source_file)
response = self.client.post(self.url, data)
assert response.status_code == 302
assert log_mock.info.call_count == 4
assert log_mock.info.call_args_list[0][0] == (
'version_edit, form populated, addon.slug: %s, version.id: %s',
self.addon.slug,
self.version.id,
)
assert log_mock.info.call_args_list[1][0] == (
'version_edit, form validated, addon.slug: %s, version.id: %s',
self.addon.slug,
self.version.id,
)
assert log_mock.info.call_args_list[2][0] == (
'version_edit, form saved, addon.slug: %s, version.id: %s',
self.addon.slug,
self.version.id,
)
assert log_mock.info.call_args_list[3][0] == (
'version_edit, redirecting to next view, addon.slug: %s, version.id: %s',
self.addon.slug,
self.version.id,
)
@mock.patch('olympia.devhub.views.log')
def test_no_logging_on_initial_display(self, log_mock):
response = self.client.get(self.url)
assert response.status_code == 200
assert log_mock.info.call_count == 0
@mock.patch('olympia.devhub.views.log')
def test_no_logging_without_source(self, log_mock):
data = self.formset(release_notes='xx')
response = self.client.post(self.url, data)
assert response.status_code == 302
assert log_mock.info.call_count == 0
@mock.patch('olympia.devhub.views.log')
def test_logging_failed_validation(self, log_mock):
with temp.NamedTemporaryFile(
suffix='.exe', dir=temp.gettempdir()
) as source_file:
with zipfile.ZipFile(source_file, 'w') as zip_file:
zip_file.writestr('foo', 'a' * (2**21))
source_file.seek(0)
data = self.formset(source=source_file)
response = self.client.post(self.url, data)
assert response.status_code == 200
assert response.context['version_form'].errors == {
'source': [
'Unsupported file type, please upload an archive file '
+ '(.zip, .tar.gz, .tgz, .tar.bz2).'
]
}
assert log_mock.info.call_count == 2
assert log_mock.info.call_args_list[0][0] == (
'version_edit, form populated, addon.slug: %s, version.id: %s',
self.addon.slug,
self.version.id,
)
assert log_mock.info.call_args_list[1][0] == (
'version_edit, validation failed, re-displaying the template, '
+ 'addon.slug: %s, version.id: %s',
self.addon.slug,
self.version.id,
)
def test_email_is_sent_to_relevant_people_for_source_code_upload(self):
# Have a reviewer review a version.
reviewer = user_factory()
self.grant_permission(reviewer, 'Addons:Review')
ActivityLog.create(
amo.LOG.REJECT_VERSION_DELAYED, self.addon, self.version, user=reviewer
)
# Add an extra developer to the add-on
extra_author = user_factory()
self.addon.authors.add(extra_author)
# Add someone in group meant to receive a copy of all activity emails.
group, _ = Group.objects.get_or_create(name=ACTIVITY_MAIL_GROUP)
staff_user = user_factory()
staff_user.groups.add(group)
# Have the developer upload source file for the version reviewed.
self.test_should_accept_zip_source_file()
# Check that an email has been sent to relevant people.
assert len(mail.outbox) == 3
for message in mail.outbox:
assert message.subject == ('Mozilla Add-ons: Delicious Bookmarks 2.1.072')
assert 'Source code uploaded' in message.body
        # Check each message was sent separately to the people we need to notify.
assert mail.outbox[0].to != mail.outbox[1].to != mail.outbox[2].to
assert set(mail.outbox[0].to + mail.outbox[1].to + mail.outbox[2].to) == {
reviewer.email,
extra_author.email,
staff_user.email,
}
def test_should_not_accept_exe_source_file(self):
with temp.NamedTemporaryFile(
suffix='.exe', dir=temp.gettempdir()
) as source_file:
with zipfile.ZipFile(source_file, 'w') as zip_file:
zip_file.writestr('foo', 'a' * (2**21))
source_file.seek(0)
data = self.formset(source=source_file)
response = self.client.post(self.url, data)
assert response.status_code == 200
assert not Version.objects.get(pk=self.version.pk).source
def test_dont_reset_needs_admin_code_review_flag_if_no_new_source(self):
tdir = temp.gettempdir()
tmp_file = temp.NamedTemporaryFile
with tmp_file(suffix='.zip', dir=tdir) as source_file:
with zipfile.ZipFile(source_file, 'w') as zip_file:
zip_file.writestr('foo', 'a' * (2**21))
source_file.seek(0)
data = self.formset(source=source_file)
response = self.client.post(self.url, data)
assert response.status_code == 302
version = Version.objects.get(pk=self.version.pk)
assert version.source
assert version.addon.needs_admin_code_review
# Unset the "admin review" flag, and re save the version. It shouldn't
# reset the flag, as the source hasn't changed.
AddonReviewerFlags.objects.get(addon=version.addon).update(
needs_admin_code_review=False
)
data = self.formset(name='some other name')
response = self.client.post(self.url, data)
assert response.status_code == 302
version = Version.objects.get(pk=self.version.pk)
assert version.source
assert not version.addon.needs_admin_code_review
def test_site_permission(self):
self.addon.update(type=amo.ADDON_SITE_PERMISSION)
# Authors can see a version page of a site permission add-on.
response = self.client.get(self.url)
assert response.status_code == 200
# They can't edit it though.
response = self.client.post(self.url, self.formset())
assert response.status_code == 403
class TestVersionEditStaticTheme(TestVersionEditBase):
def setUp(self):
super().setUp()
self.addon.update(type=amo.ADDON_STATICTHEME)
def test_no_compat(self):
response = self.client.get(self.url)
doc = pq(response.content)
assert not doc('#id_form-TOTAL_FORMS')
def test_no_upload(self):
response = self.client.get(self.url)
doc = pq(response.content)
assert not doc('a.add-file')
class TestVersionEditCompat(TestVersionEditBase):
def setUp(self):
super().setUp()
self.android_32pre, _created = AppVersion.objects.get_or_create(
application=amo.ANDROID.id, version='3.2a1pre'
)
self.android_30, _created = AppVersion.objects.get_or_create(
application=amo.ANDROID.id, version='3.0'
)
def get_form(self, url=None):
if not url:
url = self.url
av = self.version.apps.get()
assert av.min.version == '2.0'
assert av.max.version == '4.0'
form = self.client.get(url).context['compat_form'].initial_forms[0]
return initial(form)
def formset(self, *args, **kw):
defaults = formset(prefix='files')
defaults.update(kw)
return super().formset(*args, **defaults)
def test_add_appversion(self):
form = self.client.get(self.url).context['compat_form'].initial_forms[0]
data = self.formset(
initial(form),
{
'application': amo.ANDROID.id,
'min': self.android_30.id,
'max': self.android_32pre.id,
},
initial_count=1,
)
response = self.client.post(self.url, data)
assert response.status_code == 302
apps = [app.id for app in self.get_version().compatible_apps.keys()]
assert sorted(apps) == sorted([amo.FIREFOX.id, amo.ANDROID.id])
assert list(ActivityLog.objects.all().values_list('action')) == (
[(amo.LOG.MAX_APPVERSION_UPDATED.id,)]
)
def test_update_appversion(self):
data = self.get_form()
data.update(min=self.v1.id, max=self.v5.id)
response = self.client.post(self.url, self.formset(data, initial_count=1))
assert response.status_code == 302
av = self.version.apps.get()
assert av.min.version == '1.0'
assert av.max.version == '5.0'
assert list(ActivityLog.objects.all().values_list('action')) == (
[(amo.LOG.MAX_APPVERSION_UPDATED.id,)]
)
def test_ajax_update_appversion(self):
url = reverse('devhub.ajax.compat.update', args=['a3615', self.version.id])
data = self.get_form(url)
data.update(min=self.v1.id, max=self.v5.id)
response = self.client.post(url, self.formset(data, initial_count=1))
assert response.status_code == 200
av = self.version.apps.get()
assert av.min.version == '1.0'
assert av.max.version == '5.0'
assert list(ActivityLog.objects.all().values_list('action')) == (
[(amo.LOG.MAX_APPVERSION_UPDATED.id,)]
)
def test_ajax_update_on_deleted_version(self):
url = reverse('devhub.ajax.compat.update', args=['a3615', self.version.id])
data = self.get_form(url)
data.update(min=self.v1.id, max=self.v5.id)
self.version.delete()
response = self.client.post(url, self.formset(data, initial_count=1))
assert response.status_code == 404
def test_delete_appversion(self):
# Add android compat so we can delete firefox.
self.test_add_appversion()
form = self.client.get(self.url).context['compat_form']
data = list(map(initial, form.initial_forms))
data[0]['DELETE'] = True
response = self.client.post(self.url, self.formset(*data, initial_count=2))
assert response.status_code == 302
apps = [app.id for app in self.get_version().compatible_apps.keys()]
assert apps == [amo.ANDROID.id]
assert list(ActivityLog.objects.all().values_list('action')) == (
[(amo.LOG.MAX_APPVERSION_UPDATED.id,)]
)
def test_unique_apps(self):
form = self.client.get(self.url).context['compat_form'].initial_forms[0]
dupe = initial(form)
del dupe['id']
data = self.formset(initial(form), dupe, initial_count=1)
response = self.client.post(self.url, data)
assert response.status_code == 200
        # Because of how formsets work, the duplicate application entry makes
        # the formset invalid. We got an error (no redirect), so we're good.
def test_require_appversion(self):
old_av = self.version.apps.get()
form = self.client.get(self.url).context['compat_form'].initial_forms[0]
data = initial(form)
data['DELETE'] = True
response = self.client.post(self.url, self.formset(data, initial_count=1))
assert response.status_code == 200
compat_formset = response.context['compat_form']
assert compat_formset.non_form_errors() == (
['Need at least one compatible application.']
)
assert self.version.apps.get() == old_av
# Make sure the user can re-submit again from the page showing the
# validation error: we should display all previously present compat
# forms, with the DELETE bit off.
assert compat_formset.data == compat_formset.forms[0].data
assert compat_formset.forms[0]['DELETE'].value() is False
def test_proper_min_max(self):
form = self.client.get(self.url).context['compat_form'].initial_forms[0]
data = initial(form)
data['min'], data['max'] = data['max'], data['min']
response = self.client.post(self.url, self.formset(data, initial_count=1))
assert response.status_code == 200
assert response.context['compat_form'].forms[0].non_field_errors() == (
['Invalid version range.']
)
def test_same_min_max(self):
form = self.client.get(self.url).context['compat_form'].initial_forms[0]
data = initial(form)
data['max'] = data['min']
response = self.client.post(self.url, self.formset(data, initial_count=1))
assert response.status_code == 302
av = self.version.apps.all()[0]
assert av.min == av.max
def test_statictheme_no_compat_edit(self):
"""static themes don't allow users to overwrite compat data."""
addon = self.get_addon()
addon.update(type=amo.ADDON_STATICTHEME)
|
|
from __future__ import print_function
import socket
from threading import Thread, Lock
from multiprocessing.pool import ThreadPool
from contextlib import contextmanager
import traceback
from datetime import datetime
import uuid
import random
import multiprocessing
import zmq
from time import time
import sys
from ..compatibility import Queue, unicode
from .. import core
try:
import cPickle as pickle
except ImportError:
import pickle
import dill
def pickle_dumps(obj):
return pickle.dumps(obj, protocol=pickle.HIGHEST_PROTOCOL)
MAX_DEALERS = 100
with open('log.workers', 'w') as f: # delete file
pass
def log(*args):
with open('log.workers', 'a') as f:
print(*args, file=f)
log('Hello from worker.py')
@contextmanager
def logerrors():
try:
yield
except Exception as e:
exc_type, exc_value, exc_traceback = sys.exc_info()
tb = ''.join(traceback.format_tb(exc_traceback))
log('Error!', str(e))
log('Traceback', str(tb))
raise
class Worker(object):
""" Asynchronous worker in a distributed dask computation pool
Parameters
----------
scheduler: string
Address of scheduler
hostname: string
A visible hostname/IP of this worker to the network
port_to_workers: int
Port on which to listen to worker connections
bind_to_workers: string
Addresses from which we accept worker connections, defaults to *
State
-----
status: string
Status of worker, either 'run' or 'closed'
to_workers: zmq.Socket
Router socket to serve requests from other workers
to_scheduler: zmq.Socket
Dealer socket to communicate with scheduler
See Also
--------
dask.distributed.scheduler.Scheduler
"""
def __init__(self, scheduler, data=None, nthreads=100,
hostname=None, port_to_workers=None, bind_to_workers='*',
block=False):
if isinstance(scheduler, unicode):
scheduler = scheduler.encode()
self.data = data if data is not None else dict()
self.pool = ThreadPool(nthreads)
self.scheduler = scheduler
self.status = 'run'
self.context = zmq.Context()
self.hostname = hostname or socket.gethostname()
self.to_workers = self.context.socket(zmq.ROUTER)
if port_to_workers is None:
port_to_workers = self.to_workers.bind_to_random_port('tcp://' + bind_to_workers)
else:
            self.to_workers.bind('tcp://%s:%d' % (bind_to_workers, port_to_workers))
self.address = ('tcp://%s:%s' % (self.hostname, port_to_workers)).encode()
self.dealers = dict()
self.lock = Lock()
self.queues = dict()
self.to_scheduler = self.context.socket(zmq.DEALER)
self.to_scheduler.setsockopt(zmq.IDENTITY, self.address)
self.to_scheduler.connect(scheduler)
self.send_to_scheduler({'function': 'register'}, {})
self.scheduler_functions = {'status': self.status_to_scheduler,
'compute': self.compute,
'getitem': self.getitem_scheduler,
'delitem': self.delitem,
'setitem': self.setitem,
'close': self.close_from_scheduler}
self.worker_functions = {'getitem': self.getitem_worker,
'getitem-ack': self.getitem_ack,
'status': self.status_to_worker}
log(self.address, 'Start up', self.scheduler)
self._listen_scheduler_thread = Thread(target=self.listen_to_scheduler)
self._listen_scheduler_thread.start()
self._listen_workers_thread = Thread(target=self.listen_to_workers)
self._listen_workers_thread.start()
if block:
self.block()
def status_to_scheduler(self, header, payload):
out_header = {'jobid': header.get('jobid')}
log(self.address, 'Status check', header['address'])
self.send_to_scheduler(out_header, 'OK')
def status_to_worker(self, header, payload):
out_header = {'jobid': header.get('jobid')}
log(self.address, 'Status check', header['address'])
self.send_to_worker(header['address'], out_header, 'OK')
def getitem_worker(self, header, payload):
""" Get data and send to another worker
See also:
Worker.collect
"""
loads = header.get('loads', pickle.loads)
payload = loads(payload)
log(self.address, "Getitem for worker", header, payload)
header2 = {'function': 'getitem-ack',
'jobid': header.get('jobid')}
try:
result = self.data[payload['key']]
header2['status'] = 'OK'
except KeyError as e:
result = e
header2['status'] = 'Bad key'
payload = {'key': payload['key'],
'value': result,
'queue': payload['queue']}
self.send_to_worker(header['address'], header2, payload)
def getitem_ack(self, header, payload):
""" Receive data after sending a getitem request
See also:
Worker.getitem_worker
Worker.collect
"""
with logerrors():
loads = header.get('loads', pickle.loads)
payload = loads(payload)
log(self.address, 'Getitem ack', payload)
assert header['status'] == 'OK'
self.data[payload['key']] = payload['value']
self.queues[payload['queue']].put(payload['key'])
def getitem_scheduler(self, header, payload):
""" Send local data to scheduler
See also:
Scheduler.gather
Scheduler.getitem_ack
"""
loads = header.get('loads', pickle.loads)
payload = loads(payload)
log(self.address, 'Get from scheduler', payload)
key = payload['key']
header2 = {'jobid': header.get('jobid')}
try:
result = self.data[key]
header2['status'] = 'OK'
except KeyError as e:
result = e
header2['status'] = 'Bad key'
header2['function'] = 'getitem-ack'
payload2 = {'key': key, 'value': result, 'queue': payload['queue']}
self.send_to_scheduler(header2, payload2)
def setitem(self, header, payload):
""" Assign incoming data to local dictionary
See also:
Scheduler.scatter
Scheduler.send_data
Scheduler.setitem_ack
"""
loads = header.get('loads', pickle.loads)
payload = loads(payload)
log(self.address, 'Setitem', payload)
key = payload['key']
value = payload['value']
self.data[key] = value
queue = payload.get('queue', False)
if queue:
header2 = {'jobid': header.get('jobid'),
'function': 'setitem-ack'}
payload2 = {'key': key, 'queue': queue}
log(self.address, 'Setitem send ack to scheduler',
header2, payload2)
self.send_to_scheduler(header2, payload2)
def delitem(self, header, payload):
""" Remove item from local data """
loads = header.get('loads', pickle.loads)
payload = loads(payload)
log(self.address, 'Delitem', payload)
key = payload['key']
del self.data[key]
# TODO: this should be replaced with a delitem-ack call
if payload.get('reply', False):
self.send_to_scheduler({'jobid': header.get('jobid')}, 'OK')
def send_to_scheduler(self, header, payload):
""" Send data to scheduler """
log(self.address, 'Send to scheduler', header)
header['address'] = self.address
header['timestamp'] = datetime.utcnow()
dumps = header.get('dumps', pickle_dumps)
with self.lock:
self.to_scheduler.send_multipart([pickle_dumps(header),
dumps(payload)])
def send_to_worker(self, address, header, payload):
""" Send data to workers
This is a bit tricky. We want to have one DEALER socket per worker.
We cache these in ``self.dealers``. If the number of worker peers is
high then we might run into having too many file descriptors open.
        Currently we flush the entire cache of dealers once it grows beyond
        ``MAX_DEALERS``. This has yet to be tested.
"""
if address not in self.dealers:
if len(self.dealers) > MAX_DEALERS:
for sock in self.dealers.values():
sock.close()
self.dealers.clear()
sock = self.context.socket(zmq.DEALER)
sock.connect(address)
self.dealers[address] = sock
header['address'] = self.address
header['timestamp'] = datetime.utcnow()
log(self.address, 'Send to worker', address, header)
dumps = header.get('dumps', pickle_dumps)
with self.lock:
self.dealers[address].send_multipart([pickle_dumps(header),
dumps(payload)])
def listen_to_scheduler(self):
"""
Event loop listening to commands from scheduler
Header and Payload should deserialize into dicts of the following form:
Header
{'function': name of function to call, see self.functions,
'jobid': job identifier, defaults to None,
'address': name of sender, defaults to zmq identity}
Payload
--Function specific, for setitem might include the following--
{'key': 'x',
'value': 10}
So the minimal request would be as follows:
>>> sock = context.socket(zmq.DEALER) # doctest: +SKIP
>>> sock.connect('tcp://my-address') # doctest: +SKIP
>>> header = {'function': 'status'}
>>> payload = {}
        >>> sock.send_multipart([dumps(header), dumps(payload)])  # doctest: +SKIP
Or a more complex packet might be as follows:
>>> header = {'function': 'setitem', 'jobid': 1}
>>> payload = {'key': 'x', 'value': 10}
        >>> sock.send_multipart([dumps(header), dumps(payload)])  # doctest: +SKIP
We match the function string against ``self.scheduler_functions`` to
pull out the actual function. We then execute this function with the
provided arguments in another thread from ``self.pool``. That function
may then choose to send results back to the sender.
See Also:
listen_to_workers
send_to_scheduler
"""
while self.status != 'closed':
# Wait on request
try:
if not self.to_scheduler.poll(100):
continue
except zmq.ZMQError:
break
with logerrors():
with self.lock:
header, payload = self.to_scheduler.recv_multipart()
header = pickle.loads(header)
log(self.address, 'Receive job from scheduler', header)
try:
function = self.scheduler_functions[header['function']]
except KeyError:
log(self.address, 'Unknown function', header)
else:
future = self.pool.apply_async(function, args=(header, payload))
def listen_to_workers(self):
""" Listen to communications from workers
See ``listen_to_scheduler`` for more in depth docstring
"""
while self.status != 'closed':
# Wait on request
try:
if not self.to_workers.poll(100):
continue
except zmq.ZMQError:
break
with logerrors():
address, header, payload = self.to_workers.recv_multipart()
header = pickle.loads(header)
if 'address' not in header:
header['address'] = address
log(self.address, 'Receive job from worker', address, header)
try:
function = self.worker_functions[header['function']]
except KeyError:
log(self.address, 'Unknown function', header)
else:
future = self.pool.apply_async(function, args=(header, payload))
def block(self):
""" Block until listener threads close
        Warning: unless some other thread eventually calls ``.close()``, this
        will typically block forever with no easy way to escape.
"""
self._listen_workers_thread.join()
self._listen_scheduler_thread.join()
log('Unblocked')
def collect(self, locations):
""" Collect data from peers
Given a dictionary of desired data and who holds that data
        This fires off getitem requests to one of the hosts for each piece of
data then blocks on all of the responses, then inserts this data into
``self.data``.
Example
-------
>>> locations = {'x': ['tcp://alice:5000', 'tcp://bob:5000'],
... 'y': ['tcp://bob:5000']}
>>> worker.collect(locations) # doctest: +SKIP
Protocol
--------
1. Worker creates unique queue
2. For each data this worker chooses a worker at random that holds
that data and fires off a 'getitem' request
{'key': ..., 'queue': ...}
3. Recipient worker handles the request and fires back a 'getitem-ack'
with the data
{'key': ..., 'value': ..., 'queue': ...}
4. Local getitem_ack function adds the value to the local dict and
puts the key in the queue
5. Once all keys have run through the queue the collect function wakes
up again, releases the queue, and returns control
        6. This is often called from Worker.compute; control often ends there
See also:
Worker.getitem
Worker.getitem_ack
Worker.compute
Scheduler.trigger_task
"""
socks = []
qkey = str(uuid.uuid1())
queue = Queue()
self.queues[qkey] = queue
# Send out requests for data
log(self.address, 'Collect data from peers', locations)
counter = 0
with logerrors():
for key, locs in locations.items():
if key in self.data: # already have this locally
continue
worker = random.choice(tuple(locs)) # randomly select one peer
header = {'jobid': key,
'function': 'getitem'}
payload = {'function': 'getitem',
'key': key,
'queue': qkey}
self.send_to_worker(worker, header, payload)
counter += 1
for i in range(counter):
queue.get()
del self.queues[qkey]
log(self.address, 'Collect finishes')
def compute(self, header, payload):
""" Compute dask task
Given a key, task, and locations of data
>>> from operator import add
>>> payload = {'key': 'z',
... 'task': (add, 'x', 'y'),
... 'locations': {'x': ['tcp://alice:5000']},
... 'queue': 'unique-identifier'}
Collect necessary data from locations (see ``collect``),
then compute task and store result into ``self.data``. Finally report
back to the scheduler that we're free.
"""
with logerrors():
# Unpack payload
loads = header.get('loads', pickle.loads)
payload = loads(payload)
locations = payload['locations']
key = payload['key']
task = payload['task']
# Grab data from peers
if locations:
self.collect(locations)
# Do actual work
start = time()
status = "OK"
log(self.address, "Start computation", key, task)
try:
result = core.get(self.data, task)
end = time()
except Exception as e:
status = e
end = time()
else:
self.data[key] = result
log(self.address, "End computation", key, task, status)
# Report finished to scheduler
header2 = {'function': 'finished-task'}
result = {'key': key,
'duration': end - start,
'status': status,
'dependencies': list(locations),
'queue': payload['queue']}
self.send_to_scheduler(header2, result)
def close_from_scheduler(self, header, payload):
log(self.address, 'Close signal from scheduler')
self.close()
def close(self):
with self.lock:
if self.status != 'closed':
self.status = 'closed'
do_close = True
else:
do_close = False
if do_close:
log(self.address, 'Close')
self.status = 'closed'
for sock in self.dealers.values():
sock.close(linger=1)
self.to_workers.close(linger=1)
self.pool.close()
self.pool.join()
self.block()
self.context.destroy(linger=3)
def __del__(self):
self.close()
def status():
return 'OK'
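# A minimal usage sketch (added for illustration; not part of the original
# module). It assumes a compatible scheduler is already listening at the given
# address; the address and thread count below are placeholders, and the helper
# name is hypothetical.
def _example_worker(scheduler_address='tcp://127.0.0.1:5555'):
    # Start a worker, serve requests until the scheduler sends a 'close'
    # message, then make sure sockets and threads are torn down.
    worker = Worker(scheduler_address, nthreads=10, block=False)
    try:
        worker.block()
    finally:
        worker.close()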
|
|
from __future__ import absolute_import
from __future__ import print_function
from django.conf import settings
settings.RUNNING_INSIDE_TORNADO = True
# We must call zerver.lib.tornado_ioloop_logging.instrument_tornado_ioloop
# before we import anything else from our project in order for our
# Tornado load logging to work; otherwise we might accidentally import
# zerver.lib.queue (which will instantiate the Tornado ioloop) before
# this.
from zerver.lib.tornado_ioloop_logging import instrument_tornado_ioloop
instrument_tornado_ioloop()
from django.core.management.base import BaseCommand, CommandError
from optparse import make_option
import os
import sys
import tornado.web
import logging
from tornado import ioloop
from zerver.lib.debug import interactive_debug_listen
from zerver.lib.response import json_response
from zerver.lib.event_queue import process_notification, missedmessage_hook
from zerver.lib.event_queue import setup_event_queue, add_client_gc_hook
from zerver.lib.queue import setup_tornado_rabbitmq
from zerver.lib.socket import get_sockjs_router, respond_send_message
from zerver.middleware import async_request_stop
if settings.USING_RABBITMQ:
from zerver.lib.queue import get_queue_client
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--nokeepalive', action='store_true',
dest='no_keep_alive', default=False,
help="Tells Tornado to NOT keep alive http connections."),
make_option('--noxheaders', action='store_false',
dest='xheaders', default=True,
help="Tells Tornado to NOT override remote IP with X-Real-IP."),
)
help = "Starts a Tornado Web server wrapping Django."
args = '[optional port number or ipaddr:port]\n (use multiple ports to start multiple servers)'
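    # Example invocation (illustrative; the actual command name comes from
    # this file's name under management/commands/, commonly "runtornado"):
    #
    #     python manage.py runtornado 127.0.0.1:9993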
def handle(self, addrport, **options):
# setup unbuffered I/O
sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)
sys.stderr = os.fdopen(sys.stderr.fileno(), 'w', 0)
interactive_debug_listen()
import django
from tornado import httpserver, web
try:
addr, port = addrport.split(':')
except ValueError:
addr, port = '', addrport
if not addr:
addr = '127.0.0.1'
if not port.isdigit():
raise CommandError("%r is not a valid port number." % (port,))
xheaders = options.get('xheaders', True)
no_keep_alive = options.get('no_keep_alive', False)
quit_command = 'CTRL-C'
if settings.DEBUG:
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)-8s %(message)s')
def inner_run():
from django.conf import settings
from django.utils import translation
translation.activate(settings.LANGUAGE_CODE)
print("Validating Django models.py...")
self.validate(display_num_errors=True)
print("\nDjango version %s" % (django.get_version()))
print("Tornado server is running at http://%s:%s/" % (addr, port))
print("Quit the server with %s." % (quit_command,))
if settings.USING_RABBITMQ:
queue_client = get_queue_client()
# Process notifications received via RabbitMQ
queue_client.register_json_consumer('notify_tornado', process_notification)
queue_client.register_json_consumer('tornado_return', respond_send_message)
try:
urls = (r"/notify_tornado",
r"/json/get_events",
r"/json/events",
r"/api/v1/events",
)
                # `application` is a Tornado web.Application that routes these
                # URLs (plus the sockjs router's) to our AsyncDjangoHandler.
application = web.Application([(url, AsyncDjangoHandler) for url in urls]
+ get_sockjs_router().urls,
debug=django.conf.settings.DEBUG,
# Disable Tornado's own request logging, since we have our own
log_function=lambda x: None)
# start tornado web server in single-threaded mode
http_server = httpserver.HTTPServer(application,
xheaders=xheaders,
no_keep_alive=no_keep_alive)
http_server.listen(int(port), address=addr)
if django.conf.settings.DEBUG:
ioloop.IOLoop.instance().set_blocking_log_threshold(5)
setup_event_queue()
add_client_gc_hook(missedmessage_hook)
setup_tornado_rabbitmq()
ioloop.IOLoop.instance().start()
except KeyboardInterrupt:
sys.exit(0)
inner_run()
#
# Modify the base Tornado handler for Django
#
from threading import Lock
from django.core.handlers import base
from django.core.urlresolvers import set_script_prefix
from django.core import signals
class AsyncDjangoHandler(tornado.web.RequestHandler, base.BaseHandler):
initLock = Lock()
def __init__(self, *args, **kwargs):
super(AsyncDjangoHandler, self).__init__(*args, **kwargs)
# Set up middleware if needed. We couldn't do this earlier, because
# settings weren't available.
self._request_middleware = None
self.initLock.acquire()
# Check that middleware is still uninitialised.
if self._request_middleware is None:
self.load_middleware()
self.initLock.release()
self._auto_finish = False
self.client_descriptor = None
def get(self):
from tornado.wsgi import WSGIContainer
from django.core.handlers.wsgi import WSGIRequest, get_script_name
import urllib
environ = WSGIContainer.environ(self.request)
environ['PATH_INFO'] = urllib.unquote(environ['PATH_INFO'])
request = WSGIRequest(environ)
request._tornado_handler = self
set_script_prefix(get_script_name(environ))
signals.request_started.send(sender=self.__class__)
try:
response = self.get_response(request)
if not response:
return
finally:
signals.request_finished.send(sender=self.__class__)
self.set_status(response.status_code)
for h in response.items():
self.set_header(h[0], h[1])
if not hasattr(self, "_new_cookies"):
self._new_cookies = []
self._new_cookies.append(response.cookies)
self.write(response.content)
self.finish()
def head(self):
self.get()
def post(self):
self.get()
def delete(self):
self.get()
def on_connection_close(self):
if self.client_descriptor is not None:
self.client_descriptor.disconnect_handler(client_closed=True)
# Based on django.core.handlers.base: get_response
def get_response(self, request):
"Returns an HttpResponse object for the given HttpRequest"
from django import http
from django.core import exceptions, urlresolvers
from django.conf import settings
try:
try:
# Setup default url resolver for this thread.
urlconf = settings.ROOT_URLCONF
urlresolvers.set_urlconf(urlconf)
resolver = urlresolvers.RegexURLResolver(r'^/', urlconf)
response = None
# Apply request middleware
for middleware_method in self._request_middleware:
response = middleware_method(request)
if response:
break
if hasattr(request, "urlconf"):
# Reset url resolver with a custom urlconf.
urlconf = request.urlconf
urlresolvers.set_urlconf(urlconf)
resolver = urlresolvers.RegexURLResolver(r'^/', urlconf)
### ADDED BY ZULIP
request._resolver = resolver
### END ADDED BY ZULIP
callback, callback_args, callback_kwargs = resolver.resolve(
request.path_info)
# Apply view middleware
if response is None:
for middleware_method in self._view_middleware:
response = middleware_method(request, callback, callback_args, callback_kwargs)
if response:
break
### THIS BLOCK MODIFIED BY ZULIP
if response is None:
from ...decorator import RespondAsynchronously
try:
response = callback(request, *callback_args, **callback_kwargs)
if response is RespondAsynchronously:
async_request_stop(request)
return
except Exception as e:
# If the view raised an exception, run it through exception
# middleware, and if the exception middleware returns a
# response, use that. Otherwise, reraise the exception.
for middleware_method in self._exception_middleware:
response = middleware_method(request, e)
if response:
break
if response is None:
raise
if response is None:
try:
view_name = callback.__name__
except AttributeError:
view_name = callback.__class__.__name__ + '.__call__'
raise ValueError("The view %s.%s returned None." %
(callback.__module__, view_name))
# If the response supports deferred rendering, apply template
                # response middleware and then render the response
if hasattr(response, 'render') and callable(response.render):
for middleware_method in self._template_response_middleware:
response = middleware_method(request, response)
response = response.render()
except http.Http404 as e:
if settings.DEBUG:
from django.views import debug
response = debug.technical_404_response(request, e)
else:
try:
callback, param_dict = resolver.resolve404()
response = callback(request, **param_dict)
except:
try:
response = self.handle_uncaught_exception(request, resolver, sys.exc_info())
finally:
signals.got_request_exception.send(sender=self.__class__, request=request)
except exceptions.PermissionDenied:
logging.warning(
'Forbidden (Permission denied): %s', request.path,
extra={
'status_code': 403,
'request': request
})
try:
callback, param_dict = resolver.resolve403()
response = callback(request, **param_dict)
except:
try:
response = self.handle_uncaught_exception(request,
resolver, sys.exc_info())
finally:
signals.got_request_exception.send(
sender=self.__class__, request=request)
except SystemExit:
# See https://code.djangoproject.com/ticket/4701
raise
except Exception as e:
exc_info = sys.exc_info()
signals.got_request_exception.send(sender=self.__class__, request=request)
return self.handle_uncaught_exception(request, resolver, exc_info)
finally:
# Reset urlconf on the way out for isolation
urlresolvers.set_urlconf(None)
### ZULIP CHANGE: The remainder of this function was moved
### into its own function, just below, so we can call it from
### finish().
response = self.apply_response_middleware(request, response, resolver)
return response
### Copied from get_response (above in this file)
def apply_response_middleware(self, request, response, resolver):
try:
# Apply response middleware, regardless of the response
for middleware_method in self._response_middleware:
response = middleware_method(request, response)
response = self.apply_response_fixes(request, response)
except: # Any exception should be gathered and handled
signals.got_request_exception.send(sender=self.__class__, request=request)
response = self.handle_uncaught_exception(request, resolver, sys.exc_info())
return response
def zulip_finish(self, response, request, apply_markdown):
# Make sure that Markdown rendering really happened, if requested.
# This is a security issue because it's where we escape HTML.
# c.f. ticket #64
#
# apply_markdown=True is the fail-safe default.
if response['result'] == 'success' and 'messages' in response and apply_markdown:
for msg in response['messages']:
if msg['content_type'] != 'text/html':
self.set_status(500)
return self.finish('Internal error: bad message format')
if response['result'] == 'error':
self.set_status(400)
# Call the Django response middleware on our object so that
# e.g. our own logging code can run; but don't actually use
# the headers from that since sending those to Tornado seems
# tricky; instead just send the (already json-rendered)
# content on to Tornado
django_response = json_response(res_type=response['result'],
data=response, status=self.get_status())
django_response = self.apply_response_middleware(request, django_response,
request._resolver)
# Pass through the content-type from Django, as json content should be
# served as application/json
self.set_header("Content-Type", django_response['Content-Type'])
return self.finish(django_response.content)
|
|
"""This component provides HA sensor support for Ring Door Bell/Chimes."""
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
ATTR_ATTRIBUTION,
CONF_ENTITY_NAMESPACE,
CONF_MONITORED_CONDITIONS,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.icon import icon_for_battery_level
from . import (
ATTRIBUTION,
DATA_RING_CHIMES,
DATA_RING_DOORBELLS,
DATA_RING_STICKUP_CAMS,
DEFAULT_ENTITY_NAMESPACE,
SIGNAL_UPDATE_RING,
)
_LOGGER = logging.getLogger(__name__)
# Sensor types: Name, category, units, icon, kind
SENSOR_TYPES = {
"battery": ["Battery", ["doorbell", "stickup_cams"], "%", "battery-50", None],
"last_activity": [
"Last Activity",
["doorbell", "stickup_cams"],
None,
"history",
None,
],
"last_ding": ["Last Ding", ["doorbell"], None, "history", "ding"],
"last_motion": [
"Last Motion",
["doorbell", "stickup_cams"],
None,
"history",
"motion",
],
"volume": [
"Volume",
["chime", "doorbell", "stickup_cams"],
None,
"bell-ring",
None,
],
"wifi_signal_category": [
"WiFi Signal Category",
["chime", "doorbell", "stickup_cams"],
None,
"wifi",
None,
],
"wifi_signal_strength": [
"WiFi Signal Strength",
["chime", "doorbell", "stickup_cams"],
"dBm",
"wifi",
None,
],
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(
CONF_ENTITY_NAMESPACE, default=DEFAULT_ENTITY_NAMESPACE
): cv.string,
vol.Required(CONF_MONITORED_CONDITIONS, default=list(SENSOR_TYPES)): vol.All(
cv.ensure_list, [vol.In(SENSOR_TYPES)]
),
}
)
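# Illustrative configuration sketch (added for clarity; not part of this
# file). The platform key is assumed to be "ring", and monitored_conditions
# must be keys of SENSOR_TYPES above:
#
#   sensor:
#     - platform: ring
#       monitored_conditions:
#         - battery
#         - last_ding
#         - wifi_signal_strength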
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up a sensor for a Ring device."""
ring_chimes = hass.data[DATA_RING_CHIMES]
ring_doorbells = hass.data[DATA_RING_DOORBELLS]
ring_stickup_cams = hass.data[DATA_RING_STICKUP_CAMS]
sensors = []
for device in ring_chimes:
for sensor_type in config[CONF_MONITORED_CONDITIONS]:
if "chime" in SENSOR_TYPES[sensor_type][1]:
sensors.append(RingSensor(hass, device, sensor_type))
for device in ring_doorbells:
for sensor_type in config[CONF_MONITORED_CONDITIONS]:
if "doorbell" in SENSOR_TYPES[sensor_type][1]:
sensors.append(RingSensor(hass, device, sensor_type))
for device in ring_stickup_cams:
for sensor_type in config[CONF_MONITORED_CONDITIONS]:
if "stickup_cams" in SENSOR_TYPES[sensor_type][1]:
sensors.append(RingSensor(hass, device, sensor_type))
add_entities(sensors, True)
return True
class RingSensor(Entity):
"""A sensor implementation for Ring device."""
def __init__(self, hass, data, sensor_type):
"""Initialize a sensor for Ring device."""
super().__init__()
self._sensor_type = sensor_type
self._data = data
self._extra = None
self._icon = "mdi:{}".format(SENSOR_TYPES.get(self._sensor_type)[3])
self._kind = SENSOR_TYPES.get(self._sensor_type)[4]
self._name = "{0} {1}".format(
self._data.name, SENSOR_TYPES.get(self._sensor_type)[0]
)
self._state = None
self._tz = str(hass.config.time_zone)
self._unique_id = f"{self._data.id}-{self._sensor_type}"
async def async_added_to_hass(self):
"""Register callbacks."""
async_dispatcher_connect(self.hass, SIGNAL_UPDATE_RING, self._update_callback)
@callback
def _update_callback(self):
"""Call update method."""
self.async_schedule_update_ha_state(True)
@property
def should_poll(self):
"""Return False, updates are controlled via the hub."""
return False
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unique_id(self):
"""Return a unique ID."""
return self._unique_id
@property
def device_state_attributes(self):
"""Return the state attributes."""
attrs = {}
attrs[ATTR_ATTRIBUTION] = ATTRIBUTION
attrs["device_id"] = self._data.id
attrs["firmware"] = self._data.firmware
attrs["kind"] = self._data.kind
attrs["timezone"] = self._data.timezone
attrs["type"] = self._data.family
attrs["wifi_name"] = self._data.wifi_name
if self._extra and self._sensor_type.startswith("last_"):
attrs["created_at"] = self._extra["created_at"]
attrs["answered"] = self._extra["answered"]
attrs["recording_status"] = self._extra["recording"]["status"]
attrs["category"] = self._extra["kind"]
return attrs
@property
def icon(self):
"""Icon to use in the frontend, if any."""
if self._sensor_type == "battery" and self._state is not None:
return icon_for_battery_level(
battery_level=int(self._state), charging=False
)
return self._icon
@property
def unit_of_measurement(self):
"""Return the units of measurement."""
return SENSOR_TYPES.get(self._sensor_type)[2]
def update(self):
"""Get the latest data and updates the state."""
_LOGGER.debug("Updating data from %s sensor", self._name)
if self._sensor_type == "volume":
self._state = self._data.volume
if self._sensor_type == "battery":
self._state = self._data.battery_life
if self._sensor_type.startswith("last_"):
history = self._data.history(
limit=5, timezone=self._tz, kind=self._kind, enforce_limit=True
)
if history:
self._extra = history[0]
created_at = self._extra["created_at"]
self._state = "{0:0>2}:{1:0>2}".format(
created_at.hour, created_at.minute
)
if self._sensor_type == "wifi_signal_category":
self._state = self._data.wifi_signal_category
if self._sensor_type == "wifi_signal_strength":
self._state = self._data.wifi_signal_strength
|
|
#!/usr/bin/env python3
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import io
import random
import struct
import unittest
import common
import network_layer
def any_eid():
return bytearray([random.getrandbits(8) for _ in range(16)])
def any_mac_extended_address():
return bytearray([random.getrandbits(8) for _ in range(8)])
def any_rloc16():
return random.getrandbits(16)
def any_ml_eid():
return bytearray([random.getrandbits(8) for _ in range(8)])
def any_status():
return random.getrandbits(1)
def any_seconds():
return random.getrandbits(32)
def any_id_sequence():
return random.getrandbits(8)
def any_router_id_mask():
return random.getrandbits(64)
def any_options(count=None):
count = count if count is not None else random.randint(0, 255)
return [random.getrandbits(8) for _ in range(count)]
def any_tlv_data(length=None):
_type = random.getrandbits(8)
length = length if length is not None else random.getrandbits(8)
value = bytearray([random.getrandbits(8) for _ in range(length)])
return bytearray([_type, length]) + value
def any_tlvs_data(count=None):
count = count if count is not None else random.randint(0, 16)
data = bytearray()
for _ in range(count):
data += any_tlv_data(random.randint(1, 15))
return data
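# Illustration (added; not part of the original tests): each TLV produced by
# any_tlv_data() is laid out as [type, length, value...]. For example,
# bytearray([0x02, 0x03, 0xAA, 0xBB, 0xCC]) is a type-2 TLV carrying a
# 3-byte value, and any_tlvs_data() simply concatenates several such TLVs.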
class TestTargetEid(unittest.TestCase):
def test_should_return_eid_value_when_eid_property_is_called(self):
# GIVEN
eid = any_eid()
target_eid = network_layer.TargetEid(eid)
# WHEN
actual_eid = target_eid.eid
# THEN
self.assertEqual(eid, actual_eid)
def test_should_return_True_when_try_to_equal_two_the_same_type_objects_with_the_same_values(self):
# GIVEN
eid = any_eid()
target_eid = network_layer.TargetEid(eid)
# THEN
self.assertEqual(target_eid, network_layer.TargetEid(eid))
class TestTargetEidFactory(unittest.TestCase):
def test_should_create_TargetEid_from_bytearray_when_parse_method_is_called(self):
# GIVEN
eid = any_eid()
factory = network_layer.TargetEidFactory()
# WHEN
target_eid = factory.parse(io.BytesIO(eid), common.MessageInfo())
# THEN
self.assertTrue(isinstance(target_eid, network_layer.TargetEid))
self.assertEqual(eid, target_eid.eid)
class TestMacExtendedAddress(unittest.TestCase):
def test_should_return_mac_address_value_when_mac_address_property_is_called(self):
# GIVEN
mac_address = any_mac_extended_address()
mac_extended_address = network_layer.MacExtendedAddress(mac_address)
# WHEN
actual_mac_address = mac_extended_address.mac_address
# THEN
self.assertEqual(mac_address, actual_mac_address)
def test_should_return_True_when_try_to_equal_two_the_same_type_objects_with_the_same_values(self):
# GIVEN
mac_address = any_mac_extended_address()
mac_extended_address = network_layer.MacExtendedAddress(mac_address)
# THEN
self.assertEqual(mac_extended_address, network_layer.MacExtendedAddress(mac_address))
class TestMacExtendedAddressFactory(unittest.TestCase):
def test_should_create_MacExtendedAddress_from_bytearray_when_parse_method_is_called(self):
# GIVEN
mac_address = any_mac_extended_address()
factory = network_layer.MacExtendedAddressFactory()
# WHEN
mac_extended_address = factory.parse(io.BytesIO(mac_address), common.MessageInfo())
# THEN
self.assertTrue(isinstance(mac_extended_address, network_layer.MacExtendedAddress))
self.assertEqual(mac_address, mac_extended_address.mac_address)
class TestRloc16(unittest.TestCase):
def test_should_return_rloc16_value_when_rloc16_property_is_called(self):
# GIVEN
rloc16 = any_rloc16()
rloc16_obj = network_layer.Rloc16(rloc16)
# WHEN
actual_rloc16 = rloc16_obj.rloc16
# THEN
self.assertEqual(rloc16, actual_rloc16)
def test_should_return_True_when_try_to_equal_two_the_same_type_objects_with_the_same_values(self):
# GIVEN
rloc16 = any_rloc16()
rloc16_obj = network_layer.Rloc16(rloc16)
# THEN
self.assertEqual(rloc16_obj, network_layer.Rloc16(rloc16))
class TestRloc16Factory(unittest.TestCase):
def test_should_create_Rloc16_from_bytearray_when_parse_method_is_called(self):
# GIVEN
rloc16 = any_rloc16()
factory = network_layer.Rloc16Factory()
data = bytearray(struct.pack(">H", rloc16))
# WHEN
rloc16_obj = factory.parse(io.BytesIO(data), common.MessageInfo())
# THEN
self.assertTrue(isinstance(rloc16_obj, network_layer.Rloc16))
self.assertEqual(rloc16, rloc16_obj.rloc16)
class TestMlEid(unittest.TestCase):
def test_should_return_ml_eid_value_when_ml_eid_property_is_called(self):
# GIVEN
ml_eid = any_ml_eid()
ml_eid_obj = network_layer.MlEid(ml_eid)
# WHEN
actual_ml_eid = ml_eid_obj.ml_eid
# THEN
self.assertEqual(ml_eid, actual_ml_eid)
def test_should_return_True_when_try_to_equal_two_the_same_type_objects_with_the_same_values(self):
# GIVEN
ml_eid = any_ml_eid()
ml_eid_obj = network_layer.MlEid(ml_eid)
# THEN
self.assertEqual(ml_eid_obj, network_layer.MlEid(ml_eid))
class TestMlEidFactory(unittest.TestCase):
def test_should_create_MlEid_from_bytearray_when_parse_method_is_called(self):
# GIVEN
ml_eid = any_ml_eid()
factory = network_layer.MlEidFactory()
# WHEN
ml_eid_obj = factory.parse(io.BytesIO(ml_eid), common.MessageInfo())
# THEN
self.assertTrue(isinstance(ml_eid_obj, network_layer.MlEid))
self.assertEqual(ml_eid, ml_eid_obj.ml_eid)
class TestStatus(unittest.TestCase):
def test_should_return_status_value_when_status_property_is_called(self):
# GIVEN
status = any_status()
status_obj = network_layer.Status(status)
# WHEN
actual_status = status_obj.status
# THEN
self.assertEqual(status, actual_status)
def test_should_return_True_when_try_to_equal_two_the_same_type_objects_with_the_same_values(self):
# GIVEN
status = any_status()
status_obj = network_layer.Status(status)
# THEN
self.assertEqual(status_obj, network_layer.Status(status))
class TestStatusFactory(unittest.TestCase):
def test_should_create_Status_from_bytearray_when_parse_method_is_called(self):
# GIVEN
status = any_status()
factory = network_layer.StatusFactory()
data = bytearray([status])
# WHEN
status_obj = factory.parse(io.BytesIO(data), common.MessageInfo())
# THEN
self.assertTrue(isinstance(status_obj, network_layer.Status))
self.assertEqual(status, status_obj.status)
class TestTimeSinceLastTransaction(unittest.TestCase):
def test_should_return_seconds_value_when_seconds_property_is_called(self):
# GIVEN
seconds = any_seconds()
time_since_last_transaction = network_layer.TimeSinceLastTransaction(seconds)
# WHEN
actual_seconds = time_since_last_transaction.seconds
# THEN
self.assertEqual(seconds, actual_seconds)
def test_should_return_True_when_try_to_equal_two_the_same_type_objects_with_the_same_values(self):
# GIVEN
seconds = any_seconds()
time_since_last_transaction = network_layer.TimeSinceLastTransaction(seconds)
# THEN
self.assertEqual(
time_since_last_transaction,
network_layer.TimeSinceLastTransaction(seconds),
)
class TestTimeSinceLastTransactionFactory(unittest.TestCase):
def test_should_create_TimeSinceLastTransaction_from_bytearray_when_parse_method_is_called(self):
# GIVEN
seconds = any_seconds()
factory = network_layer.TimeSinceLastTransactionFactory()
data = bytearray(struct.pack(">L", seconds))
# WHEN
time_since_last_transaction = factory.parse(io.BytesIO(data), common.MessageInfo())
# THEN
self.assertTrue(isinstance(
time_since_last_transaction,
network_layer.TimeSinceLastTransaction,
))
self.assertEqual(seconds, time_since_last_transaction.seconds)
class TestRouterMask(unittest.TestCase):
def test_should_return_id_sequence_value_when_id_sequence_property_is_called(self):
# GIVEN
id_sequence = any_id_sequence()
router_mask = network_layer.RouterMask(id_sequence, any_router_id_mask())
# WHEN
actual_id_sequence = router_mask.id_sequence
# THEN
self.assertEqual(id_sequence, actual_id_sequence)
def test_should_return_router_id_mask_value_when_router_id_mask_property_is_called(self):
# GIVEN
router_id_mask = any_router_id_mask()
router_mask = network_layer.RouterMask(any_id_sequence(), router_id_mask)
# WHEN
actual_router_id_mask = router_mask.router_id_mask
# THEN
self.assertEqual(router_id_mask, actual_router_id_mask)
def test_should_return_True_when_try_to_equal_two_the_same_type_objects_with_the_same_values(self):
# GIVEN
id_sequence = any_id_sequence()
router_id_mask = any_router_id_mask()
router_mask = network_layer.RouterMask(id_sequence, router_id_mask)
# THEN
self.assertEqual(router_mask, network_layer.RouterMask(id_sequence, router_id_mask))
class TestRouterMaskFactory(unittest.TestCase):
def test_should_create_RouterMask_from_bytearray_when_parse_method_is_called(self):
# GIVEN
id_sequence = any_id_sequence()
router_id_mask = any_router_id_mask()
factory = network_layer.RouterMaskFactory()
data = bytearray([id_sequence]) + struct.pack(">Q", router_id_mask)
# WHEN
router_mask = factory.parse(io.BytesIO(data), common.MessageInfo())
# THEN
self.assertTrue(isinstance(router_mask, network_layer.RouterMask))
self.assertEqual(id_sequence, router_mask.id_sequence)
self.assertEqual(router_id_mask, router_mask.router_id_mask)
class TestNdOption(unittest.TestCase):
def test_should_return_options_value_when_options_property_is_called(self):
# GIVEN
options = any_options()
nd_option = network_layer.NdOption(options)
# WHEN
actual_options = nd_option.options
# THEN
self.assertEqual(options, actual_options)
def test_should_return_True_when_try_to_equal_two_the_same_type_objects_with_the_same_values(self):
# GIVEN
options = any_options()
nd_option = network_layer.NdOption(options)
# THEN
self.assertEqual(nd_option, network_layer.NdOption(options))
class TestNdOptionFactory(unittest.TestCase):
def test_should_create_NdOption_from_bytearray_when_parse_method_is_called(self):
# GIVEN
options = any_options()
factory = network_layer.NdOptionFactory()
data = bytearray(options)
# WHEN
nd_option = factory.parse(io.BytesIO(data), common.MessageInfo())
# THEN
self.assertTrue(isinstance(nd_option, network_layer.NdOption))
self.assertEqual(options, nd_option.options)
class TestThreadNetworkData(unittest.TestCase):
def test_should_return_options_value_when_options_property_is_called(self):
# GIVEN
tlvs = any_tlvs_data()
thread_network_data = network_layer.ThreadNetworkData(tlvs)
# WHEN
actual_tlvs = thread_network_data.tlvs
# THEN
self.assertEqual(tlvs, actual_tlvs)
def test_should_return_True_when_try_to_equal_two_the_same_type_objects_with_the_same_values(self):
# GIVEN
tlvs = any_tlvs_data()
thread_network_data = network_layer.ThreadNetworkData(tlvs)
# THEN
self.assertEqual(thread_network_data, network_layer.ThreadNetworkData(tlvs))
class TestThreadNetworkDataFactory(unittest.TestCase):
def test_should_create_ThreadNetworkData_from_bytearray_when_parse_method_is_called(self):
# GIVEN
tlvs = any_tlvs_data()
class DummyNetworkDataTlvsFactory:
def parse(self, data, message_info):
return bytearray(data.read())
factory = network_layer.ThreadNetworkDataFactory(DummyNetworkDataTlvsFactory())
# WHEN
thread_network_data = factory.parse(io.BytesIO(tlvs), common.MessageInfo())
# THEN
self.assertTrue(isinstance(thread_network_data, network_layer.ThreadNetworkData))
self.assertEqual(tlvs, thread_network_data.tlvs)
if __name__ == "__main__":
unittest.main()
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Extremely random forest graph builder. go/brain-tree."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import random
from tensorflow.contrib.tensor_forest.python import constants
from tensorflow.contrib.tensor_forest.python.ops import inference_ops
from tensorflow.contrib.tensor_forest.python.ops import training_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.platform import tf_logging as logging
# A convenience class for holding random forest hyperparameters.
#
# To just get some good default parameters, use:
# hparams = ForestHParams(num_classes=2, num_features=40).fill()
#
# Note that num_classes cannot be inferred and so must always be specified.
# Also, either num_splits_to_consider or num_features should be set.
#
# To override specific values, pass them to the constructor:
# hparams = ForestHParams(num_classes=5, num_trees=10, num_features=5).fill()
#
# TODO(thomaswc): Inherit from tf.HParams when that is publicly available.
class ForestHParams(object):
"""A base class for holding hyperparameters and calculating good defaults."""
def __init__(self,
num_trees=100,
max_nodes=10000,
bagging_fraction=1.0,
num_splits_to_consider=0,
feature_bagging_fraction=1.0,
max_fertile_nodes=0,
split_after_samples=250,
min_split_samples=5,
valid_leaf_threshold=1,
**kwargs):
self.num_trees = num_trees
self.max_nodes = max_nodes
self.bagging_fraction = bagging_fraction
self.feature_bagging_fraction = feature_bagging_fraction
self.num_splits_to_consider = num_splits_to_consider
self.max_fertile_nodes = max_fertile_nodes
self.split_after_samples = split_after_samples
self.min_split_samples = min_split_samples
self.valid_leaf_threshold = valid_leaf_threshold
for name, value in kwargs.items():
setattr(self, name, value)
def values(self):
return self.__dict__
def fill(self):
"""Intelligently sets any non-specific parameters."""
# Fail fast if num_classes or num_features isn't set.
_ = getattr(self, 'num_classes')
_ = getattr(self, 'num_features')
self.bagged_num_features = int(self.feature_bagging_fraction *
self.num_features)
self.bagged_features = None
if self.feature_bagging_fraction < 1.0:
self.bagged_features = [random.sample(
range(self.num_features),
self.bagged_num_features) for _ in range(self.num_trees)]
self.regression = getattr(self, 'regression', False)
# Num_outputs is the actual number of outputs (a single prediction for
    # classification, an N-dimensional point for regression).
self.num_outputs = self.num_classes if self.regression else 1
# Add an extra column to classes for storing counts, which is needed for
# regression and avoids having to recompute sums for classification.
self.num_output_columns = self.num_classes + 1
# The Random Forest literature recommends sqrt(# features) for
# classification problems, and p/3 for regression problems.
# TODO(thomaswc): Consider capping this for large number of features.
self.num_splits_to_consider = (
self.num_splits_to_consider or
max(10, int(math.ceil(math.sqrt(self.num_features)))))
    # max_fertile_nodes doesn't affect model performance, only training speed.
    # We therefore set it primarily based upon space considerations.
    # Each fertile node takes up num_splits_to_consider times as much
    # space as a non-fertile node. We want the fertile nodes in total to
    # take up only as much space as the non-fertile nodes, so
num_fertile = int(math.ceil(self.max_nodes / self.num_splits_to_consider))
    # But always use at least 1000 accumulator slots.
num_fertile = max(num_fertile, 1000)
self.max_fertile_nodes = self.max_fertile_nodes or num_fertile
# But it also never needs to be larger than the number of leaves,
# which is max_nodes / 2.
self.max_fertile_nodes = min(self.max_fertile_nodes,
int(math.ceil(self.max_nodes / 2.0)))
# We have num_splits_to_consider slots to fill, and we want to spend
# approximately split_after_samples samples initializing them.
    num_split_initializations_per_input = max(1, int(math.floor(
        self.num_splits_to_consider / self.split_after_samples)))
    self.split_initializations_per_input = getattr(
        self, 'split_initializations_per_input',
        num_split_initializations_per_input)
# If base_random_seed is 0, the current time will be used to seed the
# random number generators for each tree. If non-zero, the i-th tree
# will be seeded with base_random_seed + i.
self.base_random_seed = getattr(self, 'base_random_seed', 0)
return self
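# A minimal usage sketch (illustrative only, not part of the original module):
# with the defaults above, num_classes=2 and num_features=40 fill in
# num_splits_to_consider == max(10, ceil(sqrt(40))) == 10 and
# num_output_columns == num_classes + 1 == 3.
def _example_forest_hparams():  # illustrative only
  return ForestHParams(num_classes=2, num_features=40).fill()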
# A simple container to hold the training variables for a single tree.
class TreeTrainingVariables(object):
"""Stores tf.Variables for training a single random tree.
Uses tf.get_variable to get tree-specific names so that this can be used
with a tf.learn-style implementation (one that trains a model, saves it,
then relies on restoring that model to evaluate).
"""
def __init__(self, params, tree_num, training):
self.tree = variable_scope.get_variable(
name=self.get_tree_name('tree', tree_num), dtype=dtypes.int32,
shape=[params.max_nodes, 2],
initializer=init_ops.constant_initializer(-2))
self.tree_thresholds = variable_scope.get_variable(
name=self.get_tree_name('tree_thresholds', tree_num),
shape=[params.max_nodes],
initializer=init_ops.constant_initializer(-1.0))
self.end_of_tree = variable_scope.get_variable(
name=self.get_tree_name('end_of_tree', tree_num),
dtype=dtypes.int32,
initializer=constant_op.constant([1]))
self.start_epoch = tf_variables.Variable(
[0] * (params.max_nodes), name='start_epoch')
if training:
self.node_to_accumulator_map = variable_scope.get_variable(
name=self.get_tree_name('node_to_accumulator_map', tree_num),
shape=[params.max_nodes],
dtype=dtypes.int32,
initializer=init_ops.constant_initializer(-1))
self.candidate_split_features = variable_scope.get_variable(
name=self.get_tree_name('candidate_split_features', tree_num),
shape=[params.max_fertile_nodes, params.num_splits_to_consider],
dtype=dtypes.int32,
initializer=init_ops.constant_initializer(-1))
self.candidate_split_thresholds = variable_scope.get_variable(
name=self.get_tree_name('candidate_split_thresholds', tree_num),
shape=[params.max_fertile_nodes, params.num_splits_to_consider],
initializer=init_ops.constant_initializer(0.0))
# Statistics shared by classification and regression.
self.node_sums = variable_scope.get_variable(
name=self.get_tree_name('node_sums', tree_num),
shape=[params.max_nodes, params.num_output_columns],
initializer=init_ops.constant_initializer(0.0))
if training:
self.candidate_split_sums = variable_scope.get_variable(
name=self.get_tree_name('candidate_split_sums', tree_num),
shape=[params.max_fertile_nodes, params.num_splits_to_consider,
params.num_output_columns],
initializer=init_ops.constant_initializer(0.0))
self.accumulator_sums = variable_scope.get_variable(
name=self.get_tree_name('accumulator_sums', tree_num),
shape=[params.max_fertile_nodes, params.num_output_columns],
initializer=init_ops.constant_initializer(-1.0))
# Regression also tracks second order stats.
if params.regression:
self.node_squares = variable_scope.get_variable(
name=self.get_tree_name('node_squares', tree_num),
shape=[params.max_nodes, params.num_output_columns],
initializer=init_ops.constant_initializer(0.0))
self.candidate_split_squares = variable_scope.get_variable(
name=self.get_tree_name('candidate_split_squares', tree_num),
shape=[params.max_fertile_nodes, params.num_splits_to_consider,
params.num_output_columns],
initializer=init_ops.constant_initializer(0.0))
self.accumulator_squares = variable_scope.get_variable(
name=self.get_tree_name('accumulator_squares', tree_num),
shape=[params.max_fertile_nodes, params.num_output_columns],
initializer=init_ops.constant_initializer(-1.0))
else:
self.node_squares = constant_op.constant(
0.0, name=self.get_tree_name('node_squares', tree_num))
self.candidate_split_squares = constant_op.constant(
0.0, name=self.get_tree_name('candidate_split_squares', tree_num))
self.accumulator_squares = constant_op.constant(
0.0, name=self.get_tree_name('accumulator_squares', tree_num))
def get_tree_name(self, name, num):
return '{0}-{1}'.format(name, num)
class ForestStats(object):
def __init__(self, tree_stats, params):
"""A simple container for stats about a forest."""
self.tree_stats = tree_stats
self.params = params
def get_average(self, thing):
val = 0.0
for i in range(self.params.num_trees):
val += getattr(self.tree_stats[i], thing)
return val / self.params.num_trees
class TreeStats(object):
def __init__(self, num_nodes, num_leaves):
self.num_nodes = num_nodes
self.num_leaves = num_leaves
class ForestTrainingVariables(object):
"""A container for a forests training data, consisting of multiple trees.
Instantiates a TreeTrainingVariables object for each tree. We override the
__getitem__ and __setitem__ function so that usage looks like this:
forest_variables = ForestTrainingVariables(params)
... forest_variables.tree ...
"""
def __init__(self, params, device_assigner, training=True,
tree_variables_class=TreeTrainingVariables):
self.variables = []
for i in range(params.num_trees):
with ops.device(device_assigner.get_device(i)):
self.variables.append(tree_variables_class(params, i, training))
def __setitem__(self, t, val):
self.variables[t] = val
def __getitem__(self, t):
return self.variables[t]
class RandomForestDeviceAssigner(object):
"""A device assigner that uses the default device.
Write subclasses that implement get_device for control over how trees
get assigned to devices. This assumes that whole trees are assigned
to a device.
"""
def __init__(self):
self.cached = None
def get_device(self, unused_tree_num):
if not self.cached:
dummy = constant_op.constant(0)
self.cached = dummy.device
return self.cached
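# A hedged sketch (illustrative only, not part of the original module) of the
# kind of subclass the docstring above suggests: it assigns whole trees
# round-robin across an explicit list of device strings supplied by the caller.
class _ExampleRoundRobinDeviceAssigner(RandomForestDeviceAssigner):
  """Illustrative only: assigns tree i to devices[i % len(devices)]."""
  def __init__(self, devices=('/cpu:0',)):
    super(_ExampleRoundRobinDeviceAssigner, self).__init__()
    self.devices = list(devices)
  def get_device(self, tree_num):
    return self.devices[tree_num % len(self.devices)]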
class RandomForestGraphs(object):
"""Builds TF graphs for random forest training and inference."""
def __init__(self, params, device_assigner=None,
variables=None, tree_variables_class=TreeTrainingVariables,
tree_graphs=None, training=True,
t_ops=training_ops,
i_ops=inference_ops):
self.params = params
self.device_assigner = device_assigner or RandomForestDeviceAssigner()
logging.info('Constructing forest with params = ')
logging.info(self.params.__dict__)
self.variables = variables or ForestTrainingVariables(
self.params, device_assigner=self.device_assigner, training=training,
tree_variables_class=tree_variables_class)
tree_graph_class = tree_graphs or RandomTreeGraphs
self.trees = [
tree_graph_class(
self.variables[i], self.params,
t_ops.Load(), i_ops.Load(), i)
for i in range(self.params.num_trees)]
def _bag_features(self, tree_num, input_data):
split_data = array_ops.split(1, self.params.num_features, input_data)
return array_ops.concat(
1, [split_data[ind] for ind in self.params.bagged_features[tree_num]])
def training_graph(self, input_data, input_labels, data_spec=None,
epoch=None, **tree_kwargs):
"""Constructs a TF graph for training a random forest.
Args:
input_data: A tensor or SparseTensor or placeholder for input data.
input_labels: A tensor or placeholder for labels associated with
input_data.
data_spec: A list of tf.dtype values specifying the original types of
each column.
epoch: A tensor or placeholder for the epoch the training data comes from.
**tree_kwargs: Keyword arguments passed to each tree's training_graph.
Returns:
The last op in the random forest training graph.
"""
data_spec = [constants.DATA_FLOAT] if data_spec is None else data_spec
tree_graphs = []
for i in range(self.params.num_trees):
with ops.device(self.device_assigner.get_device(i)):
seed = self.params.base_random_seed
if seed != 0:
seed += i
# If using bagging, randomly select some of the input.
tree_data = input_data
tree_labels = input_labels
if self.params.bagging_fraction < 1.0:
          # TODO(thomaswc): This does sampling without replacement. Consider
# also allowing sampling with replacement as an option.
batch_size = array_ops.slice(array_ops.shape(input_data), [0], [1])
r = random_ops.random_uniform(batch_size, seed=seed)
mask = math_ops.less(
r, array_ops.ones_like(r) * self.params.bagging_fraction)
gather_indices = array_ops.squeeze(
array_ops.where(mask), squeeze_dims=[1])
# TODO(thomaswc): Calculate out-of-bag data and labels, and store
# them for use in calculating statistics later.
tree_data = array_ops.gather(input_data, gather_indices)
tree_labels = array_ops.gather(input_labels, gather_indices)
if self.params.bagged_features:
tree_data = self._bag_features(i, tree_data)
initialization = self.trees[i].tree_initialization()
with ops.control_dependencies([initialization]):
tree_graphs.append(
self.trees[i].training_graph(
tree_data, tree_labels, seed, data_spec=data_spec,
epoch=([0] if epoch is None else epoch),
**tree_kwargs))
return control_flow_ops.group(*tree_graphs, name='train')
def inference_graph(self, input_data, data_spec=None):
"""Constructs a TF graph for evaluating a random forest.
Args:
input_data: A tensor or SparseTensor or placeholder for input data.
data_spec: A list of tf.dtype values specifying the original types of
each column.
Returns:
The last op in the random forest inference graph.
"""
data_spec = [constants.DATA_FLOAT] if data_spec is None else data_spec
probabilities = []
for i in range(self.params.num_trees):
with ops.device(self.device_assigner.get_device(i)):
tree_data = input_data
if self.params.bagged_features:
tree_data = self._bag_features(i, input_data)
probabilities.append(self.trees[i].inference_graph(tree_data,
data_spec))
with ops.device(self.device_assigner.get_device(0)):
all_predict = array_ops.pack(probabilities)
return math_ops.div(
math_ops.reduce_sum(all_predict, 0), self.params.num_trees,
name='probabilities')
def average_size(self):
"""Constructs a TF graph for evaluating the average size of a forest.
Returns:
The average number of nodes over the trees.
"""
sizes = []
for i in range(self.params.num_trees):
with ops.device(self.device_assigner.get_device(i)):
sizes.append(self.trees[i].size())
return math_ops.reduce_mean(array_ops.pack(sizes))
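  # Note: the forest is not trained by gradient descent, so the "losses" below
  # simply report the negative average tree size; the monitored loss then
  # decreases as the trees grow.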
# pylint: disable=unused-argument
def training_loss(self, features, labels):
return math_ops.neg(self.average_size())
# pylint: disable=unused-argument
def validation_loss(self, features, labels):
return math_ops.neg(self.average_size())
def average_impurity(self):
"""Constructs a TF graph for evaluating the leaf impurity of a forest.
Returns:
The last op in the graph.
"""
impurities = []
for i in range(self.params.num_trees):
with ops.device(self.device_assigner.get_device(i)):
impurities.append(self.trees[i].average_impurity())
return math_ops.reduce_mean(array_ops.pack(impurities))
def get_stats(self, session):
tree_stats = []
for i in range(self.params.num_trees):
with ops.device(self.device_assigner.get_device(i)):
tree_stats.append(self.trees[i].get_stats(session))
return ForestStats(tree_stats, self.params)
class RandomTreeGraphs(object):
"""Builds TF graphs for random tree training and inference."""
def __init__(self, variables, params, t_ops, i_ops, tree_num):
self.training_ops = t_ops
self.inference_ops = i_ops
self.variables = variables
self.params = params
self.tree_num = tree_num
def tree_initialization(self):
def _init_tree():
return state_ops.scatter_update(self.variables.tree, [0], [[-1, -1]]).op
def _nothing():
return control_flow_ops.no_op()
return control_flow_ops.cond(
math_ops.equal(array_ops.squeeze(array_ops.slice(
self.variables.tree, [0, 0], [1, 1])), -2),
_init_tree, _nothing)
def _gini(self, class_counts):
"""Calculate the Gini impurity.
If c(i) denotes the i-th class count and c = sum_i c(i) then
score = 1 - sum_i ( c(i) / c )^2
Args:
class_counts: A 2-D tensor of per-class counts, usually a slice or
gather from variables.node_sums.
Returns:
A 1-D tensor of the Gini impurities for each row in the input.
"""
smoothed = 1.0 + array_ops.slice(class_counts, [0, 1], [-1, -1])
sums = math_ops.reduce_sum(smoothed, 1)
sum_squares = math_ops.reduce_sum(math_ops.square(smoothed), 1)
return 1.0 - sum_squares / (sums * sums)
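  # Worked example for _gini (illustrative): for a row whose per-class counts
  # are [3, 1], smoothed = [4.0, 2.0], so sums = 6, sum_squares = 20, and the
  # impurity is 1 - 20 / 36 ~= 0.444.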
def _weighted_gini(self, class_counts):
"""Our split score is the Gini impurity times the number of examples.
If c(i) denotes the i-th class count and c = sum_i c(i) then
score = c * (1 - sum_i ( c(i) / c )^2 )
= c - sum_i c(i)^2 / c
Args:
class_counts: A 2-D tensor of per-class counts, usually a slice or
gather from variables.node_sums.
Returns:
      A 1-D tensor of the weighted Gini impurity for each row in the input.
"""
smoothed = 1.0 + array_ops.slice(class_counts, [0, 1], [-1, -1])
sums = math_ops.reduce_sum(smoothed, 1)
sum_squares = math_ops.reduce_sum(math_ops.square(smoothed), 1)
return sums - sum_squares / sums
def _variance(self, sums, squares):
"""Calculate the variance for each row of the input tensors.
Variance is V = E[x^2] - (E[x])^2.
Args:
sums: A tensor containing output sums, usually a slice from
variables.node_sums. Should contain the number of examples seen
in index 0 so we can calculate expected value.
squares: Same as sums, but sums of squares.
Returns:
A 1-D tensor of the variances for each row in the input.
"""
total_count = array_ops.slice(sums, [0, 0], [-1, 1])
e_x = sums / total_count
e_x2 = squares / total_count
return math_ops.reduce_sum(e_x2 - math_ops.square(e_x), 1)
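  # Worked example for _variance (illustrative): for sums = [[4., 10.]] and
  # squares = [[4., 30.]] (4 examples, output sum 10, sum of squares 30),
  # e_x = [1., 2.5] and e_x2 = [1., 7.5], giving a variance of 1.25.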
def training_graph(self, input_data, input_labels, random_seed,
data_spec, epoch=None):
"""Constructs a TF graph for training a random tree.
Args:
input_data: A tensor or SparseTensor or placeholder for input data.
input_labels: A tensor or placeholder for labels associated with
input_data.
random_seed: The random number generator seed to use for this tree. 0
means use the current time as the seed.
data_spec: A list of tf.dtype values specifying the original types of
each column.
epoch: A tensor or placeholder for the epoch the training data comes from.
Returns:
The last op in the random tree training graph.
"""
epoch = [0] if epoch is None else epoch
sparse_indices = []
sparse_values = []
sparse_shape = []
if isinstance(input_data, ops.SparseTensor):
sparse_indices = input_data.indices
sparse_values = input_data.values
sparse_shape = input_data.shape
input_data = []
# Count extremely random stats.
(node_sums, node_squares, splits_indices, splits_sums,
splits_squares, totals_indices, totals_sums,
totals_squares, input_leaves) = (
self.training_ops.count_extremely_random_stats(
input_data, sparse_indices, sparse_values, sparse_shape,
data_spec, input_labels, self.variables.tree,
self.variables.tree_thresholds,
self.variables.node_to_accumulator_map,
self.variables.candidate_split_features,
self.variables.candidate_split_thresholds,
self.variables.start_epoch, epoch,
num_classes=self.params.num_output_columns,
regression=self.params.regression))
node_update_ops = []
node_update_ops.append(
state_ops.assign_add(self.variables.node_sums, node_sums))
splits_update_ops = []
splits_update_ops.append(self.training_ops.scatter_add_ndim(
self.variables.candidate_split_sums,
splits_indices, splits_sums))
splits_update_ops.append(self.training_ops.scatter_add_ndim(
self.variables.accumulator_sums, totals_indices,
totals_sums))
if self.params.regression:
node_update_ops.append(state_ops.assign_add(self.variables.node_squares,
node_squares))
splits_update_ops.append(self.training_ops.scatter_add_ndim(
self.variables.candidate_split_squares,
splits_indices, splits_squares))
splits_update_ops.append(self.training_ops.scatter_add_ndim(
self.variables.accumulator_squares, totals_indices,
totals_squares))
# Sample inputs.
update_indices, feature_updates, threshold_updates = (
self.training_ops.sample_inputs(
input_data, sparse_indices, sparse_values, sparse_shape,
self.variables.node_to_accumulator_map,
input_leaves, self.variables.candidate_split_features,
self.variables.candidate_split_thresholds,
split_initializations_per_input=(
self.params.split_initializations_per_input),
split_sampling_random_seed=random_seed))
update_features_op = state_ops.scatter_update(
self.variables.candidate_split_features, update_indices,
feature_updates)
update_thresholds_op = state_ops.scatter_update(
self.variables.candidate_split_thresholds, update_indices,
threshold_updates)
# Calculate finished nodes.
with ops.control_dependencies(splits_update_ops):
children = array_ops.squeeze(array_ops.slice(
self.variables.tree, [0, 0], [-1, 1]), squeeze_dims=[1])
is_leaf = math_ops.equal(constants.LEAF_NODE, children)
leaves = math_ops.to_int32(array_ops.squeeze(array_ops.where(is_leaf),
squeeze_dims=[1]))
finished, stale = self.training_ops.finished_nodes(
leaves, self.variables.node_to_accumulator_map,
self.variables.candidate_split_sums,
self.variables.candidate_split_squares,
self.variables.accumulator_sums,
self.variables.accumulator_squares,
self.variables.start_epoch, epoch,
num_split_after_samples=self.params.split_after_samples,
min_split_samples=self.params.min_split_samples)
# Update leaf scores.
non_fertile_leaves = array_ops.boolean_mask(
leaves, math_ops.less(array_ops.gather(
self.variables.node_to_accumulator_map, leaves), 0))
# TODO(gilberth): It should be possible to limit the number of non
# fertile leaves we calculate scores for, especially since we can only take
# at most array_ops.shape(finished)[0] of them.
with ops.control_dependencies(node_update_ops):
sums = array_ops.gather(self.variables.node_sums, non_fertile_leaves)
if self.params.regression:
squares = array_ops.gather(self.variables.node_squares,
non_fertile_leaves)
non_fertile_leaf_scores = self._variance(sums, squares)
else:
non_fertile_leaf_scores = self._weighted_gini(sums)
# Calculate best splits.
with ops.control_dependencies(splits_update_ops):
split_indices = self.training_ops.best_splits(
finished, self.variables.node_to_accumulator_map,
self.variables.candidate_split_sums,
self.variables.candidate_split_squares,
self.variables.accumulator_sums,
self.variables.accumulator_squares,
regression=self.params.regression)
# Grow tree.
with ops.control_dependencies([update_features_op, update_thresholds_op]):
(tree_update_indices, tree_children_updates, tree_threshold_updates,
new_eot) = (self.training_ops.grow_tree(
self.variables.end_of_tree, self.variables.node_to_accumulator_map,
finished, split_indices, self.variables.candidate_split_features,
self.variables.candidate_split_thresholds))
tree_update_op = state_ops.scatter_update(
self.variables.tree, tree_update_indices, tree_children_updates)
thresholds_update_op = state_ops.scatter_update(
self.variables.tree_thresholds, tree_update_indices,
tree_threshold_updates)
# TODO(thomaswc): Only update the epoch on the new leaves.
new_epoch_updates = epoch * array_ops.ones_like(tree_threshold_updates,
dtype=dtypes.int32)
epoch_update_op = state_ops.scatter_update(
self.variables.start_epoch, tree_update_indices,
new_epoch_updates)
# Update fertile slots.
with ops.control_dependencies([tree_update_op]):
(node_map_updates, accumulators_cleared, accumulators_allocated) = (
self.training_ops.update_fertile_slots(
finished,
non_fertile_leaves,
non_fertile_leaf_scores,
self.variables.end_of_tree,
self.variables.accumulator_sums,
self.variables.node_to_accumulator_map,
stale,
regression=self.params.regression))
# Ensure end_of_tree doesn't get updated until UpdateFertileSlots has
# used it to calculate new leaves.
gated_new_eot, = control_flow_ops.tuple([new_eot],
control_inputs=[node_map_updates])
eot_update_op = state_ops.assign(self.variables.end_of_tree, gated_new_eot)
updates = []
updates.append(eot_update_op)
updates.append(tree_update_op)
updates.append(thresholds_update_op)
updates.append(epoch_update_op)
updates.append(state_ops.scatter_update(
self.variables.node_to_accumulator_map,
array_ops.squeeze(array_ops.slice(node_map_updates, [0, 0], [1, -1]),
squeeze_dims=[0]),
array_ops.squeeze(array_ops.slice(node_map_updates, [1, 0], [1, -1]),
squeeze_dims=[0])))
cleared_and_allocated_accumulators = array_ops.concat(
0, [accumulators_cleared, accumulators_allocated])
# Calculate values to put into scatter update for candidate counts.
# Candidate split counts are always reset back to 0 for both cleared
# and allocated accumulators. This means some accumulators might be doubly
    # reset to 0 if they were released and not allocated, then later allocated.
split_values = array_ops.tile(
array_ops.expand_dims(array_ops.expand_dims(
array_ops.zeros_like(cleared_and_allocated_accumulators,
dtype=dtypes.float32), 1), 2),
[1, self.params.num_splits_to_consider, self.params.num_output_columns])
updates.append(state_ops.scatter_update(
self.variables.candidate_split_sums,
cleared_and_allocated_accumulators, split_values))
if self.params.regression:
updates.append(state_ops.scatter_update(
self.variables.candidate_split_squares,
cleared_and_allocated_accumulators, split_values))
# Calculate values to put into scatter update for total counts.
total_cleared = array_ops.tile(
array_ops.expand_dims(
math_ops.neg(array_ops.ones_like(accumulators_cleared,
dtype=dtypes.float32)), 1),
[1, self.params.num_output_columns])
total_reset = array_ops.tile(
array_ops.expand_dims(
array_ops.zeros_like(accumulators_allocated,
dtype=dtypes.float32), 1),
[1, self.params.num_output_columns])
accumulator_updates = array_ops.concat(0, [total_cleared, total_reset])
updates.append(state_ops.scatter_update(
self.variables.accumulator_sums,
cleared_and_allocated_accumulators, accumulator_updates))
if self.params.regression:
updates.append(state_ops.scatter_update(
self.variables.accumulator_squares,
cleared_and_allocated_accumulators, accumulator_updates))
# Calculate values to put into scatter update for candidate splits.
split_features_updates = array_ops.tile(
array_ops.expand_dims(
math_ops.neg(array_ops.ones_like(
cleared_and_allocated_accumulators)), 1),
[1, self.params.num_splits_to_consider])
updates.append(state_ops.scatter_update(
self.variables.candidate_split_features,
cleared_and_allocated_accumulators, split_features_updates))
updates += self.finish_iteration()
return control_flow_ops.group(*updates)
def finish_iteration(self):
"""Perform any operations that should be done at the end of an iteration.
This is mostly useful for subclasses that need to reset variables after
an iteration, such as ones that are used to finish nodes.
Returns:
A list of operations.
"""
return []
def inference_graph(self, input_data, data_spec):
"""Constructs a TF graph for evaluating a random tree.
Args:
input_data: A tensor or SparseTensor or placeholder for input data.
data_spec: A list of tf.dtype values specifying the original types of
each column.
Returns:
The last op in the random tree inference graph.
"""
sparse_indices = []
sparse_values = []
sparse_shape = []
if isinstance(input_data, ops.SparseTensor):
sparse_indices = input_data.indices
sparse_values = input_data.values
sparse_shape = input_data.shape
input_data = []
return self.inference_ops.tree_predictions(
input_data, sparse_indices, sparse_values, sparse_shape, data_spec,
self.variables.tree,
self.variables.tree_thresholds,
self.variables.node_sums,
valid_leaf_threshold=self.params.valid_leaf_threshold)
def average_impurity(self):
"""Constructs a TF graph for evaluating the average leaf impurity of a tree.
If in regression mode, this is the leaf variance. If in classification mode,
    this is the Gini impurity.
Returns:
The last op in the graph.
"""
children = array_ops.squeeze(array_ops.slice(
self.variables.tree, [0, 0], [-1, 1]), squeeze_dims=[1])
is_leaf = math_ops.equal(constants.LEAF_NODE, children)
leaves = math_ops.to_int32(array_ops.squeeze(array_ops.where(is_leaf),
squeeze_dims=[1]))
counts = array_ops.gather(self.variables.node_sums, leaves)
gini = self._weighted_gini(counts)
# Guard against step 1, when there often are no leaves yet.
def impurity():
return gini
# Since average impurity can be used for loss, when there's no data just
# return a big number so that loss always decreases.
def big():
return array_ops.ones_like(gini, dtype=dtypes.float32) * 10000000.
return control_flow_ops.cond(math_ops.greater(
array_ops.shape(leaves)[0], 0), impurity, big)
def size(self):
"""Constructs a TF graph for evaluating the current number of nodes.
Returns:
The current number of nodes in the tree.
"""
return self.variables.end_of_tree - 1
def get_stats(self, session):
num_nodes = self.variables.end_of_tree.eval(session=session) - 1
num_leaves = array_ops.where(
math_ops.equal(array_ops.squeeze(array_ops.slice(
self.variables.tree, [0, 0], [-1, 1])), constants.LEAF_NODE)
).eval(session=session).shape[0]
return TreeStats(num_nodes, num_leaves)
|
|
from formencode.validators import Int
import unittest
from webhelpers2.html.builder import literal
from blazeform.form import Form
from blazeform.element import TextElement
from blazeform.exceptions import ValueInvalid, ElementInvalid, ProgrammingError
from blazeform.util import NotGiven
from blazeutils import DumbObject
L = literal
class TypeRegistrationTest(unittest.TestCase):
def setUp(self):
self.f = Form('login')
def tearDown(self):
self.f = None
def testRegisterElementType1(self):
self.f.register_element_type('testtype', TextElement)
self.assertEqual(TextElement, self.f._registered_types['testtype'])
def testRegisterDuplicateElementType(self):
self.f.register_element_type('testtype', TextElement)
try:
self.f.register_element_type('testtype', TextElement)
except ValueError:
pass
else:
self.fail("expected a ValueError")
class CommonFormUsageTest(unittest.TestCase):
def setUp(self):
self.render_html = '<input class="text" id="login-username" name="username" type="text" />'
def testForm1(self):
"""
most basic usage of a form
"""
form = Form('login')
form.add_text('username', 'User Name')
self.assertEqual(self.render_html, str(form.elements.username.render()))
def testForm4(self):
form = Form('login')
el = form.add_text('username', 'User Name')
self.assertEqual(self.render_html, str(form.elements.username.render()))
self.assertEqual(self.render_html, str(el.render()))
def test_first_class_elements(self):
"""
        the first element in the form, and the first element under a header, should have a 'first' class
"""
form_first_html = '<div id="user-username-row" class="text row odd first">'
header_first_html = '<div id="user-groupname-row" class="text row even first">'
form = Form('user')
form.add_text('username', 'User Name')
form.add_header('group_membership_header', 'Group Membership')
form.add_text('groupname', 'Group')
form_html = form.render()
assert form_html.find(form_first_html) > -1
assert form_html.find(header_first_html) > -1
def test_formencoding(self):
"""ensure form has correct encoding for file uploads"""
f1 = Form('login')
f1.add_text('username', 'User Name')
assert "multipart/form-data" not in f1.render()
f2 = Form('pictures')
f2.add_file('picture', 'Picture')
assert "multipart/form-data" in f2.render()
# make sure this works with grouped elements
f = Form('f')
fg = f.add_elgroup('file-group')
fg.add_file('picture', 'Picture')
assert "multipart/form-data" in f.render()
def test_submit_validation(self):
f1 = Form('login')
assert "login-submit-flag" in f1.render()
def test_is_submit(self):
f1 = Form('login')
assert not f1.is_submitted()
post = {'login-submit-flag': 'submitted'}
f1.set_submitted(post)
assert f1.is_submitted()
def test_is_cancel(self):
f1 = Form('login')
f1.add_cancel('cancel', 'Cancel')
assert not f1.is_cancel()
# cancel button, but form is not submitted
post = {'cancel': 'submitted'}
f1.set_submitted(post)
assert not f1.is_cancel()
# now submit form
post['login-submit-flag'] = 'submitted'
f1.set_submitted(post)
assert f1.is_cancel()
def test_default(self):
f = Form('login')
f.add_text('username', 'User Name')
f.add_file('file')
filesub = DumbObject(filename='text.txt', content_type='text/plain', content_length=10)
f.set_defaults({'username': 'test1', 'file': filesub})
self.assertEqual(
'<input class="text" id="login-username" name="username" type="text" value="test1" />',
str(f.elements.username.render())
)
def test_submit(self):
f = Form('login')
f.add_text('username', 'User Name')
f.set_defaults({'username': 'test1'})
post = {'login-submit-flag': 'submitted', 'username': 'test2'}
f.set_submitted(post)
self.assertEqual(
'<input class="text" id="login-username" name="username" type="text" value="test2" />',
str(f.elements.username.render())
)
assert f.get_values() == {'username': 'test2', 'login-submit-flag': 'submitted'}
def test_submit_by_name(self):
f = Form('login')
f.add_text('username', 'User Name')
f.add_submit('submit')
post = {'login-submit-flag': 'submitted', 'username': 'test2', 'submit': 'Submit'}
f.set_submitted(post)
assert f.get_values() == {'username': 'test2', 'login-submit-flag': 'submitted',
'submit': 'Submit'}
f = Form('login')
f.add_text('username', 'User Name', name="unfield")
f.add_submit('submit', name="submitbtn")
post = {'login-submit-flag': 'submitted', 'unfield': 'test2', 'submitbtn': 'Submit'}
f.set_submitted(post)
self.assertEqual(f.get_values(), {'unfield': 'test2', 'login-submit-flag': 'submitted',
'submitbtn': 'Submit'})
def test_blank_checkbox(self):
html = L('<input checked="checked" class="checkbox" id="login-disabled" name="disabled" '
'type="checkbox" />')
f = Form('login')
el = f.add_checkbox('disabled', 'Disabled', defaultval=True)
self.assertEqual(el(), html)
post = {'login-submit-flag': 'submitted'}
f.set_submitted(post)
dvalue = f.get_values()['disabled']
assert dvalue is False
# should unset on re-post after a blank submit
html = L('<input class="checkbox" id="login-disabled" name="disabled" type="checkbox" />')
self.assertEqual(el(), html)
def test_blank_checkbox_nameattr(self):
html = L('<input checked="checked" class="checkbox" id="login-disabled" name="mycb" '
'type="checkbox" />')
f = Form('login')
el = f.add_checkbox('disabled', 'Disabled', defaultval=True, name="mycb")
self.assertEqual(el(), html)
post = {'login-submit-flag': 'submitted'}
f.set_submitted(post)
dvalue = f.get_values()['mycb']
assert dvalue is False
# should unset on re-post after a blank submit
html = L('<input class="checkbox" id="login-disabled" name="mycb" type="checkbox" />')
self.assertEqual(el(), html)
def test_blank_multiselect(self):
f = Form('login')
options = [(1, 'one'), (2, 'two')]
el = f.add_mselect('numlist', options, 'Disabled', defaultval=2)
assert 'selected="selected"' in el()
post = {'login-submit-flag': 'submitted'}
f.set_submitted(post)
assert not f.get_values()['numlist']
# should unset on re-post after a blank submit
assert 'selected="selected"' not in el()
def test_blank_multicheckbox(self):
f = Form('login')
el1 = f.add_mcheckbox('mcheck1', 'Check 1', 1, 'cgroup1', checked=True)
el2 = f.add_mcheckbox('mcheck2', 'Check 2', 2, 'cgroup1')
assert 'checked="checked"' in el1()
assert 'checked="checked"' not in el2()
post = {'login-submit-flag': 'submitted'}
f.set_submitted(post)
assert not f.get_values()['cgroup1']
# should unset on re-post after a blank submit
assert 'checked="checked"' not in el1()
assert 'checked="checked"' not in el2()
def test_blank_radio(self):
f = Form('login')
el1 = f.add_radio('radio1', 'Radio 1', 1, 'rgroup1', selected=True)
el2 = f.add_radio('radio2', 'Radio 2', 2, 'rgroup1')
assert 'checked="checked"' in el1()
assert 'checked="checked"' not in el2()
post = {'login-submit-flag': 'submitted'}
f.set_submitted(post)
assert not f.get_values()['rgroup1']
# should unset on re-post after a blank submit
assert 'selected="selected"' not in el1()
assert 'selected="selected"' not in el2()
def test_dup_fields(self):
f = Form('f')
f.add_text('f')
try:
f.add_text('f')
self.fail('should not be able to add elements with the same id')
except ValueError:
pass
def test_is_valid(self):
f = Form('f')
f.add_text('f')
# wasn't submitted, so not valid
assert not f.is_valid()
f.set_submitted({'f-submit-flag': 'submitted'})
assert f.is_valid()
f = Form('f')
f.add_text('f', required=True)
# wasn't submitted, so not valid
assert not f.is_valid()
f.set_submitted({'f-submit-flag': 'submitted'})
assert not f.is_valid()
f.set_submitted({'f-submit-flag': 'submitted', 'f': 'foo'})
assert f.is_valid()
def test_form_validators(self):
def validator(form):
if form.elements.myfield.is_valid():
if form.elements.myfield.value != 'foo':
raise ValueInvalid('My Field: must be "foo", not "%s"' %
form.elements.myfield.value)
f = Form('f')
f.add_text('myfield', 'My Field')
f.add_validator(validator)
f.set_submitted({'f-submit-flag': 'submitted', 'myfield': 'bar'})
assert not f.is_valid()
self.assertEqual(f._errors[0], 'My Field: must be "foo", not "bar"')
f.set_submitted({'f-submit-flag': 'submitted', 'myfield': 'foo'})
assert f.is_valid()
assert len(f._errors) == 0
# custom message
f = Form('f')
f.add_text('myfield', 'My Field')
f.add_validator(validator, 'value incorrect')
f.set_submitted({'f-submit-flag': 'submitted', 'myfield': 'bar'})
assert not f.is_valid()
self.assertEqual(f._errors[0], 'value incorrect')
def test_validator_fe_class(self):
form = Form('f')
form.add_text('units', 'Units')
form.add_validator(Int)
assert isinstance(form._validators[0][0], Int)
def test_validator_fe_instance(self):
form = Form('f')
form.add_text('units', 'Units')
form.add_validator(Int())
assert isinstance(form._validators[0][0], Int)
def test_validator_recursion(self):
"""
        referencing .value from that field's validator causes infinite recursion
"""
f = Form('f')
def validator(form):
try:
f.elements.myfield.value
except ElementInvalid as e:
raise ValueInvalid(e)
el = f.add_text('myfield', 'My Field', maxlength=1)
el.add_processor(validator)
f.set_submitted({'f-submit-flag': 'submitted', 'myfield': '12'})
try:
assert not f.is_valid()
except RuntimeError as e:
assert 'maximum recursion depth exceeded' in str(e), str(e)
def test_validator_element_invalid(self):
"""
If a validator references an invalid element, then we don't let
        that exception propagate
"""
f = Form('f')
def validator(form):
f.elements.f1.value
f.add_text('f1', 'f1', maxlength=1)
f.add_text('f2', 'f2')
f.add_validator(validator)
f.set_submitted({'f-submit-flag': 'submitted', 'f1': '12'})
assert not f.is_valid()
def test_add_field_errors_string(self):
form = Form('f')
form.add_text('text1', 'Value')
form.add_text('text2', 'Value')
result = form.add_field_errors({
'text1': 'Generic Error',
'text2': 'Error'
})
assert result
self.assertEqual(form.elements.text1.errors, ['Generic Error'])
self.assertEqual(form.elements.text2.errors, ['Error'])
def test_add_field_errors_list(self):
form = Form('f')
form.add_text('text1', 'Value')
form.add_text('text2', 'Value')
result = form.add_field_errors({
'text1': ['Generic Error 1', 'Generic Error 2'],
'text2': ['Error 1', 'Error 2']
})
assert result
assert len(form.elements.text1.errors) == 2
self.assertEqual(form.elements.text1.errors, ['Generic Error 1', 'Generic Error 2'])
self.assertEqual(form.elements.text2.errors, ['Error 1', 'Error 2'])
def test_add_field_errors_extras(self):
        # the result of calling add_field_errors() should be False when not
        # all of the errors could be matched to a field
form = Form('f')
form.add_text('text1', 'Value')
form.add_text('text2', 'Value')
result = form.add_field_errors({
'text1': 'Generic Error',
'text2': 'Error',
'not there': 'Error'
})
assert result is False
def test_exception_handling(self):
# works with an element handler
form = Form('f')
el = form.add_text('field', 'Field')
el.add_handler('text exception', 'test error msg')
assert form.handle_exception(Exception('text exception'))
self.assertEqual(el.errors[0], 'test error msg')
# make sure exception on second field works
form = Form('f')
el = form.add_text('field', 'Field')
el.add_handler('not it', '')
el2 = form.add_text('field2', 'Field')
el2.add_handler('text exception', 'test error msg')
assert form.handle_exception(Exception('text exception'))
self.assertEqual(el2.errors[0], 'test error msg')
# form exceptions
f = Form('f')
f.add_handler('text exception', 'test error msg')
assert f.handle_exception(Exception('text exception'))
self.assertEqual(f._errors[0], 'test error msg')
# make sure second exception works too
f = Form('f')
f.add_handler('not it', '')
f.add_handler('text exception', 'test error msg')
assert f.handle_exception(Exception('text exception'))
self.assertEqual(f._errors[0], 'test error msg')
# specifying exception type
f = Form('f')
f.add_handler('text exception', 'test error msg', Exception)
assert f.handle_exception(Exception('text exception'))
self.assertEqual(f._errors[0], 'test error msg')
# right message, wrong type
f = Form('f')
f.add_handler('text exception', 'test error msg', ValueError)
assert not f.handle_exception(Exception('text exception'))
self.assertEqual(len(f._errors), 0)
# wrong message
f = Form('f')
f.add_handler('text exception', 'test error msg', Exception)
assert not f.handle_exception(Exception('text'))
self.assertEqual(len(f._errors), 0)
def test_submitted_only_when_appropriate(self):
f1 = Form('login1')
f1.add_text('field')
f2 = Form('login2')
f2.add_text('field')
post = {
'login1-submit-flag': 'submitted',
'field': 'foo'
}
f1.set_submitted(post)
assert f1.is_submitted()
assert f1.elements.field.value == 'foo'
f2.set_submitted(post)
assert not f2.is_submitted()
assert f2.elements.field.value is NotGiven
def test_exception_on_static_submit(self):
f1 = Form('login1', static=True)
f1.add_text('field')
post = {
'login1-submit-flag': 'submitted',
'field': 'foo'
}
try:
f1.set_submitted(post)
assert False, 'expected exception for submitting to static form'
except ProgrammingError:
pass
def test_all_errors(self):
def validator(form):
if form.elements.myfield.is_valid():
if form.elements.myfield.value != 'foo':
raise ValueInvalid('My Field: must be "foo", not "%s"' %
form.elements.myfield.value)
f1 = Form('login1')
f1.add_text('field', 'Field', required=True)
f1.add_text('myfield', 'My Field', required=True)
f1.add_validator(validator)
post = {
'login1-submit-flag': 'submitted',
'myfield': 'bar'
}
f1.set_submitted(post)
assert not f1.is_valid()
form_errors, field_errors = f1.all_errors()
self.assertEqual(field_errors, {'Field': ['field is required']})
self.assertEqual(form_errors, ['My Field: must be "foo", not "bar"'])
# now make sure we can set the id as the field errors dict key if needed
form_errors, field_errors = f1.all_errors(id_as_key=True)
self.assertEqual(field_errors, {'field': ['field is required']})
# run the tests if the module is called directly
if __name__ == "__main__":
unittest.main()
|
|
import os
import hashlib
import inspect
from lib import BaseTest
def strip_processor(output):
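    """Drop indented per-file checksum lines and Date: headers so the gold
    comparison ignores sums and timestamps."""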
return "\n".join([l for l in output.split("\n") if not l.startswith(' ') and not l.startswith('Date:')])
class PublishSwitch1Test(BaseTest):
"""
publish switch: removed some packages
"""
fixtureDB = True
fixturePool = True
fixtureCmds = [
"aptly snapshot create snap1 from mirror gnuplot-maverick",
"aptly snapshot create snap2 empty",
"aptly snapshot pull -no-deps -architectures=i386,amd64 snap2 snap1 snap3 gnuplot-x11",
"aptly publish snapshot -keyring=${files}/aptly.pub -secret-keyring=${files}/aptly.sec -distribution=maverick snap1",
]
runCmd = "aptly publish switch -keyring=${files}/aptly.pub -secret-keyring=${files}/aptly.sec maverick snap3"
gold_processor = BaseTest.expand_environ
def check(self):
super(PublishSwitch1Test, self).check()
self.check_exists('public/dists/maverick/InRelease')
self.check_exists('public/dists/maverick/Release')
self.check_exists('public/dists/maverick/Release.gpg')
self.check_exists('public/dists/maverick/main/binary-i386/Packages')
self.check_exists('public/dists/maverick/main/binary-i386/Packages.gz')
self.check_exists('public/dists/maverick/main/binary-i386/Packages.bz2')
self.check_exists('public/dists/maverick/main/Contents-i386.gz')
self.check_exists('public/dists/maverick/main/binary-amd64/Packages')
self.check_exists('public/dists/maverick/main/binary-amd64/Packages.gz')
self.check_exists('public/dists/maverick/main/binary-amd64/Packages.bz2')
self.check_exists('public/dists/maverick/main/Contents-amd64.gz')
self.check_exists('public/pool/main/g/gnuplot/gnuplot-x11_4.6.1-1~maverick2_i386.deb')
self.check_exists('public/pool/main/g/gnuplot/gnuplot-x11_4.6.1-1~maverick2_amd64.deb')
self.check_not_exists('public/pool/main/g/gnuplot/gnuplot-nox_4.6.1-1~maverick2_i386.deb')
self.check_not_exists('public/pool/main/g/gnuplot/gnuplot-nox_4.6.1-1~maverick2_amd64.deb')
        # verify contents except for checksums
self.check_file_contents('public/dists/maverick/Release', 'release', match_prepare=strip_processor)
self.check_file_contents('public/dists/maverick/main/binary-i386/Packages', 'binary', match_prepare=lambda s: "\n".join(sorted(s.split("\n"))))
# verify signatures
self.run_cmd([self.gpgFinder.gpg, "--no-auto-check-trustdb", "--keyring", os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "files", "aptly.pub"),
"--verify", os.path.join(os.environ["HOME"], ".aptly", 'public/dists/maverick/InRelease')])
self.run_cmd([self.gpgFinder.gpg, "--no-auto-check-trustdb", "--keyring", os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "files", "aptly.pub"),
"--verify", os.path.join(os.environ["HOME"], ".aptly", 'public/dists/maverick/Release.gpg'),
os.path.join(os.environ["HOME"], ".aptly", 'public/dists/maverick/Release')])
# verify sums
release = self.read_file('public/dists/maverick/Release').split("\n")
release = [l for l in release if l.startswith(" ")]
pathsSeen = set()
for l in release:
fileHash, fileSize, path = l.split()
if "Contents" in path and not path.endswith(".gz"):
# "Contents" are present in index, but not really written to disk
continue
pathsSeen.add(path)
fileSize = int(fileSize)
st = os.stat(os.path.join(os.environ["HOME"], ".aptly", 'public/dists/maverick/', path))
if fileSize != st.st_size:
raise Exception("file size doesn't match for %s: %d != %d" % (path, fileSize, st.st_size))
if len(fileHash) == 32:
h = hashlib.md5()
elif len(fileHash) == 40:
h = hashlib.sha1()
elif len(fileHash) == 64:
h = hashlib.sha256()
else:
h = hashlib.sha512()
h.update(self.read_file(os.path.join('public/dists/maverick', path)))
if h.hexdigest() != fileHash:
raise Exception("file hash doesn't match for %s: %s != %s" % (path, fileHash, h.hexdigest()))
if pathsSeen != set(['main/binary-amd64/Packages', 'main/binary-i386/Packages', 'main/binary-i386/Packages.gz',
'main/binary-amd64/Packages.gz', 'main/binary-amd64/Packages.bz2', 'main/binary-i386/Packages.bz2',
'main/binary-amd64/Release', 'main/binary-i386/Release', 'main/Contents-amd64.gz',
'main/Contents-i386.gz', 'Contents-i386.gz', 'Contents-amd64.gz']):
raise Exception("path seen wrong: %r" % (pathsSeen, ))
class PublishSwitch2Test(BaseTest):
"""
publish switch: added some packages
"""
fixtureDB = True
fixturePool = True
fixtureCmds = [
"aptly snapshot create snap1 from mirror gnuplot-maverick",
"aptly snapshot create snap2 empty",
"aptly snapshot pull -no-deps -architectures=i386,amd64 snap2 snap1 snap3 gnuplot-x11",
"aptly publish snapshot -keyring=${files}/aptly.pub -secret-keyring=${files}/aptly.sec -distribution=maverick snap3 ppa",
]
runCmd = "aptly publish switch -keyring=${files}/aptly.pub -secret-keyring=${files}/aptly.sec maverick ppa snap1"
gold_processor = BaseTest.expand_environ
def check(self):
super(PublishSwitch2Test, self).check()
self.check_exists('public/ppa/dists/maverick/InRelease')
self.check_exists('public/ppa/dists/maverick/Release')
self.check_exists('public/ppa/dists/maverick/Release.gpg')
self.check_exists('public/ppa/dists/maverick/main/binary-i386/Packages')
self.check_exists('public/ppa/dists/maverick/main/binary-i386/Packages.gz')
self.check_exists('public/ppa/dists/maverick/main/binary-i386/Packages.bz2')
self.check_exists('public/ppa/dists/maverick/main/Contents-i386.gz')
self.check_exists('public/ppa/dists/maverick/main/binary-amd64/Packages')
self.check_exists('public/ppa/dists/maverick/main/binary-amd64/Packages.gz')
self.check_exists('public/ppa/dists/maverick/main/binary-amd64/Packages.bz2')
self.check_exists('public/ppa/dists/maverick/main/Contents-amd64.gz')
self.check_exists('public/ppa/pool/main/g/gnuplot/gnuplot-x11_4.6.1-1~maverick2_i386.deb')
self.check_exists('public/ppa/pool/main/g/gnuplot/gnuplot-x11_4.6.1-1~maverick2_amd64.deb')
self.check_exists('public/ppa/pool/main/g/gnuplot/gnuplot-nox_4.6.1-1~maverick2_i386.deb')
self.check_exists('public/ppa/pool/main/g/gnuplot/gnuplot-nox_4.6.1-1~maverick2_amd64.deb')
        # verify contents except for checksums
self.check_file_contents('public/ppa/dists/maverick/main/binary-i386/Packages', 'binary', match_prepare=lambda s: "\n".join(sorted(s.split("\n"))))
class PublishSwitch3Test(BaseTest):
"""
publish switch: removed some packages, files occupied by another package
"""
fixtureDB = True
fixturePool = True
fixtureCmds = [
"aptly snapshot create snap1 from mirror gnuplot-maverick",
"aptly snapshot create snap2 empty",
"aptly snapshot pull -no-deps -architectures=i386,amd64 snap2 snap1 snap3 gnuplot-x11",
"aptly publish snapshot -keyring=${files}/aptly.pub -secret-keyring=${files}/aptly.sec -distribution=maverick snap1",
"aptly publish snapshot -keyring=${files}/aptly.pub -secret-keyring=${files}/aptly.sec -distribution=maverick2 snap1",
]
runCmd = "aptly publish switch -keyring=${files}/aptly.pub -secret-keyring=${files}/aptly.sec maverick snap3"
gold_processor = BaseTest.expand_environ
def check(self):
super(PublishSwitch3Test, self).check()
self.check_exists('public/dists/maverick/InRelease')
self.check_exists('public/dists/maverick/Release')
self.check_exists('public/dists/maverick/Release.gpg')
self.check_exists('public/dists/maverick/main/binary-i386/Packages')
self.check_exists('public/dists/maverick/main/binary-i386/Packages.gz')
self.check_exists('public/dists/maverick/main/binary-i386/Packages.bz2')
self.check_exists('public/dists/maverick/main/Contents-i386.gz')
self.check_exists('public/dists/maverick/main/binary-amd64/Packages')
self.check_exists('public/dists/maverick/main/binary-amd64/Packages.gz')
self.check_exists('public/dists/maverick/main/binary-amd64/Packages.bz2')
self.check_exists('public/dists/maverick/main/Contents-amd64.gz')
self.check_exists('public/pool/main/g/gnuplot/gnuplot-x11_4.6.1-1~maverick2_i386.deb')
self.check_exists('public/pool/main/g/gnuplot/gnuplot-x11_4.6.1-1~maverick2_amd64.deb')
self.check_exists('public/pool/main/g/gnuplot/gnuplot-nox_4.6.1-1~maverick2_i386.deb')
self.check_exists('public/pool/main/g/gnuplot/gnuplot-nox_4.6.1-1~maverick2_amd64.deb')
class PublishSwitch4Test(BaseTest):
"""
publish switch: added some packages, but list of published archs doesn't change
"""
fixtureDB = True
fixturePool = True
fixtureCmds = [
"aptly snapshot create snap1 from mirror gnuplot-maverick",
"aptly snapshot create snap2 empty",
"aptly snapshot pull -no-deps -architectures=i386 snap2 snap1 snap3 gnuplot-x11",
"aptly publish snapshot -keyring=${files}/aptly.pub -secret-keyring=${files}/aptly.sec -distribution=maverick snap3 ppa",
]
runCmd = "aptly publish switch -keyring=${files}/aptly.pub -secret-keyring=${files}/aptly.sec maverick ppa snap1"
gold_processor = BaseTest.expand_environ
def check(self):
super(PublishSwitch4Test, self).check()
self.check_exists('public/ppa/dists/maverick/InRelease')
self.check_exists('public/ppa/dists/maverick/Release')
self.check_exists('public/ppa/dists/maverick/Release.gpg')
self.check_exists('public/ppa/dists/maverick/main/binary-i386/Packages')
self.check_exists('public/ppa/dists/maverick/main/binary-i386/Packages.gz')
self.check_exists('public/ppa/dists/maverick/main/binary-i386/Packages.bz2')
self.check_exists('public/ppa/dists/maverick/main/Contents-i386.gz')
self.check_not_exists('public/ppa/dists/maverick/main/binary-amd64/Packages')
self.check_not_exists('public/ppa/dists/maverick/main/binary-amd64/Packages.gz')
self.check_not_exists('public/ppa/dists/maverick/main/binary-amd64/Packages.bz2')
self.check_exists('public/ppa/pool/main/g/gnuplot/gnuplot-x11_4.6.1-1~maverick2_i386.deb')
self.check_not_exists('public/ppa/pool/main/g/gnuplot/gnuplot-x11_4.6.1-1~maverick2_amd64.deb')
self.check_exists('public/ppa/pool/main/g/gnuplot/gnuplot-nox_4.6.1-1~maverick2_i386.deb')
self.check_not_exists('public/ppa/pool/main/g/gnuplot/gnuplot-nox_4.6.1-1~maverick2_amd64.deb')
class PublishSwitch5Test(BaseTest):
"""
publish switch: no such publish
"""
fixtureCmds = [
"aptly snapshot create snap1 empty",
]
runCmd = "aptly publish switch maverick ppa snap1"
expectedCode = 1
class PublishSwitch6Test(BaseTest):
"""
publish switch: not a snapshot
"""
fixtureCmds = [
"aptly snapshot create snap1 empty",
"aptly repo create local-repo",
"aptly repo add local-repo ${files}",
"aptly publish repo -keyring=${files}/aptly.pub -secret-keyring=${files}/aptly.sec -distribution=maverick local-repo",
]
runCmd = "aptly publish switch maverick snap1"
expectedCode = 1
class PublishSwitch7Test(BaseTest):
"""
publish switch: no snapshot
"""
fixtureDB = True
fixturePool = True
fixtureCmds = [
"aptly snapshot create snap1 from mirror gnuplot-maverick",
"aptly publish snapshot -keyring=${files}/aptly.pub -secret-keyring=${files}/aptly.sec -distribution=maverick snap1",
]
runCmd = "aptly publish switch -keyring=${files}/aptly.pub -secret-keyring=${files}/aptly.sec maverick snap3"
expectedCode = 1
class PublishSwitch8Test(BaseTest):
"""
publish switch: multi-component switching
"""
fixtureDB = True
fixturePoolCopy = True
fixtureCmds = [
"aptly snapshot create snap1 from mirror gnuplot-maverick",
"aptly snapshot create snap2 empty",
"aptly repo create local-repo",
"aptly repo add local-repo ${files}",
"aptly snapshot create local1 from repo local-repo",
"aptly publish snapshot -keyring=${files}/aptly.pub -secret-keyring=${files}/aptly.sec -distribution=maverick -component=a,b,c snap1 snap2 local1",
"aptly snapshot pull -no-deps -architectures=i386,amd64 snap2 snap1 snap3 gnuplot-x11",
"aptly repo remove local-repo pyspi",
"aptly snapshot create local2 from repo local-repo",
]
runCmd = "aptly publish switch -keyring=${files}/aptly.pub -secret-keyring=${files}/aptly.sec -component=b,c maverick snap3 local2"
gold_processor = BaseTest.expand_environ
def check(self):
super(PublishSwitch8Test, self).check()
self.check_exists('public/dists/maverick/InRelease')
self.check_exists('public/dists/maverick/Release')
self.check_exists('public/dists/maverick/Release.gpg')
for component in ("a", "b", "c"):
self.check_exists('public/dists/maverick/' + component + '/binary-i386/Packages')
self.check_exists('public/dists/maverick/' + component + '/binary-i386/Packages.gz')
self.check_exists('public/dists/maverick/' + component + '/binary-i386/Packages.bz2')
self.check_exists('public/dists/maverick/' + component + '/Contents-i386.gz')
self.check_exists('public/dists/maverick/' + component + '/binary-amd64/Packages')
self.check_exists('public/dists/maverick/' + component + '/binary-amd64/Packages.gz')
self.check_exists('public/dists/maverick/' + component + '/binary-amd64/Packages.bz2')
if component == "c":
self.check_not_exists('public/dists/maverick/' + component + '/Contents-amd64.gz')
else:
self.check_exists('public/dists/maverick/' + component + '/Contents-amd64.gz')
self.check_exists('public/dists/maverick/' + component + '/source/Sources')
self.check_exists('public/dists/maverick/' + component + '/source/Sources.gz')
self.check_exists('public/dists/maverick/' + component + '/source/Sources.bz2')
self.check_exists('public/pool/a/g/gnuplot/gnuplot-x11_4.6.1-1~maverick2_i386.deb')
self.check_exists('public/pool/a/g/gnuplot/gnuplot-x11_4.6.1-1~maverick2_amd64.deb')
self.check_exists('public/pool/a/g/gnuplot/gnuplot-nox_4.6.1-1~maverick2_i386.deb')
self.check_exists('public/pool/a/g/gnuplot/gnuplot-nox_4.6.1-1~maverick2_amd64.deb')
self.check_exists('public/pool/b/g/gnuplot/gnuplot-x11_4.6.1-1~maverick2_i386.deb')
self.check_exists('public/pool/b/g/gnuplot/gnuplot-x11_4.6.1-1~maverick2_amd64.deb')
self.check_not_exists('public/pool/b/g/gnuplot/gnuplot-nox_4.6.1-1~maverick2_i386.deb')
self.check_not_exists('public/pool/b/g/gnuplot/gnuplot-nox_4.6.1-1~maverick2_amd64.deb')
self.check_exists('public/pool/c/b/boost-defaults/libboost-program-options-dev_1.49.0.1_i386.deb')
self.check_not_exists('public/pool/c/p/pyspi/pyspi_0.6.1-1.3.dsc')
self.check_not_exists('public/pool/c/p/pyspi/pyspi_0.6.1-1.3.diff.gz')
self.check_not_exists('public/pool/c/p/pyspi/pyspi_0.6.1.orig.tar.gz')
self.check_not_exists('public/pool/c/p/pyspi/pyspi-0.6.1-1.3.stripped.dsc')
# verify contents except for checksums
self.check_file_contents('public/dists/maverick/Release', 'release', match_prepare=strip_processor)
self.check_file_contents('public/dists/maverick/a/binary-i386/Packages', 'binaryA', match_prepare=lambda s: "\n".join(sorted(s.split("\n"))))
self.check_file_contents('public/dists/maverick/b/binary-i386/Packages', 'binaryB', match_prepare=lambda s: "\n".join(sorted(s.split("\n"))))
self.check_file_contents('public/dists/maverick/c/binary-i386/Packages', 'binaryC', match_prepare=lambda s: "\n".join(sorted(s.split("\n"))))
# verify signatures
self.run_cmd([self.gpgFinder.gpg, "--no-auto-check-trustdb", "--keyring", os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "files", "aptly.pub"),
"--verify", os.path.join(os.environ["HOME"], ".aptly", 'public/dists/maverick/InRelease')])
self.run_cmd([self.gpgFinder.gpg, "--no-auto-check-trustdb", "--keyring", os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "files", "aptly.pub"),
"--verify", os.path.join(os.environ["HOME"], ".aptly", 'public/dists/maverick/Release.gpg'),
os.path.join(os.environ["HOME"], ".aptly", 'public/dists/maverick/Release')])
# verify sums
release = self.read_file('public/dists/maverick/Release').split("\n")
release = [l for l in release if l.startswith(" ")]
pathsSeen = set()
for l in release:
fileHash, fileSize, path = l.split()
if "Contents" in path and not path.endswith(".gz"):
# "Contents" are present in index, but not really written to disk
continue
pathsSeen.add(path)
fileSize = int(fileSize)
st = os.stat(os.path.join(os.environ["HOME"], ".aptly", 'public/dists/maverick/', path))
if fileSize != st.st_size:
raise Exception("file size doesn't match for %s: %d != %d" % (path, fileSize, st.st_size))
if len(fileHash) == 32:
h = hashlib.md5()
elif len(fileHash) == 40:
h = hashlib.sha1()
elif len(fileHash) == 64:
h = hashlib.sha256()
else:
h = hashlib.sha512()
h.update(self.read_file(os.path.join('public/dists/maverick', path)))
if h.hexdigest() != fileHash:
raise Exception("file hash doesn't match for %s: %s != %s" % (path, fileHash, h.hexdigest()))
if pathsSeen != set(['a/binary-amd64/Packages', 'c/source/Sources', 'c/binary-amd64/Packages.bz2', 'b/binary-amd64/Packages',
'a/source/Sources', 'a/binary-i386/Packages.bz2', 'b/source/Sources.bz2', 'b/binary-amd64/Packages.bz2',
'c/binary-i386/Packages', 'a/binary-i386/Packages', 'c/binary-amd64/Packages', 'a/source/Sources.gz',
'b/binary-i386/Packages.gz', 'c/binary-amd64/Packages.gz', 'a/binary-amd64/Packages.bz2', 'c/source/Sources.bz2',
'c/source/Sources.gz', 'a/source/Sources.bz2', 'b/binary-i386/Packages.bz2', 'a/binary-i386/Packages.gz',
'a/binary-amd64/Packages.gz', 'c/binary-i386/Packages.bz2', 'b/binary-amd64/Packages.gz', 'b/source/Sources',
'c/binary-i386/Packages.gz', 'b/source/Sources.gz', 'b/binary-i386/Packages',
'a/binary-amd64/Release', 'b/binary-amd64/Release', 'c/binary-amd64/Release',
'a/binary-i386/Release', 'b/binary-i386/Release', 'c/binary-i386/Release',
'a/source/Release', 'b/source/Release', 'c/source/Release',
'b/Contents-amd64.gz', 'c/Contents-i386.gz', 'a/Contents-i386.gz',
'a/Contents-amd64.gz', 'b/Contents-i386.gz', 'Contents-i386.gz', 'Contents-amd64.gz']):
raise Exception("path seen wrong: %r" % (pathsSeen, ))
class PublishSwitch9Test(BaseTest):
"""
publish switch: components/snapshots mismatch
"""
fixtureCmds = [
"aptly snapshot create snap1 empty",
"aptly snapshot create snap2 empty",
"aptly publish snapshot -architectures=i386 -keyring=${files}/aptly.pub -secret-keyring=${files}/aptly.sec -distribution=maverick -component=a,b snap1 snap2",
]
runCmd = "aptly publish switch -keyring=${files}/aptly.pub -secret-keyring=${files}/aptly.sec -component=a,b maverick snap2"
expectedCode = 2
def outputMatchPrepare(_, s):
return "\n".join([l for l in s.split("\n") if l.startswith("ERROR")])
class PublishSwitch10Test(BaseTest):
"""
publish switch: conflicting files in the snapshot
"""
fixtureCmds = [
"aptly repo create local-repo1",
"aptly repo add local-repo1 ${files}",
"aptly snapshot create snap1 from repo local-repo1",
"aptly repo create local-repo2",
"aptly repo add local-repo2 ${testfiles}",
"aptly snapshot create snap2 from repo local-repo2",
"aptly publish snapshot -keyring=${files}/aptly.pub -secret-keyring=${files}/aptly.sec -distribution=maverick snap1",
]
runCmd = "aptly publish switch -keyring=${files}/aptly.pub -secret-keyring=${files}/aptly.sec maverick snap2"
expectedCode = 1
gold_processor = BaseTest.expand_environ
class PublishSwitch11Test(BaseTest):
"""
publish switch: -force-overwrite
"""
fixtureCmds = [
"aptly repo create local-repo1",
"aptly repo add local-repo1 ${files}",
"aptly snapshot create snap1 from repo local-repo1",
"aptly repo create local-repo2",
"aptly repo add local-repo2 ${testfiles}",
"aptly snapshot create snap2 from repo local-repo2",
"aptly publish snapshot -keyring=${files}/aptly.pub -secret-keyring=${files}/aptly.sec -distribution=maverick snap1",
]
runCmd = "aptly publish switch -force-overwrite -keyring=${files}/aptly.pub -secret-keyring=${files}/aptly.sec maverick snap2"
gold_processor = BaseTest.expand_environ
def check(self):
super(PublishSwitch11Test, self).check()
self.check_file_contents("public/pool/main/p/pyspi/pyspi_0.6.1.orig.tar.gz", "file")
class PublishSwitch12Test(BaseTest):
"""
publish switch: wrong component names
"""
fixtureCmds = [
"aptly snapshot create snap1 empty",
"aptly snapshot create snap2 empty",
"aptly publish snapshot -architectures=i386 -keyring=${files}/aptly.pub -secret-keyring=${files}/aptly.sec -distribution=maverick -component=a,b snap1 snap2",
]
runCmd = "aptly publish switch -keyring=${files}/aptly.pub -secret-keyring=${files}/aptly.sec -component=a,c maverick snap2 snap1"
expectedCode = 1
class PublishSwitch13Test(BaseTest):
"""
publish switch: -skip-contents
"""
fixtureDB = True
fixturePool = True
fixtureCmds = [
"aptly snapshot create snap1 from mirror gnuplot-maverick",
"aptly snapshot create snap2 empty",
"aptly snapshot pull -no-deps -architectures=i386,amd64 snap2 snap1 snap3 gnuplot-x11",
"aptly publish snapshot -keyring=${files}/aptly.pub -secret-keyring=${files}/aptly.sec -distribution=maverick -skip-contents snap1",
]
runCmd = "aptly publish switch -keyring=${files}/aptly.pub -secret-keyring=${files}/aptly.sec maverick snap3"
gold_processor = BaseTest.expand_environ
def check(self):
super(PublishSwitch13Test, self).check()
self.check_exists('public/dists/maverick/Release')
self.check_exists('public/dists/maverick/main/binary-i386/Packages')
self.check_not_exists('public/dists/maverick/main/Contents-i386.gz')
self.check_exists('public/dists/maverick/main/binary-amd64/Packages')
self.check_not_exists('public/dists/maverick/main/Contents-amd64.gz')
class PublishSwitch14Test(BaseTest):
"""
publish switch: removed some packages skipping cleanup
"""
fixtureDB = True
fixturePool = True
fixtureCmds = [
"aptly snapshot create snap1 from mirror gnuplot-maverick",
"aptly snapshot create snap2 empty",
"aptly snapshot pull -no-deps -architectures=i386,amd64 snap2 snap1 snap3 gnuplot-x11",
"aptly publish snapshot -keyring=${files}/aptly.pub -secret-keyring=${files}/aptly.sec -distribution=maverick snap1",
]
runCmd = "aptly publish switch -keyring=${files}/aptly.pub -secret-keyring=${files}/aptly.sec -skip-cleanup maverick snap3"
gold_processor = BaseTest.expand_environ
def check(self):
super(PublishSwitch14Test, self).check()
self.check_exists('public/dists/maverick/InRelease')
self.check_exists('public/dists/maverick/Release')
self.check_exists('public/dists/maverick/Release.gpg')
self.check_exists('public/dists/maverick/main/binary-i386/Packages')
self.check_exists('public/dists/maverick/main/binary-i386/Packages.gz')
self.check_exists('public/dists/maverick/main/binary-i386/Packages.bz2')
self.check_exists('public/dists/maverick/main/Contents-i386.gz')
self.check_exists('public/dists/maverick/main/binary-amd64/Packages')
self.check_exists('public/dists/maverick/main/binary-amd64/Packages.gz')
self.check_exists('public/dists/maverick/main/binary-amd64/Packages.bz2')
self.check_exists('public/dists/maverick/main/Contents-amd64.gz')
self.check_exists('public/pool/main/g/gnuplot/gnuplot-x11_4.6.1-1~maverick2_i386.deb')
self.check_exists('public/pool/main/g/gnuplot/gnuplot-x11_4.6.1-1~maverick2_amd64.deb')
self.check_exists('public/pool/main/g/gnuplot/gnuplot-nox_4.6.1-1~maverick2_i386.deb')
self.check_exists('public/pool/main/g/gnuplot/gnuplot-nox_4.6.1-1~maverick2_amd64.deb')
# verify contents except for checksums
self.check_file_contents('public/dists/maverick/Release', 'release', match_prepare=strip_processor)
self.check_file_contents('public/dists/maverick/main/binary-i386/Packages', 'binary', match_prepare=lambda s: "\n".join(sorted(s.split("\n"))))
# verify signatures
self.run_cmd([self.gpgFinder.gpg, "--no-auto-check-trustdb", "--keyring", os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "files", "aptly.pub"),
"--verify", os.path.join(os.environ["HOME"], ".aptly", 'public/dists/maverick/InRelease')])
self.run_cmd([self.gpgFinder.gpg, "--no-auto-check-trustdb", "--keyring", os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "files", "aptly.pub"),
"--verify", os.path.join(os.environ["HOME"], ".aptly", 'public/dists/maverick/Release.gpg'),
os.path.join(os.environ["HOME"], ".aptly", 'public/dists/maverick/Release')])
# verify sums
release = self.read_file('public/dists/maverick/Release').split("\n")
release = [l for l in release if l.startswith(" ")]
pathsSeen = set()
for l in release:
fileHash, fileSize, path = l.split()
if "Contents" in path and not path.endswith(".gz"):
# "Contents" are present in index, but not really written to disk
continue
pathsSeen.add(path)
fileSize = int(fileSize)
st = os.stat(os.path.join(os.environ["HOME"], ".aptly", 'public/dists/maverick/', path))
if fileSize != st.st_size:
raise Exception("file size doesn't match for %s: %d != %d" % (path, fileSize, st.st_size))
if len(fileHash) == 32:
h = hashlib.md5()
elif len(fileHash) == 40:
h = hashlib.sha1()
elif len(fileHash) == 64:
h = hashlib.sha256()
else:
h = hashlib.sha512()
h.update(self.read_file(os.path.join('public/dists/maverick', path)))
if h.hexdigest() != fileHash:
raise Exception("file hash doesn't match for %s: %s != %s" % (path, fileHash, h.hexdigest()))
if pathsSeen != set(['main/binary-amd64/Packages', 'main/binary-i386/Packages', 'main/binary-i386/Packages.gz',
'main/binary-amd64/Packages.gz', 'main/binary-amd64/Packages.bz2', 'main/binary-i386/Packages.bz2',
'main/binary-amd64/Release', 'main/binary-i386/Release', 'main/Contents-amd64.gz',
'main/Contents-i386.gz', 'Contents-i386.gz', 'Contents-amd64.gz']):
raise Exception("path seen wrong: %r" % (pathsSeen, ))
|
|
"""Template helper methods for rendering strings with HA data."""
from datetime import datetime
import json
import logging
import re
import jinja2
from jinja2.sandbox import ImmutableSandboxedEnvironment
from homeassistant.const import (
STATE_UNKNOWN, ATTR_LATITUDE, ATTR_LONGITUDE, MATCH_ALL)
from homeassistant.core import State
from homeassistant.exceptions import TemplateError
from homeassistant.helpers import location as loc_helper
from homeassistant.loader import get_component
from homeassistant.util import convert, dt as dt_util, location as loc_util
from homeassistant.util.async import run_callback_threadsafe
_LOGGER = logging.getLogger(__name__)
_SENTINEL = object()
DATE_STR_FORMAT = "%Y-%m-%d %H:%M:%S"
_RE_NONE_ENTITIES = re.compile(r"distance\(|closest\(", re.I | re.M)
_RE_GET_ENTITIES = re.compile(
r"(?:(?:states\.|(?:is_state|is_state_attr|states)\(.)([\w]+\.[\w]+))",
re.I | re.M
)
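# Illustrative examples (assumed behavior): "{{ states.sensor.temperature }}" and
# "{{ is_state('light.kitchen', 'on') }}" each yield one entity id, while any
# template using distance()/closest() falls back to MATCH_ALL because the
# referenced entities cannot be determined statically.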
def attach(hass, obj):
"""Recursively attach hass to all template instances in list and dict."""
if isinstance(obj, list):
for child in obj:
attach(hass, child)
elif isinstance(obj, dict):
for child in obj.values():
attach(hass, child)
elif isinstance(obj, Template):
obj.hass = hass
def extract_entities(template):
"""Extract all entities for state_changed listener from template string."""
if template is None or _RE_NONE_ENTITIES.search(template):
return MATCH_ALL
extraction = _RE_GET_ENTITIES.findall(template)
if len(extraction) > 0:
return list(set(extraction))
return MATCH_ALL
class Template(object):
"""Class to hold a template and manage caching and rendering."""
def __init__(self, template, hass=None):
"""Instantiate a Template."""
if not isinstance(template, str):
raise TypeError('Expected template to be a string')
self.template = template
self._compiled_code = None
self._compiled = None
self.hass = hass
def ensure_valid(self):
"""Return if template is valid."""
if self._compiled_code is not None:
return
try:
self._compiled_code = ENV.compile(self.template)
except jinja2.exceptions.TemplateSyntaxError as err:
raise TemplateError(err)
def extract_entities(self):
"""Extract all entities for state_changed listener."""
return extract_entities(self.template)
def render(self, variables=None, **kwargs):
"""Render given template."""
if variables is not None:
kwargs.update(variables)
return run_callback_threadsafe(
self.hass.loop, self.async_render, kwargs).result()
def async_render(self, variables=None, **kwargs):
"""Render given template.
This method must be run in the event loop.
"""
self._ensure_compiled()
if variables is not None:
kwargs.update(variables)
try:
return self._compiled.render(kwargs).strip()
except jinja2.TemplateError as err:
raise TemplateError(err)
def render_with_possible_json_value(self, value, error_value=_SENTINEL):
"""Render template with value exposed.
If valid JSON will expose value_json too.
"""
return run_callback_threadsafe(
self.hass.loop, self.async_render_with_possible_json_value, value,
error_value).result()
# pylint: disable=invalid-name
def async_render_with_possible_json_value(self, value,
error_value=_SENTINEL):
"""Render template with value exposed.
If valid JSON will expose value_json too.
This method must be run in the event loop.
"""
self._ensure_compiled()
variables = {
'value': value
}
try:
variables['value_json'] = json.loads(value)
except ValueError:
pass
try:
return self._compiled.render(variables).strip()
except jinja2.TemplateError as ex:
_LOGGER.error('Error parsing value: %s (value: %s, template: %s)',
ex, value, self.template)
return value if error_value is _SENTINEL else error_value
def _ensure_compiled(self):
"""Bind a template to a specific hass instance."""
if self._compiled is not None:
return
self.ensure_valid()
assert self.hass is not None, 'hass variable not set on template'
location_methods = LocationMethods(self.hass)
global_vars = ENV.make_globals({
'closest': location_methods.closest,
'distance': location_methods.distance,
'is_state': self.hass.states.is_state,
'is_state_attr': self.hass.states.is_state_attr,
'states': AllStates(self.hass),
})
self._compiled = jinja2.Template.from_code(
ENV, self._compiled_code, global_vars, None)
return self._compiled
def __eq__(self, other):
"""Compare template with another."""
return (self.__class__ == other.__class__ and
self.template == other.template and
self.hass == other.hass)
class AllStates(object):
"""Class to expose all HA states as attributes."""
def __init__(self, hass):
"""Initialize all states."""
self._hass = hass
def __getattr__(self, name):
"""Return the domain state."""
return DomainStates(self._hass, name)
def __iter__(self):
"""Return all states."""
return iter(sorted(self._hass.states.async_all(),
key=lambda state: state.entity_id))
def __call__(self, entity_id):
"""Return the states."""
state = self._hass.states.get(entity_id)
return STATE_UNKNOWN if state is None else state.state
class DomainStates(object):
"""Class to expose a specific HA domain as attributes."""
def __init__(self, hass, domain):
"""Initialize the domain states."""
self._hass = hass
self._domain = domain
def __getattr__(self, name):
"""Return the states."""
return self._hass.states.get('{}.{}'.format(self._domain, name))
def __iter__(self):
"""Return the iteration over all the states."""
return iter(sorted(
(state for state in self._hass.states.async_all()
if state.domain == self._domain),
key=lambda state: state.entity_id))
class LocationMethods(object):
"""Class to expose distance helpers to templates."""
def __init__(self, hass):
"""Initialize the distance helpers."""
self._hass = hass
def closest(self, *args):
"""Find closest entity.
Closest to home:
closest(states)
closest(states.device_tracker)
closest('group.children')
closest(states.group.children)
Closest to a point:
closest(23.456, 23.456, 'group.children')
closest('zone.school', 'group.children')
closest(states.zone.school, 'group.children')
"""
if len(args) == 1:
latitude = self._hass.config.latitude
longitude = self._hass.config.longitude
entities = args[0]
elif len(args) == 2:
point_state = self._resolve_state(args[0])
if point_state is None:
_LOGGER.warning('Closest:Unable to find state %s', args[0])
return None
elif not loc_helper.has_location(point_state):
_LOGGER.warning(
'Closest:State does not contain valid location: %s',
point_state)
return None
latitude = point_state.attributes.get(ATTR_LATITUDE)
longitude = point_state.attributes.get(ATTR_LONGITUDE)
entities = args[1]
else:
latitude = convert(args[0], float)
longitude = convert(args[1], float)
if latitude is None or longitude is None:
_LOGGER.warning(
'Closest:Received invalid coordinates: %s, %s',
args[0], args[1])
return None
entities = args[2]
if isinstance(entities, (AllStates, DomainStates)):
states = list(entities)
else:
if isinstance(entities, State):
gr_entity_id = entities.entity_id
else:
gr_entity_id = str(entities)
group = get_component('group')
states = [self._hass.states.get(entity_id) for entity_id
in group.expand_entity_ids(self._hass, [gr_entity_id])]
return loc_helper.closest(latitude, longitude, states)
def distance(self, *args):
"""Calculate distance.
Will calculate distance from home to a point or between points.
Points can be passed in using state objects or lat/lng coordinates.
"""
locations = []
to_process = list(args)
while to_process:
value = to_process.pop(0)
if isinstance(value, State):
latitude = value.attributes.get(ATTR_LATITUDE)
longitude = value.attributes.get(ATTR_LONGITUDE)
if latitude is None or longitude is None:
_LOGGER.warning(
'Distance:State does not contain a location: %s',
value)
return None
else:
# We expect this and next value to be lat&lng
if not to_process:
_LOGGER.warning(
'Distance:Expected latitude and longitude, got %s',
value)
return None
value_2 = to_process.pop(0)
latitude = convert(value, float)
longitude = convert(value_2, float)
if latitude is None or longitude is None:
_LOGGER.warning('Distance:Unable to process latitude and '
'longitude: %s, %s', value, value_2)
return None
locations.append((latitude, longitude))
if len(locations) == 1:
return self._hass.config.distance(*locations[0])
return self._hass.config.units.length(
loc_util.distance(*locations[0] + locations[1]), 'm')
def _resolve_state(self, entity_id_or_state):
"""Return state or entity_id if given."""
if isinstance(entity_id_or_state, State):
return entity_id_or_state
elif isinstance(entity_id_or_state, str):
return self._hass.states.get(entity_id_or_state)
return None
def forgiving_round(value, precision=0):
"""Rounding filter that accepts strings."""
try:
value = round(float(value), precision)
return int(value) if precision == 0 else value
except (ValueError, TypeError):
# If value can't be converted to float
return value
def multiply(value, amount):
"""Filter to convert value to float and multiply it."""
try:
return float(value) * amount
except (ValueError, TypeError):
# If value can't be converted to float
return value
def timestamp_custom(value, date_format=DATE_STR_FORMAT, local=True):
"""Filter to convert given timestamp to format."""
try:
date = dt_util.utc_from_timestamp(value)
if local:
date = dt_util.as_local(date)
return date.strftime(date_format)
except (ValueError, TypeError):
# If timestamp can't be converted
return value
def timestamp_local(value):
"""Filter to convert given timestamp to local date/time."""
try:
return dt_util.as_local(
dt_util.utc_from_timestamp(value)).strftime(DATE_STR_FORMAT)
except (ValueError, TypeError):
# If timestamp can't be converted
return value
def timestamp_utc(value):
"""Filter to convert given timestamp to UTC date/time."""
try:
return dt_util.utc_from_timestamp(value).strftime(DATE_STR_FORMAT)
except (ValueError, TypeError):
# If timestamp can't be converted
return value
def strptime(string, fmt):
"""Parse a time string to datetime."""
try:
return datetime.strptime(string, fmt)
except (ValueError, AttributeError):
return string
def fail_when_undefined(value):
"""Filter to force a failure when the value is undefined."""
if isinstance(value, jinja2.Undefined):
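# calling an Undefined value raises jinja2.UndefinedError, aborting the render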
value()
return value
def forgiving_float(value):
"""Try to convert value to a float."""
try:
return float(value)
except (ValueError, TypeError):
return value
class TemplateEnvironment(ImmutableSandboxedEnvironment):
"""The Home Assistant template environment."""
def is_safe_callable(self, obj):
"""Test if callback is safe."""
return isinstance(obj, AllStates) or super().is_safe_callable(obj)
ENV = TemplateEnvironment()
ENV.filters['round'] = forgiving_round
ENV.filters['multiply'] = multiply
ENV.filters['timestamp_custom'] = timestamp_custom
ENV.filters['timestamp_local'] = timestamp_local
ENV.filters['timestamp_utc'] = timestamp_utc
ENV.filters['is_defined'] = fail_when_undefined
ENV.globals['float'] = forgiving_float
ENV.globals['now'] = dt_util.now
ENV.globals['utcnow'] = dt_util.utcnow
ENV.globals['as_timestamp'] = dt_util.as_timestamp
ENV.globals['relative_time'] = dt_util.get_age
ENV.globals['strptime'] = strptime
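# Minimal usage sketch (illustrative, assumes a running hass instance):
#   tpl = Template("{{ states('sensor.temperature') | float | round(1) }}", hass)
#   tpl.extract_entities()   # -> ['sensor.temperature']
#   tpl.async_render()       # must be called from within the event loop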
|
|
"""Test Axis device."""
from copy import deepcopy
import json
from unittest import mock
import axis as axislib
from axis.api_discovery import URL as API_DISCOVERY_URL
from axis.basic_device_info import URL as BASIC_DEVICE_INFO_URL
from axis.event_stream import OPERATION_INITIALIZED
from axis.light_control import URL as LIGHT_CONTROL_URL
from axis.mqtt import URL_CLIENT as MQTT_CLIENT_URL
from axis.param_cgi import (
BRAND as BRAND_URL,
INPUT as INPUT_URL,
IOPORT as IOPORT_URL,
OUTPUT as OUTPUT_URL,
PROPERTIES as PROPERTIES_URL,
STREAM_PROFILES as STREAM_PROFILES_URL,
)
from axis.port_management import URL as PORT_MANAGEMENT_URL
import pytest
from homeassistant import config_entries
from homeassistant.components import axis
from homeassistant.components.axis.const import (
CONF_EVENTS,
CONF_MODEL,
DOMAIN as AXIS_DOMAIN,
)
from homeassistant.components.binary_sensor import DOMAIN as BINARY_SENSOR_DOMAIN
from homeassistant.const import (
CONF_HOST,
CONF_MAC,
CONF_NAME,
CONF_PASSWORD,
CONF_PORT,
CONF_USERNAME,
)
from tests.async_mock import Mock, patch
from tests.common import MockConfigEntry, async_fire_mqtt_message
MAC = "00408C12345"
MODEL = "model"
NAME = "name"
ENTRY_OPTIONS = {CONF_EVENTS: True}
ENTRY_CONFIG = {
CONF_HOST: "1.2.3.4",
CONF_USERNAME: "root",
CONF_PASSWORD: "pass",
CONF_PORT: 80,
CONF_MAC: MAC,
CONF_MODEL: MODEL,
CONF_NAME: NAME,
}
API_DISCOVERY_RESPONSE = {
"method": "getApiList",
"apiVersion": "1.0",
"data": {
"apiList": [
{"id": "api-discovery", "version": "1.0", "name": "API Discovery Service"},
{"id": "param-cgi", "version": "1.0", "name": "Legacy Parameter Handling"},
]
},
}
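# The entries below are appended to the API discovery response by individual
# tests (via patch.dict) to enable optional device capabilities.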
API_DISCOVERY_BASIC_DEVICE_INFO = {
"id": "basic-device-info",
"version": "1.1",
"name": "Basic Device Information",
}
API_DISCOVERY_MQTT = {"id": "mqtt-client", "version": "1.0", "name": "MQTT Client API"}
API_DISCOVERY_PORT_MANAGEMENT = {
"id": "io-port-management",
"version": "1.0",
"name": "IO Port Management",
}
BASIC_DEVICE_INFO_RESPONSE = {
"apiVersion": "1.1",
"data": {
"propertyList": {
"ProdNbr": "M1065-LW",
"ProdType": "Network Camera",
"SerialNumber": "00408C12345",
"Version": "9.80.1",
}
},
}
LIGHT_CONTROL_RESPONSE = {
"apiVersion": "1.1",
"method": "getLightInformation",
"data": {
"items": [
{
"lightID": "led0",
"lightType": "IR",
"enabled": True,
"synchronizeDayNightMode": True,
"lightState": False,
"automaticIntensityMode": False,
"automaticAngleOfIlluminationMode": False,
"nrOfLEDs": 1,
"error": False,
"errorInfo": "",
}
]
},
}
MQTT_CLIENT_RESPONSE = {
"apiVersion": "1.0",
"context": "some context",
"method": "getClientStatus",
"data": {"status": {"state": "active", "connectionStatus": "Connected"}},
}
PORT_MANAGEMENT_RESPONSE = {
"apiVersion": "1.0",
"method": "getPorts",
"data": {
"numberOfPorts": 1,
"items": [
{
"port": "0",
"configurable": False,
"usage": "",
"name": "PIR sensor",
"direction": "input",
"state": "open",
"normalState": "open",
}
],
},
}
BRAND_RESPONSE = """root.Brand.Brand=AXIS
root.Brand.ProdFullName=AXIS M1065-LW Network Camera
root.Brand.ProdNbr=M1065-LW
root.Brand.ProdShortName=AXIS M1065-LW
root.Brand.ProdType=Network Camera
root.Brand.ProdVariant=
root.Brand.WebURL=http://www.axis.com
"""
PORTS_RESPONSE = """root.Input.NbrOfInputs=1
root.IOPort.I0.Configurable=no
root.IOPort.I0.Direction=input
root.IOPort.I0.Input.Name=PIR sensor
root.IOPort.I0.Input.Trig=closed
root.Output.NbrOfOutputs=0
"""
PROPERTIES_RESPONSE = """root.Properties.API.HTTP.Version=3
root.Properties.API.Metadata.Metadata=yes
root.Properties.API.Metadata.Version=1.0
root.Properties.Firmware.BuildDate=Feb 15 2019 09:42
root.Properties.Firmware.BuildNumber=26
root.Properties.Firmware.Version=9.10.1
root.Properties.Image.Format=jpeg,mjpeg,h264
root.Properties.Image.NbrOfViews=2
root.Properties.Image.Resolution=1920x1080,1280x960,1280x720,1024x768,1024x576,800x600,640x480,640x360,352x240,320x240
root.Properties.Image.Rotation=0,180
root.Properties.System.SerialNumber=00408C12345
"""
STREAM_PROFILES_RESPONSE = """root.StreamProfile.MaxGroups=26
root.StreamProfile.S0.Description=profile_1_description
root.StreamProfile.S0.Name=profile_1
root.StreamProfile.S0.Parameters=videocodec=h264
root.StreamProfile.S1.Description=profile_2_description
root.StreamProfile.S1.Name=profile_2
root.StreamProfile.S1.Parameters=videocodec=h265
"""
def vapix_session_request(session, url, **kwargs):
"""Return data based on url."""
if API_DISCOVERY_URL in url:
return json.dumps(API_DISCOVERY_RESPONSE)
if BASIC_DEVICE_INFO_URL in url:
return json.dumps(BASIC_DEVICE_INFO_RESPONSE)
if LIGHT_CONTROL_URL in url:
return json.dumps(LIGHT_CONTROL_RESPONSE)
if MQTT_CLIENT_URL in url:
return json.dumps(MQTT_CLIENT_RESPONSE)
if PORT_MANAGEMENT_URL in url:
return json.dumps(PORT_MANAGEMENT_RESPONSE)
if BRAND_URL in url:
return BRAND_RESPONSE
if IOPORT_URL in url or INPUT_URL in url or OUTPUT_URL in url:
return PORTS_RESPONSE
if PROPERTIES_URL in url:
return PROPERTIES_RESPONSE
if STREAM_PROFILES_URL in url:
return STREAM_PROFILES_RESPONSE
async def setup_axis_integration(hass, config=ENTRY_CONFIG, options=ENTRY_OPTIONS):
"""Create the Axis device."""
config_entry = MockConfigEntry(
domain=AXIS_DOMAIN,
data=deepcopy(config),
connection_class=config_entries.CONN_CLASS_LOCAL_PUSH,
options=deepcopy(options),
entry_id="1",
version=2,
)
config_entry.add_to_hass(hass)
with patch("axis.vapix.session_request", new=vapix_session_request), patch(
"axis.rtsp.RTSPClient.start", return_value=True,
):
await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
return hass.data[AXIS_DOMAIN].get(config_entry.unique_id)
async def test_device_setup(hass):
"""Successful setup."""
with patch(
"homeassistant.config_entries.ConfigEntries.async_forward_entry_setup",
return_value=True,
) as forward_entry_setup:
device = await setup_axis_integration(hass)
assert device.api.vapix.firmware_version == "9.10.1"
assert device.api.vapix.product_number == "M1065-LW"
assert device.api.vapix.product_type == "Network Camera"
assert device.api.vapix.serial_number == "00408C12345"
entry = device.config_entry
assert len(forward_entry_setup.mock_calls) == 4
assert forward_entry_setup.mock_calls[0][1] == (entry, "binary_sensor")
assert forward_entry_setup.mock_calls[1][1] == (entry, "camera")
assert forward_entry_setup.mock_calls[2][1] == (entry, "light")
assert forward_entry_setup.mock_calls[3][1] == (entry, "switch")
assert device.host == ENTRY_CONFIG[CONF_HOST]
assert device.model == ENTRY_CONFIG[CONF_MODEL]
assert device.name == ENTRY_CONFIG[CONF_NAME]
assert device.serial == ENTRY_CONFIG[CONF_MAC]
async def test_device_info(hass):
"""Verify other path of device information works."""
api_discovery = deepcopy(API_DISCOVERY_RESPONSE)
api_discovery["data"]["apiList"].append(API_DISCOVERY_BASIC_DEVICE_INFO)
with patch.dict(API_DISCOVERY_RESPONSE, api_discovery):
device = await setup_axis_integration(hass)
assert device.api.vapix.firmware_version == "9.80.1"
assert device.api.vapix.product_number == "M1065-LW"
assert device.api.vapix.product_type == "Network Camera"
assert device.api.vapix.serial_number == "00408C12345"
async def test_device_support_mqtt(hass, mqtt_mock):
"""Successful setup."""
api_discovery = deepcopy(API_DISCOVERY_RESPONSE)
api_discovery["data"]["apiList"].append(API_DISCOVERY_MQTT)
with patch.dict(API_DISCOVERY_RESPONSE, api_discovery):
await setup_axis_integration(hass)
mqtt_mock.async_subscribe.assert_called_with(f"{MAC}/#", mock.ANY, 0, "utf-8")
topic = f"{MAC}/event/tns:onvif/Device/tns:axis/Sensor/PIR/$source/sensor/0"
message = b'{"timestamp": 1590258472044, "topic": "onvif:Device/axis:Sensor/PIR", "message": {"source": {"sensor": "0"}, "key": {}, "data": {"state": "1"}}}'
assert len(hass.states.async_entity_ids(BINARY_SENSOR_DOMAIN)) == 0
async_fire_mqtt_message(hass, topic, message)
await hass.async_block_till_done()
assert len(hass.states.async_entity_ids(BINARY_SENSOR_DOMAIN)) == 1
pir = hass.states.get(f"binary_sensor.{NAME}_pir_0")
assert pir.state == "on"
assert pir.name == f"{NAME} PIR 0"
async def test_update_address(hass):
"""Test update address works."""
device = await setup_axis_integration(hass)
assert device.api.config.host == "1.2.3.4"
await hass.config_entries.flow.async_init(
AXIS_DOMAIN,
data={
"host": "2.3.4.5",
"port": 80,
"hostname": "name",
"properties": {"macaddress": MAC},
},
context={"source": "zeroconf"},
)
await hass.async_block_till_done()
assert device.api.config.host == "2.3.4.5"
async def test_device_unavailable(hass):
"""Successful setup."""
device = await setup_axis_integration(hass)
device.async_connection_status_callback(status=False)
assert not device.available
async def test_device_reset(hass):
"""Successfully reset device."""
device = await setup_axis_integration(hass)
result = await device.async_reset()
assert result is True
async def test_device_not_accessible(hass):
"""Failed setup schedules a retry of setup."""
with patch.object(axis.device, "get_device", side_effect=axis.errors.CannotConnect):
await setup_axis_integration(hass)
assert hass.data[AXIS_DOMAIN] == {}
async def test_device_unknown_error(hass):
"""Unknown errors are handled."""
with patch.object(axis.device, "get_device", side_effect=Exception):
await setup_axis_integration(hass)
assert hass.data[AXIS_DOMAIN] == {}
async def test_new_event_sends_signal(hass):
"""Make sure that new event send signal."""
entry = Mock()
entry.data = ENTRY_CONFIG
axis_device = axis.device.AxisNetworkDevice(hass, entry)
with patch.object(axis.device, "async_dispatcher_send") as mock_dispatch_send:
axis_device.async_event_callback(action=OPERATION_INITIALIZED, event_id="event")
await hass.async_block_till_done()
assert len(mock_dispatch_send.mock_calls) == 1
assert len(mock_dispatch_send.mock_calls[0]) == 3
async def test_shutdown():
"""Successful shutdown."""
hass = Mock()
entry = Mock()
entry.data = ENTRY_CONFIG
axis_device = axis.device.AxisNetworkDevice(hass, entry)
axis_device.api = Mock()
axis_device.shutdown(None)
assert len(axis_device.api.stream.stop.mock_calls) == 1
async def test_get_device_fails(hass):
"""Device unauthorized yields authentication required error."""
with patch(
"axis.vapix.session_request", side_effect=axislib.Unauthorized
), pytest.raises(axis.errors.AuthenticationRequired):
await axis.device.get_device(hass, host="", port="", username="", password="")
async def test_get_device_device_unavailable(hass):
"""Device unavailable yields cannot connect error."""
with patch(
"axis.vapix.session_request", side_effect=axislib.RequestError
), pytest.raises(axis.errors.CannotConnect):
await axis.device.get_device(hass, host="", port="", username="", password="")
async def test_get_device_unknown_error(hass):
"""Device yield unknown error."""
with patch(
"axis.vapix.session_request", side_effect=axislib.AxisException
), pytest.raises(axis.errors.AuthenticationRequired):
await axis.device.get_device(hass, host="", port="", username="", password="")
|
|
# Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
import operator
import os
import re
import shutil
import subprocess
import tempfile
import time
import h5py
import numpy as np
import PIL.Image
from .train import TrainTask
import digits
from digits import utils
from digits.config import config_value
from digits.utils import subclass, override, constants
# Must import after importing digits.config
import caffe_pb2
# NOTE: Increment this every time the pickled object changes
PICKLE_VERSION = 1
# Constants
TORCH_MODEL_FILE = 'model.lua'
TORCH_SNAPSHOT_PREFIX = 'snapshot'
@subclass
class TorchTrainTask(TrainTask):
"""
Trains a torch model
"""
TORCH_LOG = 'torch_output.log'
def __init__(self, **kwargs):
"""
Arguments:
network -- a NetParameter defining the network
"""
super(TorchTrainTask, self).__init__(**kwargs)
# save network description to file
with open(os.path.join(self.job_dir, TORCH_MODEL_FILE), "w") as outfile:
outfile.write(self.network)
self.pickver_task_torch_train = PICKLE_VERSION
self.current_epoch = 0
self.loaded_snapshot_file = None
self.loaded_snapshot_epoch = None
self.image_mean = None
self.classifier = None
self.solver = None
self.model_file = TORCH_MODEL_FILE
self.train_file = constants.TRAIN_DB
self.val_file = constants.VAL_DB
self.snapshot_prefix = TORCH_SNAPSHOT_PREFIX
self.log_file = self.TORCH_LOG
def __getstate__(self):
state = super(TorchTrainTask, self).__getstate__()
# Don't pickle these things
if 'labels' in state:
del state['labels']
if 'image_mean' in state:
del state['image_mean']
if 'classifier' in state:
del state['classifier']
if 'torch_log' in state:
del state['torch_log']
return state
def __setstate__(self, state):
super(TorchTrainTask, self).__setstate__(state)
# Make changes to self
self.loaded_snapshot_file = None
self.loaded_snapshot_epoch = None
# These things don't get pickled
self.image_mean = None
self.classifier = None
### Task overrides
@override
def name(self):
return 'Train Torch Model'
@override
def before_run(self):
super(TorchTrainTask, self).before_run()
self.torch_log = open(self.path(self.TORCH_LOG), 'a')
self.saving_snapshot = False
self.receiving_train_output = False
self.receiving_val_output = False
self.last_train_update = None
self.displaying_network = False
self.temp_unrecognized_output = []
return True
def create_mean_file(self):
filename = os.path.join(self.job_dir, constants.MEAN_FILE_IMAGE)
# don't recreate file if it already exists
if not os.path.exists(filename):
mean_file = self.dataset.get_mean_file()
assert mean_file is not None and mean_file.endswith('.binaryproto'), 'Mean subtraction required but dataset has no mean file in .binaryproto format'
blob = caffe_pb2.BlobProto()
with open(self.dataset.path(mean_file),'rb') as infile:
blob.ParseFromString(infile.read())
data = np.array(blob.data, dtype=np.uint8).reshape(blob.channels, blob.height, blob.width)
if blob.channels == 3:
# converting from BGR to RGB
data = data[[2,1,0],...] # channel swap
# convert to (height, width, channels)
data = data.transpose((1,2,0))
else:
assert blob.channels == 1
# convert to (height, width)
data = data[0]
# save to file
image = PIL.Image.fromarray(data)
image.save(filename)
return filename
@override
def task_arguments(self, resources, env):
if config_value('torch_root') == '<PATHS>':
torch_bin = 'th'
else:
torch_bin = os.path.join(config_value('torch_root'), 'bin', 'th')
dataset_backend = self.dataset.get_backend()
assert dataset_backend=='lmdb' or dataset_backend=='hdf5'
args = [torch_bin,
os.path.join(os.path.dirname(os.path.dirname(digits.__file__)),'tools','torch','wrapper.lua'),
'main.lua',
'--network=%s' % self.model_file.split(".")[0],
'--epoch=%d' % int(self.train_epochs),
'--networkDirectory=%s' % self.job_dir,
'--save=%s' % self.job_dir,
'--snapshotPrefix=%s' % self.snapshot_prefix,
'--snapshotInterval=%s' % self.snapshot_interval,
'--learningRate=%s' % self.learning_rate,
'--policy=%s' % str(self.lr_policy['policy']),
'--dbbackend=%s' % dataset_backend
]
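# Illustrative example of the base command assembled above (paths shortened,
# actual values depend on the job configuration):
#   th .../tools/torch/wrapper.lua main.lua --network=model --epoch=30 \
#      --networkDirectory=<job_dir> --save=<job_dir> --snapshotPrefix=snapshot ...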
if self.batch_size is not None:
args.append('--batchSize=%d' % self.batch_size)
if self.use_mean != 'none':
filename = self.create_mean_file()
args.append('--mean=%s' % filename)
if hasattr(self.dataset, 'labels_file'):
args.append('--labels=%s' % self.dataset.path(self.dataset.labels_file))
train_feature_db_path = self.dataset.get_feature_db_path(constants.TRAIN_DB)
train_label_db_path = self.dataset.get_label_db_path(constants.TRAIN_DB)
val_feature_db_path = self.dataset.get_feature_db_path(constants.VAL_DB)
val_label_db_path = self.dataset.get_label_db_path(constants.VAL_DB)
args.append('--train=%s' % train_feature_db_path)
if train_label_db_path:
args.append('--train_labels=%s' % train_label_db_path)
if val_feature_db_path:
args.append('--validation=%s' % val_feature_db_path)
if val_label_db_path:
args.append('--validation_labels=%s' % val_label_db_path)
#learning rate policy input parameters
if self.lr_policy['policy'] == 'fixed':
pass
elif self.lr_policy['policy'] == 'step':
args.append('--gamma=%s' % self.lr_policy['gamma'])
args.append('--stepvalues=%s' % self.lr_policy['stepsize'])
elif self.lr_policy['policy'] == 'multistep':
args.append('--stepvalues=%s' % self.lr_policy['stepvalue'])
args.append('--gamma=%s' % self.lr_policy['gamma'])
elif self.lr_policy['policy'] == 'exp':
args.append('--gamma=%s' % self.lr_policy['gamma'])
elif self.lr_policy['policy'] == 'inv':
args.append('--gamma=%s' % self.lr_policy['gamma'])
args.append('--power=%s' % self.lr_policy['power'])
elif self.lr_policy['policy'] == 'poly':
args.append('--power=%s' % self.lr_policy['power'])
elif self.lr_policy['policy'] == 'sigmoid':
args.append('--stepvalues=%s' % self.lr_policy['stepsize'])
args.append('--gamma=%s' % self.lr_policy['gamma'])
if self.shuffle:
args.append('--shuffle=yes')
if self.crop_size:
args.append('--crop=yes')
args.append('--croplen=%d' % self.crop_size)
if self.use_mean == 'pixel':
args.append('--subtractMean=pixel')
elif self.use_mean == 'image':
args.append('--subtractMean=image')
else:
args.append('--subtractMean=none')
if self.random_seed is not None:
args.append('--seed=%s' % self.random_seed)
if self.solver_type == 'SGD':
args.append('--optimization=sgd')
elif self.solver_type == 'NESTEROV':
args.append('--optimization=nag')
elif self.solver_type == 'ADAGRAD':
args.append('--optimization=adagrad')
elif self.solver_type == 'RMSPROP':
args.append('--optimization=rmsprop')
elif self.solver_type == 'ADADELTA':
args.append('--optimization=adadelta')
elif self.solver_type == 'ADAM':
args.append('--optimization=adam')
else:
raise ValueError('Unknown solver_type %s' % self.solver_type)
if self.val_interval > 0:
args.append('--interval=%s' % self.val_interval)
if 'gpus' in resources:
identifiers = []
for identifier, value in resources['gpus']:
identifiers.append(identifier)
# make all selected GPUs visible to the Torch 'th' process.
# don't make other GPUs visible though since Torch will load
# CUDA libraries and allocate memory on all visible GPUs by
# default.
env['CUDA_VISIBLE_DEVICES'] = ','.join(identifiers)
# switch to GPU mode
args.append('--type=cuda')
else:
# switch to CPU mode
args.append('--type=float')
if self.pretrained_model:
args.append('--weights=%s' % self.path(self.pretrained_model))
return args
@override
def process_output(self, line):
regex = re.compile('\x1b\[[0-9;]*m', re.UNICODE) #TODO: need to include regular expression for MAC color codes
line=regex.sub('', line).strip()
self.torch_log.write('%s\n' % line)
self.torch_log.flush()
# parse torch output
timestamp, level, message = self.preprocess_output_torch(line)
# return false when unrecognized output is encountered
if not level:
# network display in progress
if self.displaying_network:
self.temp_unrecognized_output.append(line)
return True
return False
if not message:
return True
# network display ends
if self.displaying_network:
if message.startswith('Network definition ends'):
self.temp_unrecognized_output = []
self.displaying_network = False
return True
# Lua prints infinite numbers as 'inf'; Torch tensors may also produce 'nan', so match both in addition to regular floats
float_exp = '([-]?inf|nan|[-+]?[0-9]*\.?[0-9]+(e[-+]?[0-9]+)?)'
# loss and learning rate updates
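# e.g. "Training (epoch 1.5): loss = 0.42, lr = 0.001" (illustrative sample line)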
match = re.match(r'Training \(epoch (\d+\.?\d*)\): \w*loss\w* = %s, lr = %s' % (float_exp, float_exp), message)
if match:
index = float(match.group(1))
l = match.group(2)
assert not('inf' in l or 'nan' in l), 'Network reported %s for training loss. Try decreasing your learning rate.' % l
l = float(l)
lr = match.group(4)
lr = float(lr)
# epoch updates
self.send_progress_update(index)
self.save_train_output('loss', 'SoftmaxWithLoss', l)
self.save_train_output('learning_rate', 'LearningRate', lr)
self.logger.debug(message)
return True
# testing loss and accuracy updates
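# e.g. "Validation (epoch 2): loss = 0.35, accuracy = 0.91" (illustrative sample line)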
match = re.match(r'Validation \(epoch (\d+\.?\d*)\): \w*loss\w* = %s(, accuracy = %s)?' % (float_exp,float_exp), message, flags=re.IGNORECASE)
if match:
index = float(match.group(1))
l = match.group(2)
a = match.group(5)
# note: validation loss could have diverged however if the training loss is still finite, there is a slim possibility
# that the network keeps learning something useful, so we don't treat infinite validation loss as a fatal error
if not('inf' in l or 'nan' in l):
l = float(l)
self.logger.debug('Network validation loss #%s: %s' % (index, l))
# epoch updates
self.send_progress_update(index)
self.save_val_output('loss', 'SoftmaxWithLoss', l)
if a and a.lower() != 'inf' and a.lower() != '-inf':
a = float(a)
self.logger.debug('Network accuracy #%s: %s' % (index, a))
self.save_val_output('accuracy', 'Accuracy', a)
return True
# snapshot saved
if self.saving_snapshot:
if not message.startswith('Snapshot saved'):
self.logger.warning('Torch output format seems to have changed. Expected "Snapshot saved..." after "Snapshotting to..."')
else:
self.logger.info('Snapshot saved.') # to print file name here, you can use "message"
self.detect_snapshots()
self.send_snapshot_update()
self.saving_snapshot = False
return True
# snapshot starting
match = re.match(r'Snapshotting to (.*)\s*$', message)
if match:
self.saving_snapshot = True
return True
# network display starting
if message.startswith('Network definition:'):
self.displaying_network = True
return True
if level in ['error', 'critical']:
self.logger.error('%s: %s' % (self.name(), message))
self.exception = message
return True
# skip remaining info and warn messages
return True
@staticmethod
def preprocess_output_torch(line):
"""
Takes a line of output and parses it according to the Torch wrapper's output format
Returns (timestamp, level, message) or (None, None, None)
"""
# NOTE: This must change when the logging format changes
# YYYY-MM-DD HH:MM:SS [LEVEL ] message
match = re.match(r'(\d{4}-\d{2}-\d{2}\s\d{2}:\d{2}:\d{2})\s\[(\w+)\s*]\s+(\S.*)$', line)
if match:
timestamp = time.mktime(time.strptime(match.group(1), '%Y-%m-%d %H:%M:%S'))
level = match.group(2)
message = match.group(3)
if level == 'INFO':
level = 'info'
elif level == 'WARNING':
level = 'warning'
elif level == 'ERROR':
level = 'error'
elif level == 'FAIL': #FAIL
level = 'critical'
return (timestamp, level, message)
else:
#self.logger.warning('Unrecognized task output "%s"' % line)
return (None, None, None)
def send_snapshot_update(self):
"""
Sends socketio message about the snapshot list
"""
# TODO: move to TrainTask
from digits.webapp import socketio
socketio.emit('task update',
{
'task': self.html_id(),
'update': 'snapshots',
'data': self.snapshot_list(),
},
namespace='/jobs',
room=self.job_id,
)
### TrainTask overrides
@override
def after_run(self):
if self.temp_unrecognized_output:
if self.traceback:
self.traceback = self.traceback + ('\n'.join(self.temp_unrecognized_output))
else:
self.traceback = '\n'.join(self.temp_unrecognized_output)
self.temp_unrecognized_output = []
self.torch_log.close()
@override
def after_runtime_error(self):
if os.path.exists(self.path(self.TORCH_LOG)):
output = subprocess.check_output(['tail', '-n40', self.path(self.TORCH_LOG)])
lines = []
for line in output.split('\n'):
# parse torch header
timestamp, level, message = self.preprocess_output_torch(line)
if message:
lines.append(message)
# return the last 20 lines
traceback = '\n\nLast output:\n' + '\n'.join(lines[len(lines)-20:]) if len(lines)>0 else ''
if self.traceback:
self.traceback = self.traceback + traceback
else:
self.traceback = traceback
if 'DIGITS_MODE_TEST' in os.environ:
print output
@override
def detect_snapshots(self):
self.snapshots = []
snapshot_dir = os.path.join(self.job_dir, os.path.dirname(self.snapshot_prefix))
snapshots = []
for filename in os.listdir(snapshot_dir):
# find models
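# e.g. "snapshot_30_Weights.t7" -> epoch 30, "snapshot_0.5_Model.t7" -> epoch 0.5 (illustrative)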
match = re.match(r'%s_(\d+)\.?(\d*)(_Weights|_Model)\.t7' % os.path.basename(self.snapshot_prefix), filename)
if match:
epoch = 0
if match.group(2) == '':
epoch = int(match.group(1))
else:
epoch = float(match.group(1) + '.' + match.group(2))
snapshots.append( (
os.path.join(snapshot_dir, filename),
epoch
)
)
self.snapshots = sorted(snapshots, key=lambda tup: tup[1])
return len(self.snapshots) > 0
@override
def est_next_snapshot(self):
# TODO: Currently this function is not in use. We may have to implement it in the future.
return None
@override
def infer_one(self, data, snapshot_epoch=None, layers=None, gpu=None):
return self.infer_one_image(data,
snapshot_epoch=snapshot_epoch,
layers=layers,
gpu=gpu,
)
def infer_one_image(self, image, snapshot_epoch=None, layers=None, gpu=None):
"""
Classify an image
Returns (predictions, visualizations)
predictions -- an array of [ (label, confidence), ...] for each label, sorted by confidence
visualizations -- an array of (layer_name, activations, weights) for the specified layers
Returns (None, None) if something goes wrong
Arguments:
image -- a np.array
Keyword arguments:
snapshot_epoch -- which snapshot to use
layers -- which layer activation[s] and weight[s] to visualize
"""
temp_image_handle, temp_image_path = tempfile.mkstemp(suffix='.png')
os.close(temp_image_handle)
image = PIL.Image.fromarray(image)
try:
image.save(temp_image_path, format='png')
except KeyError:
error_message = 'Unable to save file to "%s"' % temp_image_path
self.logger.error(error_message)
raise digits.inference.errors.InferenceError(error_message)
if config_value('torch_root') == '<PATHS>':
torch_bin = 'th'
else:
torch_bin = os.path.join(config_value('torch_root'), 'bin', 'th')
file_to_load = self.get_snapshot(snapshot_epoch)
args = [torch_bin,
os.path.join(os.path.dirname(os.path.dirname(digits.__file__)),'tools','torch','wrapper.lua'),
'test.lua',
'--image=%s' % temp_image_path,
'--network=%s' % self.model_file.split(".")[0],
'--networkDirectory=%s' % self.job_dir,
'--snapshot=%s' % file_to_load,
'--allPredictions=yes',
]
if hasattr(self.dataset, 'labels_file'):
args.append('--labels=%s' % self.dataset.path(self.dataset.labels_file))
if self.use_mean != 'none':
filename = self.create_mean_file()
args.append('--mean=%s' % os.path.join(self.job_dir, constants.MEAN_FILE_IMAGE))
if self.use_mean == 'pixel':
args.append('--subtractMean=pixel')
elif self.use_mean == 'image':
args.append('--subtractMean=image')
else:
args.append('--subtractMean=none')
if self.crop_size:
args.append('--crop=yes')
args.append('--croplen=%d' % self.crop_size)
if layers=='all':
args.append('--visualization=yes')
args.append('--save=%s' % self.job_dir)
# Convert them all to strings
args = [str(x) for x in args]
regex = re.compile('\x1b\[[0-9;]*m', re.UNICODE) #TODO: need to include regular expression for MAC color codes
self.logger.info('%s classify one task started.' % self.get_framework_id())
unrecognized_output = []
predictions = []
self.visualization_file = None
env = os.environ.copy()
if gpu is not None:
args.append('--type=cuda')
# make only the selected GPU visible
env['CUDA_VISIBLE_DEVICES'] = "%d" % gpu
else:
args.append('--type=float')
p = subprocess.Popen(args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=self.job_dir,
close_fds=True,
env=env,
)
try:
while p.poll() is None:
for line in utils.nonblocking_readlines(p.stdout):
if self.aborted.is_set():
p.terminate()
raise digits.inference.errors.InferenceError('%s classify one task got aborted. error code - %d' % (self.get_framework_id(), p.returncode))
if line is not None:
# Remove color codes and whitespace
line=regex.sub('', line).strip()
if line:
if not self.process_test_output(line, predictions, 'one'):
self.logger.warning('%s classify one task unrecognized input: %s' % (self.get_framework_id(), line.strip()))
unrecognized_output.append(line)
else:
time.sleep(0.05)
except Exception as e:
if p.poll() is None:
p.terminate()
error_message = ''
if type(e) == digits.inference.errors.InferenceError:
error_message = e.__str__()
else:
error_message = '%s classify one task failed with error code %d \n %s' % (self.get_framework_id(), p.returncode, str(e))
self.logger.error(error_message)
if unrecognized_output:
unrecognized_output = '\n'.join(unrecognized_output)
error_message = error_message + unrecognized_output
raise digits.inference.errors.InferenceError(error_message)
finally:
self.after_test_run(temp_image_path)
if p.returncode != 0:
error_message = '%s classify one task failed with error code %d' % (self.get_framework_id(), p.returncode)
self.logger.error(error_message)
if unrecognized_output:
unrecognized_output = '\n'.join(unrecognized_output)
error_message = error_message + unrecognized_output
raise digits.inference.errors.InferenceError(error_message)
else:
self.logger.info('%s classify one task completed.' % self.get_framework_id())
predictions = {'output': np.array(predictions)}
visualizations = []
if layers=='all' and self.visualization_file:
vis_db = h5py.File(self.visualization_file, 'r')
# the HDF5 database is organized as follows:
# <root>
# |- layers
# |- 1
# | |- name
# | |- activations
# | |- weights
# |- 2
for layer_id,layer in vis_db['layers'].items():
layer_desc = layer['name'][...].tostring()
if 'Sequential' in layer_desc or 'Parallel' in layer_desc:
# ignore containers
continue
idx = int(layer_id)
# activations
if 'activations' in layer:
data = np.array(layer['activations'][...])
# skip batch dimension
if len(data.shape)>1 and data.shape[0]==1:
data = data[0]
vis = utils.image.get_layer_vis_square(data)
mean, std, hist = self.get_layer_statistics(data)
visualizations.append(
{
'id': idx,
'name': layer_desc,
'vis_type': 'Activations',
'vis': vis,
'data_stats': {
'shape': data.shape,
'mean': mean,
'stddev': std,
'histogram': hist,
}
}
)
# weights
if 'weights' in layer:
data = np.array(layer['weights'][...])
if 'Linear' not in layer_desc:
vis = utils.image.get_layer_vis_square(data)
else:
# Linear (inner product) layers have too many weights
# to display
vis = None
mean, std, hist = self.get_layer_statistics(data)
parameter_count = reduce(operator.mul, data.shape, 1)
if 'bias' in layer:
bias = np.array(layer['bias'][...])
parameter_count += reduce(operator.mul, bias.shape, 1)
visualizations.append(
{
'id': idx,
'name': layer_desc,
'vis_type': 'Weights',
'vis': vis,
'param_count': parameter_count,
'data_stats': {
'shape': data.shape,
'mean': mean,
'stddev': std,
'histogram': hist,
}
}
)
# sort by layer ID
visualizations = sorted(visualizations,key=lambda x:x['id'])
return (predictions,visualizations)
def get_layer_statistics(self, data):
"""
Returns statistics for the given layer data:
(mean, standard deviation, histogram)
histogram -- [y, x, ticks]
Arguments:
data -- a np.ndarray
"""
# XXX These calculations can be super slow
mean = np.mean(data)
std = np.std(data)
y, x = np.histogram(data, bins=20)
y = list(y)
ticks = x[[0,len(x)/2,-1]]
x = [(x[i]+x[i+1])/2.0 for i in xrange(len(x)-1)]
ticks = list(ticks)
return (mean, std, [y, x, ticks])
def after_test_run(self, temp_image_path):
try:
os.remove(temp_image_path)
except OSError:
pass
def process_test_output(self, line, predictions, test_category):
# parse torch output
timestamp, level, message = self.preprocess_output_torch(line)
# return false when unrecognized output is encountered
if not (level or message):
return False
if not message:
return True
float_exp = '([-]?inf|nan|[-+]?[0-9]*\.?[0-9]+(e[-+]?[0-9]+)?)'
# format of output while testing single image
match = re.match(r'For image \d+, predicted class \d+: \d+ \((.*?)\) %s' % (float_exp), message)
if match:
label = match.group(1)
confidence = match.group(2)
assert not('inf' in confidence or 'nan' in confidence), 'Network reported %s for confidence value. Please check image and network' % label
confidence = float(confidence)
predictions.append((label, confidence))
return True
# format of output while testing multiple images
match = re.match(r'Predictions for image \d+: (.*)', message)
if match:
values = match.group(1).strip()
# 'values' should contain a JSON representation of
# the prediction
predictions.append(eval(values))
return True
# path to visualization file
match = re.match(r'Saving visualization to (.*)', message)
if match:
self.visualization_file = match.group(1).strip()
return True
# log info and warning messages here since we don't maintain a separate log file for model testing
if level == 'info':
self.logger.debug('%s classify %s task : %s' % (self.get_framework_id(), test_category, message))
return True
if level == 'warning':
self.logger.warning('%s classify %s task : %s' % (self.get_framework_id(), test_category, message))
return True
if level in ['error', 'critical']:
raise digits.inference.errors.InferenceError('%s classify %s task failed with error message - %s' % (self.get_framework_id(), test_category, message))
return True # control never reaches this line. It can be removed.
@override
def infer_many(self, data, snapshot_epoch=None, gpu=None):
return self.infer_many_images(data, snapshot_epoch=snapshot_epoch, gpu=gpu)
def infer_many_images(self, images, snapshot_epoch=None, gpu=None):
"""
Returns (labels, results):
labels -- an array of strings
results -- a 2D np array:
[
[image0_label0_confidence, image0_label1_confidence, ...],
[image1_label0_confidence, image1_label1_confidence, ...],
...
]
Arguments:
images -- a list of np.arrays
Keyword arguments:
snapshot_epoch -- which snapshot to use
"""
# create a temporary folder to store images and a temporary file
# to store a list of paths to the images
temp_dir_path = tempfile.mkdtemp()
try: # this try...finally clause is used to clean up the temp directory in any case
temp_imglist_handle, temp_imglist_path = tempfile.mkstemp(dir=temp_dir_path, suffix='.txt')
for image in images:
temp_image_handle, temp_image_path = tempfile.mkstemp(
dir=temp_dir_path, suffix='.png')
image = PIL.Image.fromarray(image)
try:
image.save(temp_image_path, format='png')
except KeyError:
error_message = 'Unable to save file to "%s"' % temp_image_path
self.logger.error(error_message)
raise digits.inference.errors.InferenceError(error_message)
os.write(temp_imglist_handle, "%s\n" % temp_image_path)
os.close(temp_image_handle)
os.close(temp_imglist_handle)
if config_value('torch_root') == '<PATHS>':
torch_bin = 'th'
else:
torch_bin = os.path.join(config_value('torch_root'), 'bin', 'th')
file_to_load = self.get_snapshot(snapshot_epoch)
args = [torch_bin,
os.path.join(os.path.dirname(os.path.dirname(digits.__file__)),'tools','torch','wrapper.lua'),
'test.lua',
'--testMany=yes',
'--allPredictions=yes', #all predictions are grabbed and formatted as required by DIGITS
'--image=%s' % str(temp_imglist_path),
'--network=%s' % self.model_file.split(".")[0],
'--networkDirectory=%s' % self.job_dir,
'--snapshot=%s' % file_to_load,
]
if hasattr(self.dataset, 'labels_file'):
args.append('--labels=%s' % self.dataset.path(self.dataset.labels_file))
if self.use_mean != 'none':
filename = self.create_mean_file()
args.append('--mean=%s' % os.path.join(self.job_dir, constants.MEAN_FILE_IMAGE))
if self.use_mean == 'pixel':
args.append('--subtractMean=pixel')
elif self.use_mean == 'image':
args.append('--subtractMean=image')
else:
args.append('--subtractMean=none')
if self.crop_size:
args.append('--crop=yes')
args.append('--croplen=%d' % self.crop_size)
# Convert them all to strings
args = [str(x) for x in args]
regex = re.compile('\x1b\[[0-9;]*m', re.UNICODE) # TODO: need to include a regular expression for Mac color codes
self.logger.info('%s classify many task started.' % self.name())
env = os.environ.copy()
if gpu is not None:
args.append('--type=cuda')
# make only the selected GPU visible
env['CUDA_VISIBLE_DEVICES'] = "%d" % gpu
else:
args.append('--type=float')
unrecognized_output = []
predictions = []
p = subprocess.Popen(args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=self.job_dir,
close_fds=True,
env=env
)
try:
while p.poll() is None:
for line in utils.nonblocking_readlines(p.stdout):
if self.aborted.is_set():
p.terminate()
raise digits.inference.errors.InferenceError('%s classify many task got aborted. error code - %d' % (self.get_framework_id(), p.returncode))
if line is not None:
# Remove whitespace and color codes. Color codes are appended to the beginning and end of each line by the torch binary, i.e. 'th'. See the link below for more information:
# https://groups.google.com/forum/#!searchin/torch7/color$20codes/torch7/8O_0lSgSzuA/Ih6wYg9fgcwJ
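# e.g. the regex turns '\x1b[0;32mpredicted\x1b[0m' into 'predicted'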
line=regex.sub('', line).strip()
if line:
if not self.process_test_output(line, predictions, 'many'):
self.logger.warning('%s classify many task unrecognized input: %s' % (self.get_framework_id(), line.strip()))
unrecognized_output.append(line)
else:
time.sleep(0.05)
except Exception as e:
if p.poll() is None:
p.terminate()
error_message = ''
if type(e) == digits.inference.errors.InferenceError:
error_message = e.__str__()
else:
error_message = '%s classify many task failed with error code %d \n %s' % (self.get_framework_id(), p.returncode, str(e))
self.logger.error(error_message)
if unrecognized_output:
unrecognized_output = '\n'.join(unrecognized_output)
error_message = error_message + unrecognized_output
raise digits.inference.errors.InferenceError(error_message)
if p.returncode != 0:
error_message = '%s classify many task failed with error code %d' % (self.get_framework_id(), p.returncode)
self.logger.error(error_message)
if unrecognized_output:
unrecognized_output = '\n'.join(unrecognized_output)
error_message = error_message + unrecognized_output
raise digits.inference.errors.InferenceError(error_message)
else:
self.logger.info('%s classify many task completed.' % self.get_framework_id())
finally:
shutil.rmtree(temp_dir_path)
# task.infer_one() expects dictionary in return value
return {'output': np.array(predictions)}
def has_model(self):
"""
Returns True if there is a model that can be used
"""
return len(self.snapshots) != 0
@override
def get_model_files(self):
"""
return paths to model files
"""
return {
"Network": self.model_file
}
@override
def get_network_desc(self):
"""
return text description of network
"""
with open(os.path.join(self.job_dir, TORCH_MODEL_FILE), "r") as infile:
desc = infile.read()
return desc
def get_snapshot(self, epoch):
"""
return snapshot file for specified epoch
"""
file_to_load = None
if not epoch:
epoch = self.snapshots[-1][1]
file_to_load = self.snapshots[-1][0]
else:
for snapshot_file, snapshot_epoch in self.snapshots:
if snapshot_epoch == epoch:
file_to_load = snapshot_file
break
if file_to_load is None:
raise Exception('snapshot not found for epoch "%s"' % epoch)
return file_to_load
|
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2005-2009 Edgewall Software
# Copyright (C) 2005-2007 Christopher Lenz <cmlenz@gmx.de>
# Copyright (C) 2005 Matthew Good <trac@matt-good.net>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Christopher Lenz <cmlenz@gmx.de>
# Matthew Good <trac@matt-good.net>
import cgi
import dircache
import fnmatch
from functools import partial
import gc
import locale
import os
import pkg_resources
from pprint import pformat, pprint
import re
import sys
from genshi.builder import Fragment, tag
from genshi.output import DocType
from genshi.template import TemplateLoader
from trac import __version__ as TRAC_VERSION
from trac.config import BoolOption, ExtensionOption, Option, \
OrderedExtensionsOption
from trac.core import *
from trac.env import open_environment
from trac.loader import get_plugin_info, match_plugins_to_frames
from trac.perm import PermissionCache, PermissionError
from trac.resource import ResourceNotFound
from trac.util import arity, get_frame_info, get_last_traceback, hex_entropy, \
read_file, safe_repr, translation
from trac.util.concurrency import threading
from trac.util.datefmt import format_datetime, localtz, timezone, user_time
from trac.util.text import exception_to_unicode, shorten_line, to_unicode
from trac.util.translation import _, get_negotiated_locale, has_babel, \
safefmt, tag_
from trac.web.api import *
from trac.web.chrome import Chrome
from trac.web.href import Href
from trac.web.session import Session
#: This URL is used for semi-automatic bug reports (see
#: `send_internal_error`). Please modify it to point to your own
#: Trac instance if you distribute a patched version of Trac.
default_tracker = 'http://trac.edgewall.org'
class FakeSession(dict):
sid = None
def save(self):
pass
class FakePerm(dict):
def require(self, *args):
return False
def __call__(self, *args):
return self
class RequestWithSession(Request):
"""A request that saves its associated session when sending the reply."""
def send_response(self, code=200):
if code < 400:
self.session.save()
super(RequestWithSession, self).send_response(code)
class RequestDispatcher(Component):
"""Web request dispatcher.
This component dispatches incoming requests to registered
handlers. Besides, it also takes care of user authentication and
request pre- and post-processing.
"""
required = True
authenticators = ExtensionPoint(IAuthenticator)
handlers = ExtensionPoint(IRequestHandler)
filters = OrderedExtensionsOption('trac', 'request_filters',
IRequestFilter,
doc="""Ordered list of filters to apply to all requests
(''since 0.10'').""")
default_handler = ExtensionOption('trac', 'default_handler',
IRequestHandler, 'WikiModule',
"""Name of the component that handles requests to the base
URL.
Options include `TimelineModule`, `RoadmapModule`,
`BrowserModule`, `QueryModule`, `ReportModule`, `TicketModule`
and `WikiModule`. The default is `WikiModule`. (''since 0.9'')""")
default_timezone = Option('trac', 'default_timezone', '',
"""The default timezone to use""")
default_language = Option('trac', 'default_language', '',
"""The preferred language to use if no user preference has
been set. (''since 0.12.1'')
""")
default_date_format = Option('trac', 'default_date_format', '',
"""The date format. Valid options are 'iso8601' for selecting
ISO 8601 format, or leave it empty which means the default
date format will be inferred from the browser's default
language. (''since 1.0'')
""")
use_xsendfile = BoolOption('trac', 'use_xsendfile', 'false',
"""When true, send a `X-Sendfile` header and no content when sending
files from the filesystem, so that the web server handles the content.
This requires a web server that knows how to handle such a header,
like Apache with `mod_xsendfile` or lighttpd. (''since 1.0'')
""")
# Public API
def authenticate(self, req):
for authenticator in self.authenticators:
authname = authenticator.authenticate(req)
if authname:
return authname
else:
return 'anonymous'
def dispatch(self, req):
"""Find a registered handler that matches the request and let
it process it.
In addition, this method initializes the data dictionary
passed to the template and adds the web site chrome.
"""
self.log.debug('Dispatching %r', req)
chrome = Chrome(self.env)
# Setup request callbacks for lazily-evaluated properties
req.callbacks.update({
'authname': self.authenticate,
'chrome': chrome.prepare_request,
'perm': self._get_perm,
'session': self._get_session,
'locale': self._get_locale,
'lc_time': self._get_lc_time,
'tz': self._get_timezone,
'form_token': self._get_form_token,
'use_xsendfile': self._get_use_xsendfile,
})
try:
try:
# Select the component that should handle the request
chosen_handler = None
try:
for handler in self.handlers:
if handler.match_request(req):
chosen_handler = handler
break
if not chosen_handler:
if not req.path_info or req.path_info == '/':
chosen_handler = self.default_handler
# pre-process any incoming request, whether a handler
# was found or not
chosen_handler = self._pre_process_request(req,
chosen_handler)
except TracError, e:
raise HTTPInternalError(e)
if not chosen_handler:
if req.path_info.endswith('/'):
# Strip trailing / and redirect
target = req.path_info.rstrip('/').encode('utf-8')
if req.query_string:
target += '?' + req.query_string
req.redirect(req.href + target, permanent=True)
raise HTTPNotFound('No handler matched request to %s',
req.path_info)
req.callbacks['chrome'] = partial(chrome.prepare_request,
handler=chosen_handler)
# Protect against CSRF attacks: we validate the form token
# for all POST requests with a content-type corresponding
# to form submissions
if req.method == 'POST':
ctype = req.get_header('Content-Type')
if ctype:
ctype, options = cgi.parse_header(ctype)
if ctype in ('application/x-www-form-urlencoded',
'multipart/form-data') and \
req.args.get('__FORM_TOKEN') != req.form_token:
if self.env.secure_cookies and req.scheme == 'http':
msg = _('Secure cookies are enabled, you must '
'use https to submit forms.')
else:
msg = _('Do you have cookies enabled?')
raise HTTPBadRequest(_('Missing or invalid form token.'
' %(msg)s', msg=msg))
# Process the request and render the template
resp = chosen_handler.process_request(req)
if resp:
if len(resp) == 2: # old Clearsilver template and HDF data
self.log.error("Clearsilver template are no longer "
"supported (%s)", resp[0])
raise TracError(
_("Clearsilver templates are no longer supported, "
"please contact your Trac administrator."))
# Genshi
template, data, content_type = \
self._post_process_request(req, *resp)
if 'hdfdump' in req.args:
req.perm.require('TRAC_ADMIN')
# debugging helper - no need to render first
out = StringIO()
pprint(data, out)
req.send(out.getvalue(), 'text/plain')
output = chrome.render_template(req, template, data,
content_type)
req.send(output, content_type or 'text/html')
else:
self._post_process_request(req)
except RequestDone:
raise
except:
# post-process the request in case of errors
err = sys.exc_info()
try:
self._post_process_request(req)
except RequestDone:
raise
except Exception, e:
self.log.error("Exception caught while post-processing"
" request: %s",
exception_to_unicode(e, traceback=True))
raise err[0], err[1], err[2]
except PermissionError, e:
raise HTTPForbidden(to_unicode(e))
except ResourceNotFound, e:
raise HTTPNotFound(e)
except TracError, e:
raise HTTPInternalError(e)
# Internal methods
def _get_perm(self, req):
if isinstance(req.session, FakeSession):
return FakePerm()
else:
return PermissionCache(self.env, self.authenticate(req))
def _get_session(self, req):
try:
return Session(self.env, req)
except TracError, e:
self.log.error("can't retrieve session: %s",
exception_to_unicode(e))
return FakeSession()
def _get_locale(self, req):
if has_babel:
preferred = req.session.get('language')
default = self.env.config.get('trac', 'default_language', '')
negotiated = get_negotiated_locale([preferred, default] +
req.languages)
self.log.debug("Negotiated locale: %s -> %s", preferred, negotiated)
return negotiated
def _get_lc_time(self, req):
lc_time = req.session.get('lc_time')
if not lc_time or lc_time == 'locale' and not has_babel:
lc_time = self.default_date_format
if lc_time == 'iso8601':
return 'iso8601'
return req.locale
def _get_timezone(self, req):
try:
return timezone(req.session.get('tz', self.default_timezone
or 'missing'))
except Exception:
return localtz
def _get_form_token(self, req):
"""Used to protect against CSRF.
The 'form_token' is a strong shared secret stored in a user
cookie. By requiring every POST form to contain this value
we're able to protect against CSRF attacks, since this value
is only known by the user and not by an attacker.
If the user does not have a `trac_form_token` cookie, a new
one is generated.
"""
if req.incookie.has_key('trac_form_token'):
return req.incookie['trac_form_token'].value
else:
req.outcookie['trac_form_token'] = hex_entropy(24)
req.outcookie['trac_form_token']['path'] = req.base_path or '/'
if self.env.secure_cookies:
req.outcookie['trac_form_token']['secure'] = True
if sys.version_info >= (2, 6):
req.outcookie['trac_form_token']['httponly'] = True
return req.outcookie['trac_form_token'].value
def _get_use_xsendfile(self, req):
return self.use_xsendfile
def _pre_process_request(self, req, chosen_handler):
for filter_ in self.filters:
chosen_handler = filter_.pre_process_request(req, chosen_handler)
return chosen_handler
def _post_process_request(self, req, *args):
nbargs = len(args)
resp = args
for f in reversed(self.filters):
# As the arity of `post_process_request` has changed since
# Trac 0.10, only filters with the same arity get passed real
# values. Errors will call all filters with None arguments,
# and the results will not be saved.
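# e.g. (illustrative) a filter defining post_process_request(req, template,
# data, content_type) has extra_arg_count == 3 and only gets real values
# when the handler returned a (template, data, content_type) triple.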
extra_arg_count = arity(f.post_process_request) - 1
if extra_arg_count == nbargs:
resp = f.post_process_request(req, *resp)
elif nbargs == 0:
f.post_process_request(req, *(None,)*extra_arg_count)
return resp
_slashes_re = re.compile(r'/+')
def dispatch_request(environ, start_response):
"""Main entry point for the Trac web interface.
:param environ: the WSGI environment dict
:param start_response: the WSGI callback for starting the response
"""
# SCRIPT_URL is an Apache var containing the URL before URL rewriting
# has been applied, so we can use it to reconstruct logical SCRIPT_NAME
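# e.g. (illustrative): SCRIPT_URL='/trac/wiki/Start' with PATH_INFO='/wiki/Start'
# yields SCRIPT_NAME='/trac'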
script_url = environ.get('SCRIPT_URL')
if script_url is not None:
path_info = environ.get('PATH_INFO')
if not path_info:
environ['SCRIPT_NAME'] = script_url
else:
# mod_wsgi squashes slashes in PATH_INFO (!)
script_url = _slashes_re.sub('/', script_url)
path_info = _slashes_re.sub('/', path_info)
if script_url.endswith(path_info):
environ['SCRIPT_NAME'] = script_url[:-len(path_info)]
# If the expected configuration keys aren't found in the WSGI environment,
# try looking them up in the process environment variables
environ.setdefault('trac.env_path', os.getenv('TRAC_ENV'))
environ.setdefault('trac.env_parent_dir',
os.getenv('TRAC_ENV_PARENT_DIR'))
environ.setdefault('trac.env_index_template',
os.getenv('TRAC_ENV_INDEX_TEMPLATE'))
environ.setdefault('trac.template_vars',
os.getenv('TRAC_TEMPLATE_VARS'))
environ.setdefault('trac.locale', '')
environ.setdefault('trac.base_url',
os.getenv('TRAC_BASE_URL'))
environ.setdefault('trac.bootstrap_handler',
os.getenv('TRAC_BOOTSTRAP_HANDLER'))
locale.setlocale(locale.LC_ALL, environ['trac.locale'])
# Load handler for environment lookup and instantiation of request objects
from trac.hooks import load_bootstrap_handler
bootstrap_ep = environ['trac.bootstrap_handler']
bootstrap = load_bootstrap_handler(bootstrap_ep, environ.get('wsgi.errors'))
# Determine the environment
env = env_error = None
try:
env = bootstrap.open_environment(environ, start_response)
except RequestDone:
return []
except EnvironmentError, e:
if e.__class__ is EnvironmentError:
raise
else:
env_error = e
except Exception, e:
env_error = e
else:
try:
if env.base_url_for_redirect:
environ['trac.base_url'] = env.base_url
# Web front-end type and version information
if not hasattr(env, 'webfrontend'):
mod_wsgi_version = environ.get('mod_wsgi.version')
if mod_wsgi_version:
mod_wsgi_version = (
"%s (WSGIProcessGroup %s WSGIApplicationGroup %s)" %
('.'.join([str(x) for x in mod_wsgi_version]),
environ.get('mod_wsgi.process_group'),
environ.get('mod_wsgi.application_group') or
'%{GLOBAL}'))
environ.update({
'trac.web.frontend': 'mod_wsgi',
'trac.web.version': mod_wsgi_version})
env.webfrontend = environ.get('trac.web.frontend')
if env.webfrontend:
env.systeminfo.append((env.webfrontend,
environ['trac.web.version']))
except Exception, e:
env_error = e
run_once = environ['wsgi.run_once']
req = None
if env_error is None:
try:
req = bootstrap.create_request(env, environ, start_response) \
if env is not None else Request(environ, start_response)
except Exception, e:
log = environ.get('wsgi.errors')
if log:
log.write("[FAIL] [Trac] Entry point '%s' "
"Method 'create_request' Reason %s" %
(bootstrap_ep, repr(exception_to_unicode(e))))
if req is None:
req = RequestWithSession(environ, start_response)
translation.make_activable(lambda: req.locale, env.path if env else None)
try:
return _dispatch_request(req, env, env_error)
finally:
translation.deactivate()
if env and not run_once:
env.shutdown(threading._get_ident())
# Now it's a good time to do some clean-ups
#
# Note: enable the '##' lines as soon as there's a suspicion
# of memory leak due to uncollectable objects (typically
# objects with a __del__ method caught in a cycle)
#
##gc.set_debug(gc.DEBUG_UNCOLLECTABLE)
unreachable = gc.collect()
##env.log.debug("%d unreachable objects found.", unreachable)
##uncollectable = len(gc.garbage)
##if uncollectable:
## del gc.garbage[:]
## env.log.warn("%d uncollectable objects found.", uncollectable)
def _dispatch_request(req, env, env_error):
resp = []
# fixup env.abs_href if `[trac] base_url` was not specified
if env and not env.abs_href.base:
env._abs_href = req.abs_href
try:
if not env and env_error:
raise HTTPInternalError(env_error)
try:
dispatcher = RequestDispatcher(env)
dispatcher.dispatch(req)
except RequestDone:
pass
resp = req._response or []
except HTTPException, e:
_send_user_error(req, env, e)
except Exception, e:
send_internal_error(env, req, sys.exc_info())
return resp
def _send_user_error(req, env, e):
# See trac/web/api.py for the definition of HTTPException subclasses.
if env:
env.log.warn('[%s] %s' % (req.remote_addr, exception_to_unicode(e)))
try:
# We first try to get localized error messages here, but we
# should ignore secondary errors if the main error was also
# due to i18n issues
title = _('Error')
if e.reason:
if title.lower() in e.reason.lower():
title = e.reason
else:
title = _('Error: %(message)s', message=e.reason)
except Exception:
title = 'Error'
# The message is based on the e.detail, which can be an Exception
# object, but not a TracError one: when creating HTTPException,
# a TracError.message is directly assigned to e.detail
if isinstance(e.detail, Exception): # not a TracError
message = exception_to_unicode(e.detail)
elif isinstance(e.detail, Fragment): # markup coming from a TracError
message = e.detail
else:
message = to_unicode(e.detail)
data = {'title': title, 'type': 'TracError', 'message': message,
'frames': [], 'traceback': None}
if e.code == 403 and req.authname == 'anonymous':
# TRANSLATOR: ... not logged in, you may want to 'do so' now (link)
do_so = tag.a(_("do so"), href=req.href.login())
req.chrome['notices'].append(
tag_("You are currently not logged in. You may want to "
"%(do_so)s now.", do_so=do_so))
try:
req.send_error(sys.exc_info(), status=e.code, env=env, data=data)
except RequestDone:
pass
def send_internal_error(env, req, exc_info):
if env:
env.log.error("Internal Server Error: %s",
exception_to_unicode(exc_info[1], traceback=True))
message = exception_to_unicode(exc_info[1])
traceback = get_last_traceback()
frames, plugins, faulty_plugins = [], [], []
th = 'http://trac-hacks.org'
has_admin = False
try:
has_admin = 'TRAC_ADMIN' in req.perm
except Exception:
pass
tracker = default_tracker
if has_admin and not isinstance(exc_info[1], MemoryError):
# Collect frame and plugin information
frames = get_frame_info(exc_info[2])
if env:
plugins = [p for p in get_plugin_info(env)
if any(c['enabled']
for m in p['modules'].itervalues()
for c in m['components'].itervalues())]
match_plugins_to_frames(plugins, frames)
# Identify the tracker where the bug should be reported
faulty_plugins = [p for p in plugins if 'frame_idx' in p]
faulty_plugins.sort(key=lambda p: p['frame_idx'])
if faulty_plugins:
info = faulty_plugins[0]['info']
if 'trac' in info:
tracker = info['trac']
elif info.get('home_page', '').startswith(th):
tracker = th
def get_description(_):
if env and has_admin:
sys_info = "".join("|| '''`%s`''' || `%s` ||\n"
% (k, v.replace('\n', '` [[br]] `'))
for k, v in env.get_systeminfo())
sys_info += "|| '''`jQuery`''' || `#JQUERY#` ||\n"
enabled_plugins = "".join("|| '''`%s`''' || `%s` ||\n"
% (p['name'], p['version'] or _('N/A'))
for p in plugins)
else:
sys_info = _("''System information not available''\n")
enabled_plugins = _("''Plugin information not available''\n")
return _("""\
==== How to Reproduce ====
While doing a %(method)s operation on `%(path_info)s`, Trac issued an internal error.
''(please provide additional details here)''
Request parameters:
{{{
%(req_args)s
}}}
User agent: `#USER_AGENT#`
==== System Information ====
%(sys_info)s
==== Enabled Plugins ====
%(enabled_plugins)s
==== Python Traceback ====
{{{
%(traceback)s}}}""",
method=req.method, path_info=req.path_info,
req_args=pformat(req.args), sys_info=sys_info,
enabled_plugins=enabled_plugins, traceback=to_unicode(traceback))
# Generate the description once in English, once in the current locale
description_en = get_description(lambda s, **kw: safefmt(s, kw))
try:
description = get_description(_)
except Exception:
description = description_en
data = {'title': 'Internal Error',
'type': 'internal', 'message': message,
'traceback': traceback, 'frames': frames,
'shorten_line': shorten_line, 'repr': safe_repr,
'plugins': plugins, 'faulty_plugins': faulty_plugins,
'tracker': tracker,
'description': description, 'description_en': description_en}
try:
req.send_error(exc_info, status=500, env=env, data=data)
except RequestDone:
pass
def send_project_index(environ, start_response, parent_dir=None,
env_paths=None):
req = Request(environ, start_response)
loadpaths = [pkg_resources.resource_filename('trac', 'templates')]
if req.environ.get('trac.env_index_template'):
env_index_template = req.environ['trac.env_index_template']
tmpl_path, template = os.path.split(env_index_template)
loadpaths.insert(0, tmpl_path)
else:
template = 'index.html'
data = {'trac': {'version': TRAC_VERSION,
'time': user_time(req, format_datetime)},
'req': req}
if req.environ.get('trac.template_vars'):
for pair in req.environ['trac.template_vars'].split(','):
key, val = pair.split('=')
data[key] = val
try:
href = Href(req.base_path)
projects = []
for env_name, env_path in get_environments(environ).items():
try:
env = open_environment(env_path,
use_cache=not environ['wsgi.run_once'])
proj = {
'env': env,
'name': env.project_name,
'description': env.project_description,
'href': href(env_name)
}
except Exception, e:
proj = {'name': env_name, 'description': to_unicode(e)}
projects.append(proj)
projects.sort(lambda x, y: cmp(x['name'].lower(), y['name'].lower()))
data['projects'] = projects
loader = TemplateLoader(loadpaths, variable_lookup='lenient',
default_encoding='utf-8')
tmpl = loader.load(template)
stream = tmpl.generate(**data)
if template.endswith('.xml'):
output = stream.render('xml')
req.send(output, 'text/xml')
else:
output = stream.render('xhtml', doctype=DocType.XHTML_STRICT,
encoding='utf-8')
req.send(output, 'text/html')
except RequestDone:
pass
def get_tracignore_patterns(env_parent_dir):
"""Return the list of patterns from env_parent_dir/.tracignore or
a default pattern of `".*"` if the file doesn't exist.
"""
path = os.path.join(env_parent_dir, '.tracignore')
try:
lines = [line.strip() for line in read_file(path).splitlines()]
except IOError:
return ['.*']
return [line for line in lines if line and not line.startswith('#')]
def get_environments(environ, warn=False):
"""Retrieve canonical environment name to path mapping.
Not all of the returned environments are necessarily valid, but they
are good candidates.
"""
env_paths = environ.get('trac.env_paths', [])
env_parent_dir = environ.get('trac.env_parent_dir')
if env_parent_dir:
env_parent_dir = os.path.normpath(env_parent_dir)
paths = dircache.listdir(env_parent_dir)[:]
dircache.annotate(env_parent_dir, paths)
# Filter paths that match the .tracignore patterns
ignore_patterns = get_tracignore_patterns(env_parent_dir)
paths = [path[:-1] for path in paths if path[-1] == '/'
and not any(fnmatch.fnmatch(path[:-1], pattern)
for pattern in ignore_patterns)]
env_paths.extend(os.path.join(env_parent_dir, project) \
for project in paths)
envs = {}
for env_path in env_paths:
env_path = os.path.normpath(env_path)
if not os.path.isdir(env_path):
continue
env_name = os.path.split(env_path)[1]
if env_name in envs:
if warn:
print >> sys.stderr, ('Warning: Ignoring project "%s" since '
'it conflicts with project "%s"'
% (env_path, envs[env_name]))
else:
envs[env_name] = env_path
return envs
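# Illustrative sketch (not part of Trac): dispatch_request is itself a WSGI
# application, so it can be served directly once the environment location is
# known. The path '/path/to/trac/env' and the host/port below are placeholders.
if __name__ == '__main__':
    from wsgiref.simple_server import make_server

    def application(environ, start_response):
        # Point Trac at a single environment; a parent-dir setup would set
        # 'trac.env_parent_dir' instead.
        environ['trac.env_path'] = '/path/to/trac/env'
        return dispatch_request(environ, start_response)

    httpd = make_server('127.0.0.1', 8000, application)
    httpd.serve_forever()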
|
|
# -*- coding: utf-8 -*-
################################################################################
## Form generated from reading UI file 'hearttransformwidget.ui'
##
## Created by: Qt User Interface Compiler version 5.15.2
##
## WARNING! All changes made in this file will be lost when recompiling UI file!
################################################################################
from PySide2.QtCore import *
from PySide2.QtGui import *
from PySide2.QtWidgets import *
from mapclientplugins.hearttransformstep.view.transformwidget import TransformWidget
class Ui_HeartTransformWidget(object):
def setupUi(self, HeartTransformWidget):
if not HeartTransformWidget.objectName():
HeartTransformWidget.setObjectName(u"HeartTransformWidget")
HeartTransformWidget.resize(819, 567)
self.horizontalLayout = QHBoxLayout(HeartTransformWidget)
self.horizontalLayout.setObjectName(u"horizontalLayout")
self.dockWidget = QDockWidget(HeartTransformWidget)
self.dockWidget.setObjectName(u"dockWidget")
sizePolicy = QSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(1)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.dockWidget.sizePolicy().hasHeightForWidth())
self.dockWidget.setSizePolicy(sizePolicy)
self.dockWidget.setStyleSheet(u"QToolBox::tab {\n"
" background: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1,\n"
" stop: 0 #E1E1E1, stop: 0.4 #DDDDDD,\n"
" stop: 0.5 #D8D8D8, stop: 1.0 #D3D3D3);\n"
" border-radius: 5px;\n"
" color: black;\n"
" }\n"
"\n"
" QToolBox::tab:selected { /* italicize selected tabs */\n"
" font: bold;\n"
" color: black;\n"
" }\n"
"QToolBox {\n"
" padding : 0\n"
"}")
self.dockWidget.setFloating(False)
self.dockWidget.setFeatures(QDockWidget.NoDockWidgetFeatures)
self.dockWidgetContents = QWidget()
self.dockWidgetContents.setObjectName(u"dockWidgetContents")
self.verticalLayout = QVBoxLayout(self.dockWidgetContents)
self.verticalLayout.setObjectName(u"verticalLayout")
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.toolBox = QToolBox(self.dockWidgetContents)
self.toolBox.setObjectName(u"toolBox")
self.toolBox.setFrameShape(QFrame.NoFrame)
self.toolBox.setFrameShadow(QFrame.Plain)
self.pageFile = QWidget()
self.pageFile.setObjectName(u"pageFile")
self.pageFile.setGeometry(QRect(0, 0, 124, 140))
self.verticalLayout_2 = QVBoxLayout(self.pageFile)
self.verticalLayout_2.setObjectName(u"verticalLayout_2")
self.horizontalLayout_4 = QHBoxLayout()
self.horizontalLayout_4.setObjectName(u"horizontalLayout_4")
self.pushButtonSave = QPushButton(self.pageFile)
self.pushButtonSave.setObjectName(u"pushButtonSave")
self.horizontalLayout_4.addWidget(self.pushButtonSave)
self.horizontalSpacer = QSpacerItem(40, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)
self.horizontalLayout_4.addItem(self.horizontalSpacer)
self.verticalLayout_2.addLayout(self.horizontalLayout_4)
self.horizontalLayout_5 = QHBoxLayout()
self.horizontalLayout_5.setObjectName(u"horizontalLayout_5")
self.pushButtonLoad = QPushButton(self.pageFile)
self.pushButtonLoad.setObjectName(u"pushButtonLoad")
self.horizontalLayout_5.addWidget(self.pushButtonLoad)
self.horizontalSpacer_3 = QSpacerItem(40, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)
self.horizontalLayout_5.addItem(self.horizontalSpacer_3)
self.verticalLayout_2.addLayout(self.horizontalLayout_5)
self.verticalSpacer_5 = QSpacerItem(20, 146, QSizePolicy.Minimum, QSizePolicy.Expanding)
self.verticalLayout_2.addItem(self.verticalSpacer_5)
self.horizontalLayout_3 = QHBoxLayout()
self.horizontalLayout_3.setObjectName(u"horizontalLayout_3")
self.pushButtonDone = QPushButton(self.pageFile)
self.pushButtonDone.setObjectName(u"pushButtonDone")
self.horizontalLayout_3.addWidget(self.pushButtonDone)
self.horizontalSpacer_4 = QSpacerItem(40, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)
self.horizontalLayout_3.addItem(self.horizontalSpacer_4)
self.verticalLayout_2.addLayout(self.horizontalLayout_3)
self.verticalSpacer = QSpacerItem(20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding)
self.verticalLayout_2.addItem(self.verticalSpacer)
self.toolBox.addItem(self.pageFile, u"File")
self.pageView = QWidget()
self.pageView.setObjectName(u"pageView")
self.pageView.setGeometry(QRect(0, 0, 124, 192))
self.verticalLayout_3 = QVBoxLayout(self.pageView)
self.verticalLayout_3.setObjectName(u"verticalLayout_3")
self.horizontalLayout_2 = QHBoxLayout()
self.horizontalLayout_2.setObjectName(u"horizontalLayout_2")
self.pushButtonViewAll = QPushButton(self.pageView)
self.pushButtonViewAll.setObjectName(u"pushButtonViewAll")
self.horizontalLayout_2.addWidget(self.pushButtonViewAll)
self.horizontalSpacer_2 = QSpacerItem(40, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(self.horizontalSpacer_2)
self.verticalLayout_3.addLayout(self.horizontalLayout_2)
self.horizontalLayout_6 = QHBoxLayout()
self.horizontalLayout_6.setObjectName(u"horizontalLayout_6")
self.pushButtonHideAll = QPushButton(self.pageView)
self.pushButtonHideAll.setObjectName(u"pushButtonHideAll")
self.horizontalLayout_6.addWidget(self.pushButtonHideAll)
self.horizontalSpacer_5 = QSpacerItem(40, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)
self.horizontalLayout_6.addItem(self.horizontalSpacer_5)
self.verticalLayout_3.addLayout(self.horizontalLayout_6)
self.listWidget = QListWidget(self.pageView)
self.listWidget.setObjectName(u"listWidget")
sizePolicy1 = QSizePolicy(QSizePolicy.Preferred, QSizePolicy.Expanding)
sizePolicy1.setHorizontalStretch(1)
sizePolicy1.setVerticalStretch(0)
sizePolicy1.setHeightForWidth(self.listWidget.sizePolicy().hasHeightForWidth())
self.listWidget.setSizePolicy(sizePolicy1)
self.listWidget.setMinimumSize(QSize(100, 0))
self.verticalLayout_3.addWidget(self.listWidget)
self.verticalSpacer_2 = QSpacerItem(20, 238, QSizePolicy.Minimum, QSizePolicy.Expanding)
self.verticalLayout_3.addItem(self.verticalSpacer_2)
self.toolBox.addItem(self.pageView, u"View")
self.pageSegmentation = QWidget()
self.pageSegmentation.setObjectName(u"pageSegmentation")
self.pageSegmentation.setGeometry(QRect(0, 0, 198, 426))
self.verticalLayout_5 = QVBoxLayout(self.pageSegmentation)
self.verticalLayout_5.setObjectName(u"verticalLayout_5")
self.groupBox = QGroupBox(self.pageSegmentation)
self.groupBox.setObjectName(u"groupBox")
sizePolicy2 = QSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
sizePolicy2.setHorizontalStretch(0)
sizePolicy2.setVerticalStretch(0)
sizePolicy2.setHeightForWidth(self.groupBox.sizePolicy().hasHeightForWidth())
self.groupBox.setSizePolicy(sizePolicy2)
self.verticalLayout_4 = QVBoxLayout(self.groupBox)
self.verticalLayout_4.setObjectName(u"verticalLayout_4")
self.spinBoxPointSize = QSpinBox(self.groupBox)
self.spinBoxPointSize.setObjectName(u"spinBoxPointSize")
self.spinBoxPointSize.setMinimum(1)
self.verticalLayout_4.addWidget(self.spinBoxPointSize)
self.verticalSpacer_3 = QSpacerItem(20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding)
self.verticalLayout_4.addItem(self.verticalSpacer_3)
self.verticalLayout_5.addWidget(self.groupBox)
self.groupBox_2 = QGroupBox(self.pageSegmentation)
self.groupBox_2.setObjectName(u"groupBox_2")
sizePolicy2.setHeightForWidth(self.groupBox_2.sizePolicy().hasHeightForWidth())
self.groupBox_2.setSizePolicy(sizePolicy2)
self.verticalLayout_6 = QVBoxLayout(self.groupBox_2)
self.verticalLayout_6.setObjectName(u"verticalLayout_6")
self.comboBoxMode = QComboBox(self.groupBox_2)
self.comboBoxMode.addItem("")
self.comboBoxMode.addItem("")
self.comboBoxMode.addItem("")
self.comboBoxMode.setObjectName(u"comboBoxMode")
self.verticalLayout_6.addWidget(self.comboBoxMode)
self.verticalSpacer_6 = QSpacerItem(20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding)
self.verticalLayout_6.addItem(self.verticalSpacer_6)
self.verticalLayout_5.addWidget(self.groupBox_2)
self.verticalSpacer_4 = QSpacerItem(20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding)
self.verticalLayout_5.addItem(self.verticalSpacer_4)
self.toolBox.addItem(self.pageSegmentation, u"Transform")
self.verticalLayout.addWidget(self.toolBox)
self.dockWidget.setWidget(self.dockWidgetContents)
self.horizontalLayout.addWidget(self.dockWidget)
self.widgetZinc = TransformWidget(HeartTransformWidget)
self.widgetZinc.setObjectName(u"widgetZinc")
sizePolicy3 = QSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
sizePolicy3.setHorizontalStretch(3)
sizePolicy3.setVerticalStretch(0)
sizePolicy3.setHeightForWidth(self.widgetZinc.sizePolicy().hasHeightForWidth())
self.widgetZinc.setSizePolicy(sizePolicy3)
self.horizontalLayout.addWidget(self.widgetZinc)
self.retranslateUi(HeartTransformWidget)
self.toolBox.setCurrentIndex(2)
QMetaObject.connectSlotsByName(HeartTransformWidget)
# setupUi
def retranslateUi(self, HeartTransformWidget):
HeartTransformWidget.setWindowTitle(QCoreApplication.translate("HeartTransformWidget", u"Heart Transform", None))
self.dockWidget.setWindowTitle(QCoreApplication.translate("HeartTransformWidget", u"Heart Tra&nsform Tools", None))
self.pushButtonSave.setText(QCoreApplication.translate("HeartTransformWidget", u"Save", None))
self.pushButtonLoad.setText(QCoreApplication.translate("HeartTransformWidget", u"Load", None))
self.pushButtonDone.setText(QCoreApplication.translate("HeartTransformWidget", u"Done", None))
self.toolBox.setItemText(self.toolBox.indexOf(self.pageFile), QCoreApplication.translate("HeartTransformWidget", u"File", None))
self.pushButtonViewAll.setText(QCoreApplication.translate("HeartTransformWidget", u"View All", None))
self.pushButtonHideAll.setText(QCoreApplication.translate("HeartTransformWidget", u"Hide All", None))
self.toolBox.setItemText(self.toolBox.indexOf(self.pageView), QCoreApplication.translate("HeartTransformWidget", u"View", None))
self.groupBox.setTitle(QCoreApplication.translate("HeartTransformWidget", u"Point size", None))
self.groupBox_2.setTitle(QCoreApplication.translate("HeartTransformWidget", u"Mode", None))
self.comboBoxMode.setItemText(0, QCoreApplication.translate("HeartTransformWidget", u"Apex", None))
self.comboBoxMode.setItemText(1, QCoreApplication.translate("HeartTransformWidget", u"Base", None))
self.comboBoxMode.setItemText(2, QCoreApplication.translate("HeartTransformWidget", u"RV", None))
self.toolBox.setItemText(self.toolBox.indexOf(self.pageSegmentation), QCoreApplication.translate("HeartTransformWidget", u"Transform", None))
# retranslateUi
|
|
#!/usr/bin/python
#
# Copyright (c) 2015, Arista Networks, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# Neither the name of Arista Networks nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ARISTA NETWORKS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
DOCUMENTATION = """
---
module: eos_varp_interface
short_description: Manage EOS Virtual Router configuration on specific interfaces.
description:
- This module will manage interface Varp configuration on EOS nodes. Typically
this applies to Vlan interfaces only, using the ip virtual-router address
command.
version_added: 1.2.0
category: VARP
author: Arista EOS+
requirements:
- Arista EOS 4.13.7M or later with command API enabled
- Python Client for eAPI 0.4.0 or later
notes:
- All configuration is idempotent unless otherwise specified
- Supports eos metaparameters for using the eAPI transport
- Does not support stateful resource configuration.
options:
name:
description:
- The Varp interface on which the shared_ip addresses will be configured.
These are typically Vlan interfaces. The interface name must match the
way it is written in the configuration. For example, use Vlan100,
not vlan100 or vlan 100.
required: true
default: null
choices: []
aliases: []
version_added: 1.2.0
shared_ip:
description:
- The list of IP addresses that will be shared in the Varp configuration.
The list of IPs should be a string of comma-separated addresses. Please
provide a list of sorted IPs.
required: true
default: null
choices: []
aliases: []
version_added: 1.2.0
"""
EXAMPLES = """
- eos_varp_interface: name=Vlan1000 shared_ip='1.1.1.2,1.1.1.3,1.1.1.4'
"""
#<<EOS_COMMON_MODULE_START>>
import syslog
import collections
from ansible.module_utils.basic import *
try:
import pyeapi
PYEAPI_AVAILABLE = True
except ImportError:
PYEAPI_AVAILABLE = False
DEFAULT_SYSLOG_PRIORITY = syslog.LOG_NOTICE
DEFAULT_CONNECTION = 'localhost'
TRANSPORTS = ['socket', 'http', 'https', 'http_local']
class EosAnsibleModule(AnsibleModule):
meta_args = {
'config': dict(),
'username': dict(),
'password': dict(),
'host': dict(),
'connection': dict(default=DEFAULT_CONNECTION),
'transport': dict(choices=TRANSPORTS),
'port': dict(),
'debug': dict(type='bool', default='false'),
'logging': dict(type='bool', default='true')
}
stateful_args = {
'state': dict(default='present', choices=['present', 'absent']),
}
def __init__(self, stateful=True, *args, **kwargs):
kwargs['argument_spec'].update(self.meta_args)
self._stateful = stateful
if stateful:
kwargs['argument_spec'].update(self.stateful_args)
super(EosAnsibleModule, self).__init__(*args, **kwargs)
self.result = dict(changed=False, changes=dict())
self._debug = kwargs.get('debug') or self.boolean(self.params['debug'])
self._logging = kwargs.get('logging') or self.params['logging']
self.log('DEBUG flag is %s' % self._debug)
self.debug('pyeapi_version', self.check_pyeapi())
self.debug('stateful', self._stateful)
self.debug('params', self.params)
self._attributes = self.map_argument_spec()
self.validate()
self._node = self.connect()
self._instance = None
self.desired_state = self.params['state'] if self._stateful else None
self.exit_after_flush = kwargs.get('exit_after_flush')
@property
def instance(self):
if self._instance:
return self._instance
func = self.func('instance')
if not func:
self.fail('Module does not support "instance"')
try:
self._instance = func(self)
except Exception as exc:
self.fail('instance[error]: %s' % exc.message)
self.log("called instance: %s" % self._instance)
return self._instance
@property
def attributes(self):
return self._attributes
@property
def node(self):
if self._node:
return self._node
self._node = self.connect()
return self._node
def check_pyeapi(self):
if not PYEAPI_AVAILABLE:
self.fail('Unable to import pyeapi, is it installed?')
return pyeapi.__version__
def map_argument_spec(self):
"""map_argument_spec maps only the module argument spec to attrs
This method will map the argumentspec minus the meta_args to attrs
and return the attrs. This returns a dict object that includes only
the original argspec plus the stateful_args (if self._stateful=True)
Returns:
dict: Returns a dict object that includes the original
argument_spec plus stateful_args with values minus meta_args
"""
keys = set(self.params).difference(self.meta_args)
attrs = dict()
attrs = dict([(k, self.params[k]) for k in self.params if k in keys])
if 'CHECKMODE' in attrs:
del attrs['CHECKMODE']
return attrs
def validate(self):
for key, value in self.attributes.iteritems():
func = self.func('validate_%s' % key)
if func:
self.attributes[key] = func(value)
def create(self):
if not self.check_mode:
func = self.func('create')
if not func:
self.fail('Module must define "create" function')
return self.invoke(func, self)
def remove(self):
if not self.check_mode:
func = self.func('remove')
if not func:
self.fail('Module must define "remove" function')
return self.invoke(func, self)
def flush(self, exit_after_flush=False):
self.exit_after_flush = exit_after_flush
if self.desired_state == 'present' or not self._stateful:
if self.instance.get('state') == 'absent':
changed = self.create()
self.result['changed'] = changed or True
self.refresh()
changeset = self.attributes.viewitems() - self.instance.viewitems()
if self._debug:
self.debug('desired_state', self.attributes)
self.debug('current_state', self.instance)
changes = self.update(changeset)
if changes:
self.result['changes'] = changes
self.result['changed'] = True
self._attributes.update(changes)
flush = self.func('flush')
if flush:
self.invoke(flush, self)
elif self.desired_state == 'absent' and self._stateful:
if self.instance.get('state') == 'present':
changed = self.remove()
self.result['changed'] = changed or True
elif self._stateful:
if self.desired_state != self.instance.get('state'):
changed = self.invoke(self.instance.get('state'))
self.result['changed'] = changed or True
self.refresh()
self.result['instance'] = self.instance
if self.exit_after_flush:
self.exit()
def update(self, changeset):
changes = dict()
for key, value in changeset:
if value is not None:
changes[key] = value
func = self.func('set_%s' % key)
if func and not self.check_mode:
try:
self.invoke(func, self)
except Exception as exc:
self.fail(exc.message)
return changes
def connect(self):
if self.params['config']:
pyeapi.load_config(self.params['config'])
config = dict()
if self.params['connection']:
config = pyeapi.config_for(self.params['connection'])
if not config:
msg = 'Connection name "%s" not found' % self.params['connection']
self.fail(msg)
if self.params['username']:
config['username'] = self.params['username']
if self.params['password']:
config['password'] = self.params['password']
if self.params['transport']:
config['transport'] = self.params['transport']
if self.params['port']:
config['port'] = self.params['port']
if self.params['host']:
config['host'] = self.params['host']
if 'transport' not in config:
self.fail('Connection must define a transport')
connection = pyeapi.client.make_connection(**config)
node = pyeapi.client.Node(connection, **config)
try:
resp = node.enable('show version')
self.debug('eos_version', resp[0]['result']['version'])
self.debug('eos_model', resp[0]['result']['modelName'])
except (pyeapi.eapilib.ConnectionError, pyeapi.eapilib.CommandError):
self.fail('unable to connect to %s' % node)
else:
self.log('Connected to node %s' % node)
self.debug('node', str(node))
return node
def config(self, commands):
self.result['changed'] = True
if not self.check_mode:
self.node.config(commands)
def api(self, module):
return self.node.api(module)
def func(self, name):
return globals().get(name)
def invoke(self, func, *args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as exc:
self.fail(exc.message)
def invoke_function(self, name, *args, **kwargs):
func = self.func(name)
if func:
return self.invoke(func, *args, **kwargs)
def fail(self, msg):
self.invoke_function('on_fail', self)
self.log('ERROR: %s' % msg, syslog.LOG_ERR)
self.fail_json(msg=msg)
def exit(self):
self.invoke_function('on_exit', self)
self.log('Module completed successfully')
self.exit_json(**self.result)
def refresh(self):
self._instance = None
def debug(self, key, value):
if self._debug:
if 'debug' not in self.result:
self.result['debug'] = dict()
self.result['debug'][key] = value
def log(self, message, priority=None):
if self._logging:
syslog.openlog('ansible-eos')
priority = priority or DEFAULT_SYSLOG_PRIORITY
syslog.syslog(priority, str(message))
@classmethod
def add_state(cls, name):
cls.stateful_args['state']['choices'].append(name)
#<<EOS_COMMON_MODULE_END>>
def instance(module):
""" Returns an instance of Varp which includes the global mac-address. The
get() method will return other data but this module is only interested in
the virtual-router mac address.
"""
name = module.attributes['name']
_instance = dict(name=name)
result = module.api('varp').interfaces.get(name)
if result:
result['addresses'].sort()
_instance['shared_ip'] = ','.join(result['addresses'])
module.log('This is the list %s' % _instance['shared_ip'])
return _instance
def set_shared_ip(module):
""" Configures the defined interfaces virtual-router ip addresses. This
method takes the comma-separated list of shared IP addresses and calls
the pyeapi set_addresses method to configure the virtual addresses.
"""
name = module.attributes['name']
shared_ip = module.attributes['shared_ip'].split(',')
module.log('Invoked set_shared_ip for eos_varp_interface[%s] with '
'shared_ips %s' % (name, ','.join(shared_ip)))
module.api('varp').interfaces.set_addresses(name, shared_ip)
def main():
""" The main module routine called when the module is run by Ansible
"""
argument_spec = dict(
name=dict(required=True),
shared_ip=dict(required=True)
)
module = EosAnsibleModule(argument_spec=argument_spec,
stateful=False,
supports_check_mode=True)
module.flush(True)
main()
|
|
#!/usr/bin/env python
#
# Manage bundled python module files for use in PyPy.js.
#
# This script is used to manage an indexed bundle of python module files in
# a format that makes them easy to use for PyPy.js. In particular it lets
# us hack around the fact that we can't use an async XMLHttpRequest from
# inside the compiled PyPy.js VM.
#
# When PyPy.js goes to import a module, the contents of the module file
# and all of its dependencies must already be loaded into the virtual
# filesystem. But loading the entire stdlib at startup would waste time,
# bandwidth, and memory.
#
# Instead, we can load just the bundle's index file at startup, which gives
# metadata about the available modules and their dependencies. This data
# can be used to load the module files on demand before passing 'import'
# statements through to the VM for execution.
#
import os
import re
import sys
import ast
import json
import codecs
import argparse
import shutil
# The root of our pypy source checkout, if it exists.
PYPY_ROOT = os.path.join(
os.path.dirname(__file__),
"../deps/pypy",
)
# Modules that are builtin, so we shouldn't expect them in the bundle.
BUILTIN_MODULES = [
"__builtin__",
"__pypy__",
"_ast",
"_codecs",
"_collections",
"_csv",
"_file",
"_hashlib",
"_io",
"_locale",
"_md5",
"_minimal_curses",
"_multibytecodec",
"_pickle_support",
"_pypyjson",
"_random",
"_sha",
"_socket",
"_sre",
"_struct",
"_testing",
"_warnings",
"_weakref",
"array",
"binascii",
"cStringIO",
"cmath",
"errno",
"exceptions",
"gc",
"imp",
"itertools",
"js",
"marshal",
"math",
"operator",
"parser",
"posix",
"pypyjit",
"symbol",
"sys",
"time",
"token",
"unicodedata",
]
# Modules that are not going to work, so don't bother including them.
EXCLUDE_MODULES = [
"readline",
"ntpath",
"macpath",
"os2emxpath",
"ctypes",
"ctypes_support",
"ctypes_configure",
"ctypes_configure_cache",
"_ctypes",
"cffi",
"_ffi",
"_rawffi",
"subprocess",
"_subprocess",
"threading",
"thread",
"multiprocessing",
"_multiprocessing",
"audiodev",
"audioop",
"Carbon",
"MacOS",
"_osx_support"
"smtpd",
"idlelib",
"Tkinter",
"Tkconstants",
"_tkinter",
"ttk",
"__main__",
"bsddb",
"ssl",
"_ssl",
"_winreg",
"cpyext",
"symtable",
"java",
"msilib",
"dos",
"nt",
"os2",
"org.python",
"riscos",
"riscosenviron",
"vmslib",
"win32api",
"win32con",
"win32pipe",
"win32wnet",
"win32evtlog",
"msvcrt",
"hotshot",
"sunau",
"sunaudio",
"wave",
"sqlite3",
"curses",
]
# Modules that are pretty much always needed, and so should be loaded eagerly.
PRELOAD_MODULES = [
"os",
"code",
# Python has some magic to auto-load encodings when they're needed,
# which doesn't work right if they're not preloaded.
"encodings.ascii",
"encodings.hex_codec",
"encodings.base64_codec",
"encodings.latin_1",
"encodings.string_escape",
"encodings.utf_8",
"encodings.utf_16",
"encodings.unicode_internal",
"encodings.unicode_escape",
"encodings.raw_unicode_escape",
]
def main(argv):
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest="subcommand")
parser_init = subparsers.add_parser("init")
parser_init.add_argument("bundle_dir")
parser_init.add_argument("--exclude", action="append",
help="exclude these modules from the bundle")
parser_init.add_argument("--include", action="append",
help="include these modules in the bundle, overrides exclude")
parser_init.add_argument("--preload", action="append",
help="preload these modules in the bundle")
parser_init.add_argument("--pypy-root", action="store",
help="root directory of pypy source checkout")
parser_add = subparsers.add_parser("add")
parser_add.add_argument("bundle_dir")
parser_add.add_argument("modules", nargs="+", metavar="module")
parser_add.add_argument("--exclude", action="append",
help="exclude these modules from the bundle")
parser_add.add_argument("--preload", action="append",
help="preload these modules in the bundle")
parser_add.add_argument("--include", action="append",
help="include these modules in the bundle, overrides exclude")
parser_preload = subparsers.add_parser("preload")
parser_preload.add_argument("bundle_dir")
parser_preload.add_argument("modules", nargs="+", metavar="module")
opts = parser.parse_args(argv[1:])
bundler = ModuleBundle(opts.bundle_dir)
if opts.subcommand == "init":
cmd_init(bundler, opts)
elif opts.subcommand == "add":
cmd_add(bundler, opts)
elif opts.subcommand == "preload":
cmd_preload(bundler, opts)
else:
assert False, "unknown subcommand {}".format(opts.subcommand)
return 0
def cmd_init(bundler, opts):
# Update the bundler's exclusion list.
if opts.exclude:
for name in opts.exclude:
if not bundler.is_excluded(name):
bundler.exclude.append(name)
if opts.include:
for name in opts.include:
if bundler.is_excluded(name):
bundler.exclude.remove(name)
# Walk the pypy stdlib dirs to find all available module files and
# copy them into the bundle.
if opts.pypy_root:
pypy_root = opts.pypy_root
else:
pypy_root = PYPY_ROOT
for modroot in ("lib-python/2.7", "lib_pypy"):
rootdir = os.path.join(pypy_root, modroot)
bundler.bundle_directory(rootdir)
# Preload the default set of preloaded modules.
for name in PRELOAD_MODULES:
bundler.preload_module(name)
# Along with any that were explicitly requested.
if opts.preload:
for name in opts.preload:
bundler.preload_module(name)
bundler.flush_index()
def cmd_add(bundler, opts):
# Update the exclude list if necessary.
if opts.exclude:
for name in opts.exclude:
if not bundler.is_excluded(name):
bundler.exclude.append(name)
if opts.include:
for name in opts.include:
if bundler.is_excluded(name):
bundler.exclude.remove(name)
# Find and bundle each module/package.
for name in opts.modules:
if os.path.exists(name):
bundler.bundle_path(name)
else:
# XXX TODO: try to find it by importing it?
raise ValueError("non-existent module: {}".format(name))
# Preload any additional modules that were specified.
if opts.preload:
for name in opts.preload:
bundler.preload_module(name)
bundler.flush_index()
def cmd_preload(bundler, opts):
for name in opts.modules:
bundler.preload_module(name)
bundler.flush_index()
class ModuleBundle(object):
"""Class managing a directory of bundled modules.
This class builds up a directory containing python module files along
with an "index.json" file giving metadata about their contents and
dependencies. Loading the index gives enough information to determine
what files should be loaded in order to handle importing of any available
module.
The structure of index.json is as follows:
{
"modules": { # maps dotted module name to metadata
"a.b": {
"file": "<a.py>" # for modules, relative path to .py file
"dir": "<A>" # for packages, relative path to package dir
"imports": [] # list of module names imported by this module
}
},
"preload": { # maps dotted module name to raw file contents
"x.y": "<code>",
}
}
There is also an ancillary file "meta.json" which tracks information
useful when building up the bundle, but not necessary when loading modules
from it. Keeping it separate avoids paying the overhead of loading the
extra information when using the bundle.
The structure of meta.json is as follows:
{
"exclude": [ # list of modules excluded from the bundle
"some.module"
]
"missing": { # maps dotted module names that are not found in the
"a.b.c.d": [] # bundle to the modules that would import them.
}
}
"""
def __init__(self, bundle_dir):
self.bundle_dir = os.path.abspath(bundle_dir)
self.index_file = os.path.join(self.bundle_dir, "index.json")
self.meta_file = os.path.join(self.bundle_dir, "meta.json")
self.modules = {}
self.preload = {}
self.exclude = list(EXCLUDE_MODULES)
self.missing = {}
self._modules_pending_import_analysis = []
if not os.path.isdir(self.bundle_dir):
os.makedirs(self.bundle_dir)
if not os.path.exists(self.index_file):
self.flush_index()
self.load_index()
def flush_index(self):
"""Write out the index file based on in-memory state."""
# Atomically update the index file.
with open(self.index_file + ".new", "w") as f:
json.dump({
"modules": self.modules,
"preload": self.preload,
}, f, indent=2, sort_keys=True)
if sys.platform.startswith("win32"):
shutil.copy(self.index_file + ".new", self.index_file)
os.remove(self.index_file + ".new")
else:
os.rename(self.index_file + ".new", self.index_file)
# Atomically update the meta file.
with open(self.meta_file + ".new", "w") as f:
json.dump({
"exclude": self.exclude,
"missing": self.missing,
}, f, indent=2, sort_keys=True)
if sys.platform.startswith("win32"):
shutil.copy(self.meta_file + ".new", self.meta_file)
os.remove(self.meta_file + ".new")
else:
os.rename(self.meta_file + ".new", self.meta_file)
# Remove preloaded module files from disk, now that their contents
# are safely flushed to the index file.
for name in self.preload:
moddata = self.modules[name]
if "file" in moddata:
filepath = os.path.join(self.bundle_dir, moddata["file"])
if os.path.exists(filepath):
os.unlink(filepath)
def load_index(self):
"""Load in-memory state from the index file."""
with open(self.index_file) as f:
index = json.load(f)
self.modules = index["modules"]
self.preload = index["preload"]
with open(self.meta_file) as f:
meta = json.load(f)
self.exclude = meta["exclude"]
self.missing = meta["missing"]
def is_dotted_prefix(self, prefix, name):
"""Check whether a dotted name is a prefix of another."""
if name == prefix:
return True
if name.startswith(prefix):
if name[len(prefix)] == ".":
return True
return False
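    # For example, is_dotted_prefix("os", "os.path") is True, while
    # is_dotted_prefix("os", "ostrich") is False: the character immediately
    # following the prefix must be a dot.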
def is_builtin(self, name):
"""Check whether the named module is a builtin."""
for builtin in BUILTIN_MODULES:
if self.is_dotted_prefix(builtin, name):
return True
return False
def is_excluded(self, name):
"""Check whether the named module should be excluded."""
for excl in self.exclude:
if self.is_dotted_prefix(excl, name):
return True
return False
def bundle_module(self, filepath):
"""Bundle the given file as a python module."""
filepath = os.path.abspath(filepath)
rootdir, relpath = os.path.split(filepath)
self._gather_module("", rootdir, relpath)
self._perform_pending_import_analysis()
def bundle_package(self, dirpath):
"""Bundle the given directory as a python package."""
dirpath = os.path.abspath(dirpath)
rootdir, relpath = os.path.split(dirpath)
self._gather_package("", rootdir, relpath)
self._perform_pending_import_analysis()
def bundle_directory(self, dirpath):
"""Bundle all modules/packages in the given directory."""
dirpath = os.path.abspath(dirpath)
for nm in os.listdir(dirpath):
if nm.startswith("."):
continue
itempath = os.path.join(dirpath, nm)
if os.path.isdir(itempath):
if os.path.exists(os.path.join(itempath, "__init__.py")):
self.bundle_package(itempath)
elif nm.endswith(".py"):
self.bundle_module(itempath)
def bundle_path(self, path):
"""Bundle whatever exists at the given path.
The path could specify a module, a package, or a directory of modules
and packages. Its type is intuited based on the contents of the path.
"""
if os.path.isfile(path):
self.bundle_module(path)
elif os.path.isfile(os.path.join(path, "__init__.py")):
self.bundle_package(path)
else:
self.bundle_directory(path)
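    # bundle_path dispatch, for hypothetical inputs:
    #   bundle_path("mymod.py")  -> bundle_module()    (a single .py file)
    #   bundle_path("mypkg")     -> bundle_package()   (contains __init__.py)
    #   bundle_path("lib_pypy")  -> bundle_directory() (plain directory of modules)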
def _gather_module(self, package, rootdir, relpath):
"""Gather a python module file into the bundle.
Given the name of a python module, the root import directory under
which it was found, and the relative path from that root to the
module file, this method copies the file into the bundle and adds it
to the list of all available modules.
"""
modname = os.path.basename(relpath)[:-3]
if package:
modname = package + "." + modname
if not self.is_excluded(modname):
# Add it to the list of available modules.
moddata = {"file": relpath.replace("\\", "/")}
self.modules[modname] = moddata
# Copy its source file across.
self._copy_py_file(os.path.join(rootdir, relpath),
os.path.join(self.bundle_dir, relpath))
# We'll need to analyse its imports once all siblings are gathered.
self._modules_pending_import_analysis.append(modname)
def _gather_package(self, package, rootdir, relpath):
"""Recursively gather a python package directory into the bundle.
Given the name of the python package, the root import directory under
which it was found, and the relative path from that root to the
package directory, this method copies the package and all its contents
into the bundle and adds them to the list of available modules.
"""
abspath = os.path.join(rootdir, relpath)
subpackage = os.path.basename(abspath)
if package:
subpackage = package + "." + subpackage
if not self.is_excluded(subpackage):
# Note it as an available package.
self.modules[subpackage] = {"dir": relpath.replace("\\", "/")}
if not os.path.isdir(os.path.join(self.bundle_dir, relpath)):
os.makedirs(os.path.join(self.bundle_dir, relpath))
# Include it in post-gathering analysis.
self._modules_pending_import_analysis.append(subpackage)
# Recursively gather all its contents.
for nm in os.listdir(abspath):
if nm.startswith("."):
continue
subrelpath = os.path.join(relpath, nm)
subabspath = os.path.join(abspath, nm)
if os.path.isdir(subabspath):
if os.path.exists(os.path.join(subabspath, "__init__.py")):
self._gather_package(subpackage, rootdir, subrelpath)
elif nm.endswith(".py"):
self._gather_module(subpackage, rootdir, subrelpath)
def _copy_py_file(self, srcpath, dstpath):
"""Copy a python source file into the bundle.
        This method copies the contents of a python source file into the bundle.
Since browsers usually expect strings in utf-8 format, it will try to
detect source files in other encodings and transparently convert them
to utf-8.
"""
# XXX TODO: copy in chunks, like shutil would do?
with open(srcpath, "rb") as f_src:
data = f_src.read()
# Look for the encoding marker in the first two lines of the file.
lines = data.split("\n", 2)
encoding = None
for i in xrange(2):
if i >= len(lines):
break
if lines[i].startswith("#"):
match = re.search(r"coding[:=]\s*([-\w.]+)", lines[i])
if match is not None:
encoding = match.group(1)
try:
codecs.lookup(encoding)
except LookupError:
encoding = None
break
# Write normalized data to output file.
with open(dstpath, "wb") as f_dst:
if encoding is None:
f_dst.write(data)
else:
for j in xrange(i):
f_dst.write(lines[j])
f_dst.write("\n")
f_dst.write(lines[i].replace(encoding, "utf-8"))
f_dst.write("\n")
for j in xrange(i + 1, len(lines)):
f_dst.write(lines[j].decode(encoding).encode("utf8"))
if j < len(lines) - 1:
f_dst.write("\n")
def _perform_pending_import_analysis(self):
"""Perform import analysis on any pending modules.
To make it easier to resolve intra-package relative imports, we
        delay doing any import analysis until all the contents of a package
have been gathered into the bundle. This method is called after
the gathering in order to perform the pending analyses.
"""
while self._modules_pending_import_analysis:
modname = self._modules_pending_import_analysis.pop()
# Check if this new module resolves previously-missing imports.
# XXX TODO: this is pretty ugly and inefficient...
for depname in self.missing.keys():
if self.is_dotted_prefix(modname, depname):
revdeps = self.missing.pop(depname)
for revdepname in revdeps:
revdepdata = self.modules[revdepname]
revdepdata["imports"].remove(depname)
if modname not in revdepdata["imports"]:
revdepdata["imports"].append(modname)
# Find all the names that it imports.
moddata = self.modules[modname]
if "file" not in moddata:
continue
modpath = os.path.join(self.bundle_dir, moddata["file"])
impf = ImportFinder(modname, modpath, self.modules)
moddata["imports"] = impf.find_imported_modules()
# Check for any imports that are missing from the bundle.
for depname in moddata["imports"]:
if depname not in self.modules:
if not self.is_excluded(depname):
if not self.is_builtin(depname):
if depname not in self.missing:
self.missing[depname] = []
self.missing[depname].append(modname)
def preload_module(self, name):
"""Preload a module's file data into the index itself.
This is a little trick to speed up loading of commonly-used modules.
Rather than having the module's file data as a separate file on disk,
we store it as a string directly in the index file, and avoid doing
a separate network access to load it at VM startup time.
"""
for depname in self._find_transitive_dependencies(name):
if depname in self.preload:
continue
moddata = self.modules[depname]
if "file" in moddata:
filepath = os.path.join(self.bundle_dir, moddata["file"])
with open(filepath, "r") as f:
self.preload[depname] = f.read()
def _find_transitive_dependencies(self, name, seen=None):
"""Transitively find all dependencies of a module."""
if seen is None:
seen = set((name,))
moddata = self.modules.get(name)
if moddata is not None:
deps = set()
imports = moddata.get("imports")
if imports is not None:
deps.update(imports)
if "dir" in moddata:
deps.add(name + ".__init__")
if "." in name:
deps.add(name.rsplit(".", 1)[0])
seen.add(name)
for dep in deps:
if dep not in seen:
self._find_transitive_dependencies(dep, seen)
return seen
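    # As a sketch (hypothetical names): if "mypkg.util" imports only "mymod",
    # then _find_transitive_dependencies("mypkg.util") returns the set
    # {"mypkg.util", "mymod", "mypkg", "mypkg.__init__"}: the module itself,
    # everything it imports, its containing package, and that package's
    # __init__, followed recursively.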
class ImportFinder(ast.NodeVisitor):
"""An AST NodeVisitor for finding all names imported in a python file."""
def __init__(self, module, filepath, known_modules):
super(ImportFinder, self).__init__()
self.module = module
if "." in module:
self.package = module.rsplit(".", 1)[0]
else:
self.package = ""
self.filepath = filepath
self.known_modules = known_modules
self.imported_names = set()
self.uses_absolute_import = False
def find_imported_modules(self):
with open(self.filepath, "r") as f:
code = f.read()
try:
n = ast.parse(code)
except SyntaxError:
return []
self.visit(n)
return sorted(list(self.imported_names))
def visit_Import(self, node):
for alias in node.names:
self.record_imported_name(alias.name)
def visit_ImportFrom(self, node):
if node.module == "__future__":
for alias in node.names:
if alias.name == "absolute_import":
self.uses_absolute_import = True
prefix = "." * node.level
if node.module is not None:
prefix += node.module + "."
for alias in node.names:
self.record_imported_name(prefix + alias.name)
def record_imported_name(self, name):
# Dereference explicit relative imports indicated by leading dots.
if name[0] == ".":
name = name[1:]
pkgbits = self.package.split(".")
while name[0] == ".":
name = name[1:]
pkgbits = pkgbits[:-1]
name = ".".join(pkgbits) + "." + name
# Resolve implicit relative imports within the containing package.
# This depends on self.known_modules having all sibling modules.
elif not self.uses_absolute_import and self.package:
pkgname = self.package
relname = name.rsplit(".", 1)[0]
while True:
absname = pkgname + "." + relname
if absname in self.known_modules:
name = pkgname + "." + name
break
if "." not in pkgname:
break
pkgname = pkgname.rsplit(".", 1)[0]
# Strip trailing components to try to find a known module name.
orig_name = name
while name not in self.known_modules and "." in name:
name = name.rsplit(".", 1)[0]
if name in self.known_modules:
self.imported_names.add(name)
else:
self.imported_names.add(orig_name)
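    # A worked example: while analysing a module "a.b.mod" (so self.package is
    # "a.b"), the statement "from ..x import y" arrives here as "..x.y". The
    # leading dots walk the package up one level per extra dot, giving
    # "a.x.y"; trailing components are then stripped until a name known to the
    # bundle is found (e.g. "a.x"), and that name is recorded.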
if __name__ == "__main__":
res = main(sys.argv)
sys.exit(res)
|
|
# -----------------------------------------------------------------------------
# cparse.py
#
# Simple parser for ANSI C. Based on the grammar in K&R, 2nd Ed.
# -----------------------------------------------------------------------------
import sys
import clex
import ply.yacc as yacc
# Get the token map
tokens = clex.tokens
# translation-unit:
def p_translation_unit_1(t):
'translation_unit : external_declaration'
pass
def p_translation_unit_2(t):
'translation_unit : translation_unit external_declaration'
pass
# external-declaration:
def p_external_declaration_1(t):
'external_declaration : function_definition'
pass
def p_external_declaration_2(t):
'external_declaration : declaration'
pass
# function-definition:
def p_function_definition_1(t):
'function_definition : declaration_specifiers declarator declaration_list compound_statement'
pass
def p_function_definition_2(t):
'function_definition : declarator declaration_list compound_statement'
pass
def p_function_definition_3(t):
'function_definition : declarator compound_statement'
pass
def p_function_definition_4(t):
'function_definition : declaration_specifiers declarator compound_statement'
pass
# declaration:
def p_declaration_1(t):
'declaration : declaration_specifiers init_declarator_list SEMI'
pass
def p_declaration_2(t):
'declaration : declaration_specifiers SEMI'
pass
# declaration-list:
def p_declaration_list_1(t):
'declaration_list : declaration'
pass
def p_declaration_list_2(t):
'declaration_list : declaration_list declaration '
pass
# declaration-specifiers
def p_declaration_specifiers_1(t):
'declaration_specifiers : storage_class_specifier declaration_specifiers'
pass
def p_declaration_specifiers_2(t):
'declaration_specifiers : type_specifier declaration_specifiers'
pass
def p_declaration_specifiers_3(t):
'declaration_specifiers : type_qualifier declaration_specifiers'
pass
def p_declaration_specifiers_4(t):
'declaration_specifiers : storage_class_specifier'
pass
def p_declaration_specifiers_5(t):
'declaration_specifiers : type_specifier'
pass
def p_declaration_specifiers_6(t):
'declaration_specifiers : type_qualifier'
pass
# storage-class-specifier
def p_storage_class_specifier(t):
'''storage_class_specifier : AUTO
| REGISTER
| STATIC
| EXTERN
| TYPEDEF
'''
pass
# type-specifier:
def p_type_specifier(t):
'''type_specifier : VOID
| CHAR
| SHORT
| INT
| LONG
| FLOAT
| DOUBLE
| SIGNED
| UNSIGNED
| struct_or_union_specifier
| enum_specifier
| TYPEID
'''
pass
# type-qualifier:
def p_type_qualifier(t):
'''type_qualifier : CONST
| VOLATILE'''
pass
# struct-or-union-specifier
def p_struct_or_union_specifier_1(t):
'struct_or_union_specifier : struct_or_union ID LBRACE struct_declaration_list RBRACE'
pass
def p_struct_or_union_specifier_2(t):
'struct_or_union_specifier : struct_or_union LBRACE struct_declaration_list RBRACE'
pass
def p_struct_or_union_specifier_3(t):
'struct_or_union_specifier : struct_or_union ID'
pass
# struct-or-union:
def p_struct_or_union(t):
'''struct_or_union : STRUCT
| UNION
'''
pass
# struct-declaration-list:
def p_struct_declaration_list_1(t):
'struct_declaration_list : struct_declaration'
pass
def p_struct_declaration_list_2(t):
'struct_declaration_list : struct_declaration_list struct_declaration'
pass
# init-declarator-list:
def p_init_declarator_list_1(t):
'init_declarator_list : init_declarator'
pass
def p_init_declarator_list_2(t):
'init_declarator_list : init_declarator_list COMMA init_declarator'
pass
# init-declarator
def p_init_declarator_1(t):
'init_declarator : declarator'
pass
def p_init_declarator_2(t):
'init_declarator : declarator EQUALS initializer'
pass
# struct-declaration:
def p_struct_declaration(t):
'struct_declaration : specifier_qualifier_list struct_declarator_list SEMI'
pass
# specifier-qualifier-list:
def p_specifier_qualifier_list_1(t):
'specifier_qualifier_list : type_specifier specifier_qualifier_list'
pass
def p_specifier_qualifier_list_2(t):
'specifier_qualifier_list : type_specifier'
pass
def p_specifier_qualifier_list_3(t):
'specifier_qualifier_list : type_qualifier specifier_qualifier_list'
pass
def p_specifier_qualifier_list_4(t):
'specifier_qualifier_list : type_qualifier'
pass
# struct-declarator-list:
def p_struct_declarator_list_1(t):
'struct_declarator_list : struct_declarator'
pass
def p_struct_declarator_list_2(t):
'struct_declarator_list : struct_declarator_list COMMA struct_declarator'
pass
# struct-declarator:
def p_struct_declarator_1(t):
'struct_declarator : declarator'
pass
def p_struct_declarator_2(t):
'struct_declarator : declarator COLON constant_expression'
pass
def p_struct_declarator_3(t):
'struct_declarator : COLON constant_expression'
pass
# enum-specifier:
def p_enum_specifier_1(t):
'enum_specifier : ENUM ID LBRACE enumerator_list RBRACE'
pass
def p_enum_specifier_2(t):
'enum_specifier : ENUM LBRACE enumerator_list RBRACE'
pass
def p_enum_specifier_3(t):
'enum_specifier : ENUM ID'
pass
# enumerator_list:
def p_enumerator_list_1(t):
'enumerator_list : enumerator'
pass
def p_enumerator_list_2(t):
'enumerator_list : enumerator_list COMMA enumerator'
pass
# enumerator:
def p_enumerator_1(t):
'enumerator : ID'
pass
def p_enumerator_2(t):
'enumerator : ID EQUALS constant_expression'
pass
# declarator:
def p_declarator_1(t):
'declarator : pointer direct_declarator'
pass
def p_declarator_2(t):
'declarator : direct_declarator'
pass
# direct-declarator:
def p_direct_declarator_1(t):
'direct_declarator : ID'
pass
def p_direct_declarator_2(t):
'direct_declarator : LPAREN declarator RPAREN'
pass
def p_direct_declarator_3(t):
'direct_declarator : direct_declarator LBRACKET constant_expression_opt RBRACKET'
pass
def p_direct_declarator_4(t):
'direct_declarator : direct_declarator LPAREN parameter_type_list RPAREN '
pass
def p_direct_declarator_5(t):
'direct_declarator : direct_declarator LPAREN identifier_list RPAREN '
pass
def p_direct_declarator_6(t):
'direct_declarator : direct_declarator LPAREN RPAREN '
pass
# pointer:
def p_pointer_1(t):
'pointer : TIMES type_qualifier_list'
pass
def p_pointer_2(t):
'pointer : TIMES'
pass
def p_pointer_3(t):
'pointer : TIMES type_qualifier_list pointer'
pass
def p_pointer_4(t):
'pointer : TIMES pointer'
pass
# type-qualifier-list:
def p_type_qualifier_list_1(t):
'type_qualifier_list : type_qualifier'
pass
def p_type_qualifier_list_2(t):
'type_qualifier_list : type_qualifier_list type_qualifier'
pass
# parameter-type-list:
def p_parameter_type_list_1(t):
'parameter_type_list : parameter_list'
pass
def p_parameter_type_list_2(t):
'parameter_type_list : parameter_list COMMA ELLIPSIS'
pass
# parameter-list:
def p_parameter_list_1(t):
'parameter_list : parameter_declaration'
pass
def p_parameter_list_2(t):
'parameter_list : parameter_list COMMA parameter_declaration'
pass
# parameter-declaration:
def p_parameter_declaration_1(t):
'parameter_declaration : declaration_specifiers declarator'
pass
def p_parameter_declaration_2(t):
'parameter_declaration : declaration_specifiers abstract_declarator_opt'
pass
# identifier-list:
def p_identifier_list_1(t):
'identifier_list : ID'
pass
def p_identifier_list_2(t):
'identifier_list : identifier_list COMMA ID'
pass
# initializer:
def p_initializer_1(t):
'initializer : assignment_expression'
pass
def p_initializer_2(t):
'''initializer : LBRACE initializer_list RBRACE
| LBRACE initializer_list COMMA RBRACE'''
pass
# initializer-list:
def p_initializer_list_1(t):
'initializer_list : initializer'
pass
def p_initializer_list_2(t):
'initializer_list : initializer_list COMMA initializer'
pass
# type-name:
def p_type_name(t):
'type_name : specifier_qualifier_list abstract_declarator_opt'
pass
def p_abstract_declarator_opt_1(t):
'abstract_declarator_opt : empty'
pass
def p_abstract_declarator_opt_2(t):
'abstract_declarator_opt : abstract_declarator'
pass
# abstract-declarator:
def p_abstract_declarator_1(t):
'abstract_declarator : pointer '
pass
def p_abstract_declarator_2(t):
'abstract_declarator : pointer direct_abstract_declarator'
pass
def p_abstract_declarator_3(t):
'abstract_declarator : direct_abstract_declarator'
pass
# direct-abstract-declarator:
def p_direct_abstract_declarator_1(t):
'direct_abstract_declarator : LPAREN abstract_declarator RPAREN'
pass
def p_direct_abstract_declarator_2(t):
'direct_abstract_declarator : direct_abstract_declarator LBRACKET constant_expression_opt RBRACKET'
pass
def p_direct_abstract_declarator_3(t):
'direct_abstract_declarator : LBRACKET constant_expression_opt RBRACKET'
pass
def p_direct_abstract_declarator_4(t):
'direct_abstract_declarator : direct_abstract_declarator LPAREN parameter_type_list_opt RPAREN'
pass
def p_direct_abstract_declarator_5(t):
'direct_abstract_declarator : LPAREN parameter_type_list_opt RPAREN'
pass
# Optional fields in abstract declarators
def p_constant_expression_opt_1(t):
'constant_expression_opt : empty'
pass
def p_constant_expression_opt_2(t):
'constant_expression_opt : constant_expression'
pass
def p_parameter_type_list_opt_1(t):
'parameter_type_list_opt : empty'
pass
def p_parameter_type_list_opt_2(t):
'parameter_type_list_opt : parameter_type_list'
pass
# statement:
def p_statement(t):
'''
statement : labeled_statement
| expression_statement
| compound_statement
| selection_statement
| iteration_statement
| jump_statement
'''
pass
# labeled-statement:
def p_labeled_statement_1(t):
'labeled_statement : ID COLON statement'
pass
def p_labeled_statement_2(t):
'labeled_statement : CASE constant_expression COLON statement'
pass
def p_labeled_statement_3(t):
'labeled_statement : DEFAULT COLON statement'
pass
# expression-statement:
def p_expression_statement(t):
'expression_statement : expression_opt SEMI'
pass
# compound-statement:
def p_compound_statement_1(t):
'compound_statement : LBRACE declaration_list statement_list RBRACE'
pass
def p_compound_statement_2(t):
'compound_statement : LBRACE statement_list RBRACE'
pass
def p_compound_statement_3(t):
'compound_statement : LBRACE declaration_list RBRACE'
pass
def p_compound_statement_4(t):
'compound_statement : LBRACE RBRACE'
pass
# statement-list:
def p_statement_list_1(t):
'statement_list : statement'
pass
def p_statement_list_2(t):
'statement_list : statement_list statement'
pass
# selection-statement
def p_selection_statement_1(t):
'selection_statement : IF LPAREN expression RPAREN statement'
pass
def p_selection_statement_2(t):
'selection_statement : IF LPAREN expression RPAREN statement ELSE statement '
pass
def p_selection_statement_3(t):
'selection_statement : SWITCH LPAREN expression RPAREN statement '
pass
# iteration_statement:
def p_iteration_statement_1(t):
'iteration_statement : WHILE LPAREN expression RPAREN statement'
pass
def p_iteration_statement_2(t):
'iteration_statement : FOR LPAREN expression_opt SEMI expression_opt SEMI expression_opt RPAREN statement '
pass
def p_iteration_statement_3(t):
'iteration_statement : DO statement WHILE LPAREN expression RPAREN SEMI'
pass
# jump_statement:
def p_jump_statement_1(t):
'jump_statement : GOTO ID SEMI'
pass
def p_jump_statement_2(t):
'jump_statement : CONTINUE SEMI'
pass
def p_jump_statement_3(t):
'jump_statement : BREAK SEMI'
pass
def p_jump_statement_4(t):
'jump_statement : RETURN expression_opt SEMI'
pass
def p_expression_opt_1(t):
'expression_opt : empty'
pass
def p_expression_opt_2(t):
'expression_opt : expression'
pass
# expression:
def p_expression_1(t):
'expression : assignment_expression'
pass
def p_expression_2(t):
'expression : expression COMMA assignment_expression'
pass
# assignment_expression:
def p_assignment_expression_1(t):
'assignment_expression : conditional_expression'
pass
def p_assignment_expression_2(t):
'assignment_expression : unary_expression assignment_operator assignment_expression'
pass
# assignment_operator:
def p_assignment_operator(t):
'''
assignment_operator : EQUALS
| TIMESEQUAL
| DIVEQUAL
| MODEQUAL
| PLUSEQUAL
| MINUSEQUAL
| LSHIFTEQUAL
| RSHIFTEQUAL
| ANDEQUAL
| OREQUAL
| XOREQUAL
'''
pass
# conditional-expression
def p_conditional_expression_1(t):
'conditional_expression : logical_or_expression'
pass
def p_conditional_expression_2(t):
'conditional_expression : logical_or_expression CONDOP expression COLON conditional_expression '
pass
# constant-expression
def p_constant_expression(t):
'constant_expression : conditional_expression'
pass
# logical-or-expression
def p_logical_or_expression_1(t):
'logical_or_expression : logical_and_expression'
pass
def p_logical_or_expression_2(t):
'logical_or_expression : logical_or_expression LOR logical_and_expression'
pass
# logical-and-expression
def p_logical_and_expression_1(t):
'logical_and_expression : inclusive_or_expression'
pass
def p_logical_and_expression_2(t):
'logical_and_expression : logical_and_expression LAND inclusive_or_expression'
pass
# inclusive-or-expression:
def p_inclusive_or_expression_1(t):
'inclusive_or_expression : exclusive_or_expression'
pass
def p_inclusive_or_expression_2(t):
'inclusive_or_expression : inclusive_or_expression OR exclusive_or_expression'
pass
# exclusive-or-expression:
def p_exclusive_or_expression_1(t):
'exclusive_or_expression : and_expression'
pass
def p_exclusive_or_expression_2(t):
'exclusive_or_expression : exclusive_or_expression XOR and_expression'
pass
# AND-expression
def p_and_expression_1(t):
'and_expression : equality_expression'
pass
def p_and_expression_2(t):
'and_expression : and_expression AND equality_expression'
pass
# equality-expression:
def p_equality_expression_1(t):
'equality_expression : relational_expression'
pass
def p_equality_expression_2(t):
'equality_expression : equality_expression EQ relational_expression'
pass
def p_equality_expression_3(t):
'equality_expression : equality_expression NE relational_expression'
pass
# relational-expression:
def p_relational_expression_1(t):
'relational_expression : shift_expression'
pass
def p_relational_expression_2(t):
'relational_expression : relational_expression LT shift_expression'
pass
def p_relational_expression_3(t):
'relational_expression : relational_expression GT shift_expression'
pass
def p_relational_expression_4(t):
'relational_expression : relational_expression LE shift_expression'
pass
def p_relational_expression_5(t):
'relational_expression : relational_expression GE shift_expression'
pass
# shift-expression
def p_shift_expression_1(t):
'shift_expression : additive_expression'
pass
def p_shift_expression_2(t):
'shift_expression : shift_expression LSHIFT additive_expression'
pass
def p_shift_expression_3(t):
'shift_expression : shift_expression RSHIFT additive_expression'
pass
# additive-expression
def p_additive_expression_1(t):
'additive_expression : multiplicative_expression'
pass
def p_additive_expression_2(t):
'additive_expression : additive_expression PLUS multiplicative_expression'
pass
def p_additive_expression_3(t):
'additive_expression : additive_expression MINUS multiplicative_expression'
pass
# multiplicative-expression
def p_multiplicative_expression_1(t):
'multiplicative_expression : cast_expression'
pass
def p_multiplicative_expression_2(t):
'multiplicative_expression : multiplicative_expression TIMES cast_expression'
pass
def p_multiplicative_expression_3(t):
'multiplicative_expression : multiplicative_expression DIVIDE cast_expression'
pass
def p_multiplicative_expression_4(t):
'multiplicative_expression : multiplicative_expression MOD cast_expression'
pass
# cast-expression:
def p_cast_expression_1(t):
'cast_expression : unary_expression'
pass
def p_cast_expression_2(t):
'cast_expression : LPAREN type_name RPAREN cast_expression'
pass
# unary-expression:
def p_unary_expression_1(t):
'unary_expression : postfix_expression'
pass
def p_unary_expression_2(t):
'unary_expression : PLUSPLUS unary_expression'
pass
def p_unary_expression_3(t):
'unary_expression : MINUSMINUS unary_expression'
pass
def p_unary_expression_4(t):
'unary_expression : unary_operator cast_expression'
pass
def p_unary_expression_5(t):
'unary_expression : SIZEOF unary_expression'
pass
def p_unary_expression_6(t):
'unary_expression : SIZEOF LPAREN type_name RPAREN'
pass
# unary-operator:
def p_unary_operator(t):
'''unary_operator : AND
| TIMES
| PLUS
| MINUS
| NOT
| LNOT '''
pass
# postfix-expression:
def p_postfix_expression_1(t):
'postfix_expression : primary_expression'
pass
def p_postfix_expression_2(t):
'postfix_expression : postfix_expression LBRACKET expression RBRACKET'
pass
def p_postfix_expression_3(t):
'postfix_expression : postfix_expression LPAREN argument_expression_list RPAREN'
pass
def p_postfix_expression_4(t):
'postfix_expression : postfix_expression LPAREN RPAREN'
pass
def p_postfix_expression_5(t):
'postfix_expression : postfix_expression PERIOD ID'
pass
def p_postfix_expression_6(t):
'postfix_expression : postfix_expression ARROW ID'
pass
def p_postfix_expression_7(t):
'postfix_expression : postfix_expression PLUSPLUS'
pass
def p_postfix_expression_8(t):
'postfix_expression : postfix_expression MINUSMINUS'
pass
# primary-expression:
def p_primary_expression(t):
'''primary_expression : ID
| constant
| SCONST
| LPAREN expression RPAREN'''
pass
# argument-expression-list:
def p_argument_expression_list(t):
'''argument_expression_list : assignment_expression
| argument_expression_list COMMA assignment_expression'''
pass
# constant:
def p_constant(t):
'''constant : ICONST
| FCONST
| CCONST'''
pass
def p_empty(t):
'empty : '
pass
def p_error(t):
print("Whoa. We're hosed")
import profile
# Build the grammar
yacc.yacc(method='LALR')
#profile.run("yacc.yacc(method='LALR')")
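# A minimal usage sketch (illustrative; it assumes the companion clex module
# builds its PLY lexer when it is imported, as in the standard PLY ANSI-C
# example this grammar comes from):
#
#     import ply.yacc as yacc
#     import cparse                      # importing builds the parse tables
#     parser = yacc.yacc(module=cparse)  # obtain a parser for this grammar
#     parser.parse("int main() { return 0; }")
#
# All the p_* actions above are empty, so a successful parse simply returns
# None; syntax errors are routed to p_error().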
|
|
"""
Martin O'Hanlon
www.stuffaboutcode.com
PiAware Radar
"""
from argparse import ArgumentParser
import pygame
from radar import Radar
from gpsutils import GPSUtils
from flightdata import FlightData
from copy import deepcopy
from threading import Timer
def lat_lon_to_x_y(lat, lon):
x, y = GPSUtils.lat_lon_to_x_y(lat, lon)
x = int(x)
y = int(y)
#negate the y so the numbers are 'going the right way' as the screen goes down!
y = y * -1
return x, y
def calc_scale_km(scale, line_length):
return str(round((line_length / scale) / 1000)) + " km"
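# For example, with the default scale of 0.001 and a hypothetical 100 pixel
# scale line, the label works out to (100 / 0.001) / 1000 = 100 km.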
#read command line options
parser = ArgumentParser(description="PiAware Flight Radar")
parser.add_argument("lat", type=float, help="The latitude of the receiver")
parser.add_argument("lon", type=float, help="The longitude of the receiver")
parser.add_argument("--piawareip", help="The ip address of the piaware server")
parser.add_argument("--screen", help="The screen config to use [normal / touch]")
parser.add_argument("--fullscreen", help="Fullscreen radar", action="store_true")
args = parser.parse_args()
#import the relevant constants
if args.screen == "touch":
from const_touch import *
else:
from const_normal import *
#create the piaware url
piawareip = args.piawareip
if piawareip is None: piawareip = "localhost"
piaware_url = "http://{}:8080/data.json".format(piawareip)
#set default scale
scale = 0.001
#init pygame
pygame.init()
#load font
myfont = pygame.font.SysFont(FONT, FONTSIZE)
#create the screen
screenflags = 0
if args.fullscreen: screenflags = pygame.FULLSCREEN
screen = pygame.display.set_mode(SIZE, screenflags)
#setup the screen
#set the screen background
screen.fill(BLACK)
#set the screen caption
pygame.display.set_caption("Piaware Radar")
#draw close button
close_rect = pygame.draw.rect(screen, GREEN, CLOSERECT, 2)
pygame.draw.line(screen, GREEN,
(CLOSERECT[0], CLOSERECT[1]), (CLOSERECT[0] + CLOSERECT[2], CLOSERECT[1] + CLOSERECT[3]),
1)
pygame.draw.line(screen, GREEN,
(CLOSERECT[0], CLOSERECT[1] + CLOSERECT[3]), (CLOSERECT[0] + CLOSERECT[2], CLOSERECT[1]),
1)
#draw scale plus button
scale_plus_rect = pygame.draw.rect(screen, GREEN, SCALEPLUSRECT, 2)
pygame.draw.line(screen, GREEN,
(SCALEPLUSRECT[0] + (SCALEPLUSRECT[2] / 2), SCALEPLUSRECT[1] + 5),
(SCALEPLUSRECT[0] + (SCALEPLUSRECT[2] / 2), SCALEPLUSRECT[1] + SCALEPLUSRECT[3] - 5),
1)
pygame.draw.line(screen, GREEN,
(SCALEPLUSRECT[0] + 5, SCALEPLUSRECT[1] + (SCALEPLUSRECT[3] / 2)),
(SCALEPLUSRECT[0] + SCALEPLUSRECT[2] - 5, SCALEPLUSRECT[1] + (SCALEPLUSRECT[3] / 2)),
1)
#draw scale negative button
scale_neg_rect = pygame.draw.rect(screen, GREEN, SCALENEGRECT, 2)
pygame.draw.line(screen, GREEN,
(SCALENEGRECT[0] + 5, SCALENEGRECT[1] + (SCALENEGRECT[3] / 2)),
(SCALENEGRECT[0] + SCALENEGRECT[2] - 5, SCALENEGRECT[1] + (SCALENEGRECT[3] / 2)),
1)
#draw scale line
pygame.draw.line(screen, GREEN,
(SCALELINERECT[0], SCALELINERECT[1] + (SCALELINERECT[3] / 2)),
(SCALELINERECT[0] + SCALELINERECT[2], SCALELINERECT[1] + (SCALELINERECT[3] / 2)),
1)
pygame.draw.line(screen, GREEN,
(SCALELINERECT[0], SCALELINERECT[1]),
(SCALELINERECT[0], SCALELINERECT[1] + SCALELINERECT[3]),
1)
pygame.draw.line(screen, GREEN,
(SCALELINERECT[0] + SCALELINERECT[2], SCALELINERECT[1]),
(SCALELINERECT[0] + SCALELINERECT[2], SCALELINERECT[1] + SCALELINERECT[3]),
1)
scale_text = myfont.render(calc_scale_km(scale, SCALELINERECT[2]), 1, GREEN)
screen.blit(scale_text, (SCALETEXTRECT[0], SCALETEXTRECT[1]))
#title
title = myfont.render("PiAware Radar", 1, GREEN)
screen.blit(title, TITLEPOS)
sac = myfont.render("stuffaboutcode.com", 1, GREEN)
screen.blit(sac, (TITLEPOS[0], TITLEPOS[1] + LINESPACE))
#flight data timer
flight_data_timer = None
#get the home position
home_x, home_y = lat_lon_to_x_y(args.lat, args.lon)
#startup the radar
radar = Radar(screen, RADARRECT, radar_pos = (home_x, home_y), scale = scale, back_col = BLACK, radar_col = GREEN)
radar.start()
#get the flight data
myflights = FlightData(data_url = piaware_url)
done = False
next_refresh = pygame.time.get_ticks()
while not done:
#should the flights be refreshed
if pygame.time.get_ticks() > next_refresh:
#keep a track of the last aircraft we saw
lastaircraftseen = deepcopy(myflights.aircraft)
#loop through all the flights and update them
for aircraft in myflights.aircraft:
if aircraft.validposition:
x, y = lat_lon_to_x_y(aircraft.lat, aircraft.lon)
radar.dot_add(aircraft.hex, x, y, data = aircraft)
next_refresh += FLIGHTDATAREFRESH
myflights.refresh()
#remove the dots that are no longer there
for lostaircraft in set(lastaircraftseen) - set(myflights.aircraft):
radar.dot_remove(lostaircraft.hex)
#check for pygame events
for event in pygame.event.get():
#quit
if event.type == pygame.QUIT:
done = True
#mouse press
if event.type == pygame.MOUSEBUTTONDOWN:
mousepos = pygame.mouse.get_pos()
#close button
if close_rect.collidepoint(mousepos):
done = True
#scale buttons
if scale_plus_rect.collidepoint(mousepos):
scale = scale * 2
pygame.draw.rect(screen, BLACK, SCALETEXTRECT, 0)
scale_text = myfont.render(calc_scale_km(scale, SCALELINERECT[2]), 1, GREEN)
screen.blit(scale_text, (SCALETEXTRECT[0], SCALETEXTRECT[1]))
radar.set_scale(scale)
if scale_neg_rect.collidepoint(mousepos):
scale = scale * 0.5
pygame.draw.rect(screen, BLACK, SCALETEXTRECT, 0)
scale_text = myfont.render(calc_scale_km(scale, SCALELINERECT[2]), 1, GREEN)
screen.blit(scale_text, (SCALETEXTRECT[0], SCALETEXTRECT[1]))
radar.set_scale(scale)
#dot clicked
dot_found = radar.dot_at_point(mousepos)
if dot_found != None:
#output the flight data to the screen
pygame.draw.rect(screen, BLACK, FLIGHTDATARECT, 0)
text = myfont.render(radar.dots[dot_found].data.hex, 1, GREEN)
screen.blit(text, (FLIGHTDATAPOS[0], FLIGHTDATAPOS[1]))
text = myfont.render(radar.dots[dot_found].data.squawk, 1, GREEN)
screen.blit(text, (FLIGHTDATAPOS[0], FLIGHTDATAPOS[1] + LINESPACE))
text = myfont.render(radar.dots[dot_found].data.flight, 1, GREEN)
screen.blit(text, (FLIGHTDATAPOS[0], FLIGHTDATAPOS[1] + (LINESPACE * 2)))
text = myfont.render("lat: " + str(radar.dots[dot_found].data.lat), 1, GREEN)
screen.blit(text, (FLIGHTDATAPOS[0], FLIGHTDATAPOS[1] + (LINESPACE * 3)))
text = myfont.render("lon: " + str(radar.dots[dot_found].data.lon), 1, GREEN)
screen.blit(text, (FLIGHTDATAPOS[0], FLIGHTDATAPOS[1] + (LINESPACE * 4)))
text = myfont.render("alt: " + str(radar.dots[dot_found].data.altitude), 1, GREEN)
screen.blit(text, (FLIGHTDATAPOS[0], FLIGHTDATAPOS[1] + (LINESPACE * 5)))
text = myfont.render("spd: " + str(radar.dots[dot_found].data.speed), 1, GREEN)
screen.blit(text, (FLIGHTDATAPOS[0], FLIGHTDATAPOS[1] + (LINESPACE * 6)))
#start the flight data timer
if flight_data_timer != None: flight_data_timer.cancel()
flight_data_timer = Timer(FLIGHTDATATIMEOUT, pygame.draw.rect, (screen, BLACK, FLIGHTDATARECT, 0))
flight_data_timer.start()
#wait for a while
pygame.time.wait(10)
#stop
if flight_data_timer != None: flight_data_timer.cancel()
radar.stop()
pygame.quit()
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import functools
import logging
import os
import shutil
import signal
import tempfile
import threading
import time
import psutil
from helpers import (unittest, with_config, skipOnTravis, LuigiTestCase,
temporary_unloaded_module)
import luigi.notifications
import luigi.task_register
import luigi.worker
import mock
from luigi import ExternalTask, RemoteScheduler, Task, Event
from luigi.mock import MockTarget, MockFileSystem
from luigi.scheduler import Scheduler
from luigi.worker import Worker
from luigi.rpc import RPCError
from luigi import six
from luigi.cmdline import luigi_run
luigi.notifications.DEBUG = True
class DummyTask(Task):
def __init__(self, *args, **kwargs):
super(DummyTask, self).__init__(*args, **kwargs)
self.has_run = False
def complete(self):
return self.has_run
def run(self):
logging.debug("%s - setting has_run", self)
self.has_run = True
class DynamicDummyTask(Task):
p = luigi.Parameter()
def output(self):
return luigi.LocalTarget(self.p)
def run(self):
with self.output().open('w') as f:
f.write('Done!')
time.sleep(0.5) # so we can benchmark & see if parallelization works
class DynamicDummyTaskWithNamespace(DynamicDummyTask):
task_namespace = 'banana'
class DynamicRequires(Task):
p = luigi.Parameter()
use_banana_task = luigi.BoolParameter(default=False)
def output(self):
return luigi.LocalTarget(os.path.join(self.p, 'parent'))
def run(self):
if self.use_banana_task:
task_cls = DynamicDummyTaskWithNamespace
else:
task_cls = DynamicDummyTask
dummy_targets = yield [task_cls(os.path.join(self.p, str(i)))
for i in range(5)]
dummy_targets += yield [task_cls(os.path.join(self.p, str(i)))
for i in range(5, 7)]
with self.output().open('w') as f:
for i, d in enumerate(dummy_targets):
for line in d.open('r'):
print('%d: %s' % (i, line.strip()), file=f)
class DynamicRequiresOtherModule(Task):
p = luigi.Parameter()
def output(self):
return luigi.LocalTarget(os.path.join(self.p, 'baz'))
def run(self):
import other_module
other_target_foo = yield other_module.OtherModuleTask(os.path.join(self.p, 'foo')) # NOQA
other_target_bar = yield other_module.OtherModuleTask(os.path.join(self.p, 'bar')) # NOQA
with self.output().open('w') as f:
f.write('Done!')
class DummyErrorTask(Task):
retry_index = 0
def run(self):
self.retry_index += 1
raise Exception("Retry index is %s for %s" % (self.retry_index, self.task_family))
class WorkerTest(LuigiTestCase):
def run(self, result=None):
self.sch = Scheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)
self.time = time.time
with Worker(scheduler=self.sch, worker_id='X') as w, Worker(scheduler=self.sch, worker_id='Y') as w2:
self.w = w
self.w2 = w2
super(WorkerTest, self).run(result)
if time.time != self.time:
time.time = self.time
def setTime(self, t):
time.time = lambda: t
def test_dep(self):
class A(Task):
def run(self):
self.has_run = True
def complete(self):
return self.has_run
a = A()
class B(Task):
def requires(self):
return a
def run(self):
self.has_run = True
def complete(self):
return self.has_run
b = B()
a.has_run = False
b.has_run = False
self.assertTrue(self.w.add(b))
self.assertTrue(self.w.run())
self.assertTrue(a.has_run)
self.assertTrue(b.has_run)
def test_external_dep(self):
class A(ExternalTask):
def complete(self):
return False
a = A()
class B(Task):
def requires(self):
return a
def run(self):
self.has_run = True
def complete(self):
return self.has_run
b = B()
a.has_run = False
b.has_run = False
self.assertTrue(self.w.add(b))
self.assertTrue(self.w.run())
self.assertFalse(a.has_run)
self.assertFalse(b.has_run)
def test_externalized_dep(self):
class A(Task):
has_run = False
def run(self):
self.has_run = True
def complete(self):
return self.has_run
a = A()
class B(A):
def requires(self):
return luigi.task.externalize(a)
b = B()
self.assertTrue(self.w.add(b))
self.assertTrue(self.w.run())
self.assertFalse(a.has_run)
self.assertFalse(b.has_run)
def test_legacy_externalized_dep(self):
class A(Task):
has_run = False
def run(self):
self.has_run = True
def complete(self):
return self.has_run
a = A()
a.run = NotImplemented
class B(A):
def requires(self):
return a
b = B()
self.assertTrue(self.w.add(b))
self.assertTrue(self.w.run())
self.assertFalse(a.has_run)
self.assertFalse(b.has_run)
def test_type_error_in_tracking_run_deprecated(self):
class A(Task):
num_runs = 0
def complete(self):
return False
def run(self, tracking_url_callback=None):
self.num_runs += 1
raise TypeError('bad type')
a = A()
self.assertTrue(self.w.add(a))
self.assertFalse(self.w.run())
# Should only run and fail once, not retry because of the type error
self.assertEqual(1, a.num_runs)
def test_tracking_url(self):
tracking_url = 'http://test_url.com/'
class A(Task):
has_run = False
def complete(self):
return self.has_run
def run(self):
self.set_tracking_url(tracking_url)
self.has_run = True
a = A()
self.assertTrue(self.w.add(a))
self.assertTrue(self.w.run())
tasks = self.sch.task_list('DONE', '')
self.assertEqual(1, len(tasks))
self.assertEqual(tracking_url, tasks[a.task_id]['tracking_url'])
def test_fail(self):
class CustomException(BaseException):
def __init__(self, msg):
self.msg = msg
class A(Task):
def run(self):
self.has_run = True
raise CustomException('bad things')
def complete(self):
return self.has_run
a = A()
class B(Task):
def requires(self):
return a
def run(self):
self.has_run = True
def complete(self):
return self.has_run
b = B()
a.has_run = False
b.has_run = False
self.assertTrue(self.w.add(b))
self.assertFalse(self.w.run())
self.assertTrue(a.has_run)
self.assertFalse(b.has_run)
def test_unknown_dep(self):
# see related test_remove_dep test (grep for it)
class A(ExternalTask):
def complete(self):
return False
class C(Task):
def complete(self):
return True
def get_b(dep):
class B(Task):
def requires(self):
return dep
def run(self):
self.has_run = True
def complete(self):
return False
b = B()
b.has_run = False
return b
b_a = get_b(A())
b_c = get_b(C())
self.assertTrue(self.w.add(b_a))
# So now another worker goes in and schedules C -> B
# This should remove the dep A -> B but will screw up the first worker
self.assertTrue(self.w2.add(b_c))
self.assertFalse(self.w.run()) # should not run anything - the worker should detect that A is broken
self.assertFalse(b_a.has_run)
# not sure what should happen??
# self.w2.run() # should run B since C is fulfilled
# self.assertTrue(b_c.has_run)
def test_unfulfilled_dep(self):
class A(Task):
def complete(self):
return self.done
def run(self):
self.done = True
def get_b(a):
class B(A):
def requires(self):
return a
b = B()
b.done = False
a.done = True
return b
a = A()
b = get_b(a)
self.assertTrue(self.w.add(b))
a.done = False
self.w.run()
self.assertTrue(a.complete())
self.assertTrue(b.complete())
def test_gets_missed_work(self):
class A(Task):
done = False
def complete(self):
return self.done
def run(self):
self.done = True
a = A()
self.assertTrue(self.w.add(a))
# simulate a missed get_work response
self.assertEqual(a.task_id, self.sch.get_work(worker='X')['task_id'])
self.assertTrue(self.w.run())
self.assertTrue(a.complete())
def test_avoid_infinite_reschedule(self):
class A(Task):
def complete(self):
return False
class B(Task):
def complete(self):
return False
def requires(self):
return A()
self.assertTrue(self.w.add(B()))
self.assertFalse(self.w.run())
def test_fails_registering_signal(self):
with mock.patch('luigi.worker.signal', spec=['signal']):
# mock will raise an attribute error getting signal.SIGUSR1
Worker()
def test_allow_reschedule_with_many_missing_deps(self):
class A(Task):
""" Task that must run twice to succeed """
i = luigi.IntParameter()
runs = 0
def complete(self):
return self.runs >= 2
def run(self):
self.runs += 1
class B(Task):
done = False
def requires(self):
return map(A, range(20))
def complete(self):
return self.done
def run(self):
self.done = True
b = B()
w = Worker(scheduler=self.sch, worker_id='X', max_reschedules=1)
self.assertTrue(w.add(b))
self.assertFalse(w.run())
# For b to be done, we must have rescheduled its dependencies to run them twice
self.assertTrue(b.complete())
self.assertTrue(all(a.complete() for a in b.deps()))
def test_interleaved_workers(self):
class A(DummyTask):
pass
a = A()
class B(DummyTask):
def requires(self):
return a
ExternalB = luigi.task.externalize(B)
b = B()
eb = ExternalB()
self.assertEqual(str(eb), "B()")
sch = Scheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)
with Worker(scheduler=sch, worker_id='X') as w, Worker(scheduler=sch, worker_id='Y') as w2:
self.assertTrue(w.add(b))
self.assertTrue(w2.add(eb))
logging.debug("RUNNING BROKEN WORKER")
self.assertTrue(w2.run())
self.assertFalse(a.complete())
self.assertFalse(b.complete())
logging.debug("RUNNING FUNCTIONAL WORKER")
self.assertTrue(w.run())
self.assertTrue(a.complete())
self.assertTrue(b.complete())
def test_interleaved_workers2(self):
# two tasks without dependencies, one external, one not
class B(DummyTask):
pass
ExternalB = luigi.task.externalize(B)
b = B()
eb = ExternalB()
self.assertEqual(str(eb), "B()")
sch = Scheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)
with Worker(scheduler=sch, worker_id='X') as w, Worker(scheduler=sch, worker_id='Y') as w2:
self.assertTrue(w2.add(eb))
self.assertTrue(w.add(b))
self.assertTrue(w2.run())
self.assertFalse(b.complete())
self.assertTrue(w.run())
self.assertTrue(b.complete())
def test_interleaved_workers3(self):
class A(DummyTask):
def run(self):
logging.debug('running A')
time.sleep(0.1)
super(A, self).run()
a = A()
class B(DummyTask):
def requires(self):
return a
def run(self):
logging.debug('running B')
super(B, self).run()
b = B()
sch = Scheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)
with Worker(scheduler=sch, worker_id='X', keep_alive=True, count_uniques=True) as w:
with Worker(scheduler=sch, worker_id='Y', keep_alive=True, count_uniques=True, wait_interval=0.1, wait_jitter=0.05) as w2:
self.assertTrue(w.add(a))
self.assertTrue(w2.add(b))
threading.Thread(target=w.run).start()
self.assertTrue(w2.run())
self.assertTrue(a.complete())
self.assertTrue(b.complete())
def test_die_for_non_unique_pending(self):
class A(DummyTask):
def run(self):
logging.debug('running A')
time.sleep(0.1)
super(A, self).run()
a = A()
class B(DummyTask):
def requires(self):
return a
def run(self):
logging.debug('running B')
super(B, self).run()
b = B()
sch = Scheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)
with Worker(scheduler=sch, worker_id='X', keep_alive=True, count_uniques=True) as w:
with Worker(scheduler=sch, worker_id='Y', keep_alive=True, count_uniques=True, wait_interval=0.1, wait_jitter=0.05) as w2:
self.assertTrue(w.add(b))
self.assertTrue(w2.add(b))
self.assertEqual(w._get_work()[0], a.task_id)
self.assertTrue(w2.run())
self.assertFalse(a.complete())
self.assertFalse(b.complete())
def test_complete_exception(self):
"Tests that a task is still scheduled if its sister task crashes in the complete() method"
class A(DummyTask):
def complete(self):
raise Exception("doh")
a = A()
class C(DummyTask):
pass
c = C()
class B(DummyTask):
def requires(self):
return a, c
b = B()
sch = Scheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)
with Worker(scheduler=sch, worker_id="foo") as w:
self.assertFalse(w.add(b))
self.assertTrue(w.run())
self.assertFalse(b.has_run)
self.assertTrue(c.has_run)
self.assertFalse(a.has_run)
def test_requires_exception(self):
class A(DummyTask):
def requires(self):
raise Exception("doh")
a = A()
class D(DummyTask):
pass
d = D()
class C(DummyTask):
def requires(self):
return d
c = C()
class B(DummyTask):
def requires(self):
return c, a
b = B()
sch = Scheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)
with Worker(scheduler=sch, worker_id="foo") as w:
self.assertFalse(w.add(b))
self.assertTrue(w.run())
self.assertFalse(b.has_run)
self.assertTrue(c.has_run)
self.assertTrue(d.has_run)
self.assertFalse(a.has_run)
def test_run_csv_batch_job(self):
completed = set()
class CsvBatchJob(luigi.Task):
values = luigi.parameter.Parameter(batch_method=','.join)
has_run = False
def run(self):
completed.update(self.values.split(','))
self.has_run = True
def complete(self):
return all(value in completed for value in self.values.split(','))
tasks = [CsvBatchJob(str(i)) for i in range(10)]
for task in tasks:
self.assertTrue(self.w.add(task))
self.assertTrue(self.w.run())
for task in tasks:
self.assertTrue(task.complete())
self.assertFalse(task.has_run)
def test_run_max_batch_job(self):
completed = set()
class MaxBatchJob(luigi.Task):
value = luigi.IntParameter(batch_method=max)
has_run = False
def run(self):
completed.add(self.value)
self.has_run = True
def complete(self):
return any(self.value <= ran for ran in completed)
tasks = [MaxBatchJob(i) for i in range(10)]
for task in tasks:
self.assertTrue(self.w.add(task))
self.assertTrue(self.w.run())
for task in tasks:
self.assertTrue(task.complete())
# only task number 9 should run
self.assertFalse(task.has_run and task.value < 9)
def test_run_batch_job_unbatched(self):
completed = set()
class MaxNonBatchJob(luigi.Task):
value = luigi.IntParameter(batch_method=max)
has_run = False
batchable = False
def run(self):
completed.add(self.value)
self.has_run = True
def complete(self):
return self.value in completed
tasks = [MaxNonBatchJob((i,)) for i in range(10)]
for task in tasks:
self.assertTrue(self.w.add(task))
self.assertTrue(self.w.run())
for task in tasks:
self.assertTrue(task.complete())
self.assertTrue(task.has_run)
def test_run_batch_job_limit_batch_size(self):
completed = set()
runs = []
class CsvLimitedBatchJob(luigi.Task):
value = luigi.parameter.Parameter(batch_method=','.join)
has_run = False
max_batch_size = 4
def run(self):
completed.update(self.value.split(','))
runs.append(self)
def complete(self):
return all(value in completed for value in self.value.split(','))
tasks = [CsvLimitedBatchJob(str(i)) for i in range(11)]
for task in tasks:
self.assertTrue(self.w.add(task))
self.assertTrue(self.w.run())
for task in tasks:
self.assertTrue(task.complete())
self.assertEqual(3, len(runs))
def test_fail_max_batch_job(self):
class MaxBatchFailJob(luigi.Task):
value = luigi.IntParameter(batch_method=max)
has_run = False
def run(self):
self.has_run = True
assert False
def complete(self):
return False
tasks = [MaxBatchFailJob(i) for i in range(10)]
for task in tasks:
self.assertTrue(self.w.add(task))
self.assertFalse(self.w.run())
for task in tasks:
# only task number 9 should run
self.assertFalse(task.has_run and task.value < 9)
self.assertEqual({task.task_id for task in tasks}, set(self.sch.task_list('FAILED', '')))
def test_gracefully_handle_batch_method_failure(self):
class BadBatchMethodTask(DummyTask):
priority = 10
batch_int_param = luigi.IntParameter(batch_method=int.__add__) # should be sum
bad_tasks = [BadBatchMethodTask(i) for i in range(5)]
good_tasks = [DummyTask()]
all_tasks = good_tasks + bad_tasks
self.assertFalse(any(task.complete() for task in all_tasks))
worker = Worker(scheduler=Scheduler(retry_count=1), keep_alive=True)
for task in all_tasks:
self.assertTrue(worker.add(task))
self.assertFalse(worker.run())
self.assertFalse(any(task.complete() for task in bad_tasks))
# we only get to run the good task if the bad task failures were handled gracefully
self.assertTrue(all(task.complete() for task in good_tasks))
def test_post_error_message_for_failed_batch_methods(self):
class BadBatchMethodTask(DummyTask):
batch_int_param = luigi.IntParameter(batch_method=int.__add__) # should be sum
tasks = [BadBatchMethodTask(1), BadBatchMethodTask(2)]
for task in tasks:
self.assertTrue(self.w.add(task))
self.assertFalse(self.w.run())
failed_ids = set(self.sch.task_list('FAILED', ''))
self.assertEqual({task.task_id for task in tasks}, failed_ids)
self.assertTrue(all(self.sch.fetch_error(task_id)['error'] for task_id in failed_ids))
class WorkerKeepAliveTests(LuigiTestCase):
def setUp(self):
self.sch = Scheduler()
super(WorkerKeepAliveTests, self).setUp()
def _worker_keep_alive_test(self, first_should_live, second_should_live, task_status=None, **worker_args):
worker_args.update({
'scheduler': self.sch,
'worker_processes': 0,
'wait_interval': 0.01,
'wait_jitter': 0.0,
})
w1 = Worker(worker_id='w1', **worker_args)
w2 = Worker(worker_id='w2', **worker_args)
with w1 as worker1, w2 as worker2:
worker1.add(DummyTask())
t1 = threading.Thread(target=worker1.run)
t1.start()
worker2.add(DummyTask())
t2 = threading.Thread(target=worker2.run)
t2.start()
if task_status:
self.sch.add_task(worker='DummyWorker', task_id=DummyTask().task_id, status=task_status)
# allow workers to run their get work loops a few times
time.sleep(0.1)
try:
self.assertEqual(first_should_live, t1.isAlive())
self.assertEqual(second_should_live, t2.isAlive())
finally:
# mark the task done so the worker threads will die
self.sch.add_task(worker='DummyWorker', task_id=DummyTask().task_id, status='DONE')
t1.join()
t2.join()
def test_no_keep_alive(self):
self._worker_keep_alive_test(
first_should_live=False,
second_should_live=False,
)
def test_keep_alive(self):
self._worker_keep_alive_test(
first_should_live=True,
second_should_live=True,
keep_alive=True,
)
def test_keep_alive_count_uniques(self):
self._worker_keep_alive_test(
first_should_live=False,
second_should_live=False,
keep_alive=True,
count_uniques=True,
)
def test_keep_alive_count_last_scheduled(self):
self._worker_keep_alive_test(
first_should_live=False,
second_should_live=True,
keep_alive=True,
count_last_scheduled=True,
)
def test_keep_alive_through_failure(self):
self._worker_keep_alive_test(
first_should_live=True,
second_should_live=True,
keep_alive=True,
task_status='FAILED',
)
def test_do_not_keep_alive_through_disable(self):
self._worker_keep_alive_test(
first_should_live=False,
second_should_live=False,
keep_alive=True,
task_status='DISABLED',
)
class WorkerInterruptedTest(unittest.TestCase):
def setUp(self):
self.sch = Scheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)
requiring_sigusr = unittest.skipUnless(hasattr(signal, 'SIGUSR1'),
'signal.SIGUSR1 not found on this system')
def _test_stop_getting_new_work(self, worker):
d = DummyTask()
with worker:
worker.add(d) # For assistant its ok that other tasks add it
self.assertFalse(d.complete())
worker.handle_interrupt(signal.SIGUSR1, None)
worker.run()
self.assertFalse(d.complete())
@requiring_sigusr
def test_stop_getting_new_work(self):
self._test_stop_getting_new_work(
Worker(scheduler=self.sch))
@requiring_sigusr
def test_stop_getting_new_work_assistant(self):
self._test_stop_getting_new_work(
Worker(scheduler=self.sch, keep_alive=False, assistant=True))
@requiring_sigusr
def test_stop_getting_new_work_assistant_keep_alive(self):
self._test_stop_getting_new_work(
Worker(scheduler=self.sch, keep_alive=True, assistant=True))
def test_existence_of_disabling_option(self):
        # Any code equivalent of `os.kill(os.getpid(), signal.SIGUSR1)`
        # seems to give some sort of "InvocationError"
Worker(no_install_shutdown_handler=True)
@with_config({"worker": {"no_install_shutdown_handler": "True"}})
def test_can_run_luigi_in_thread(self):
class A(DummyTask):
pass
task = A()
# Note that ``signal.signal(signal.SIGUSR1, fn)`` can only be called in the main thread.
# So if we do not disable the shutdown handler, this would fail.
t = threading.Thread(target=lambda: luigi.build([task], local_scheduler=True))
t.start()
t.join()
self.assertTrue(task.complete())
class WorkerDisabledTest(LuigiTestCase):
def make_sch(self):
return Scheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)
def _test_stop_getting_new_work_build(self, sch, worker):
"""
I got motivated to create this test case when I saw that the
        execution_summary crashed after my first attempted solution.
"""
class KillWorkerTask(luigi.Task):
did_actually_run = False
def run(self):
sch.disable_worker('my_worker_id')
KillWorkerTask.did_actually_run = True
class Factory(object):
def create_local_scheduler(self, *args, **kwargs):
return sch
def create_worker(self, *args, **kwargs):
return worker
luigi.build([KillWorkerTask()], worker_scheduler_factory=Factory(), local_scheduler=True)
self.assertTrue(KillWorkerTask.did_actually_run)
def _test_stop_getting_new_work_manual(self, sch, worker):
d = DummyTask()
with worker:
worker.add(d) # For assistant its ok that other tasks add it
self.assertFalse(d.complete())
sch.disable_worker('my_worker_id')
worker.run() # Note: Test could fail by hanging on this line
self.assertFalse(d.complete())
def _test_stop_getting_new_work(self, **worker_kwargs):
worker_kwargs['worker_id'] = 'my_worker_id'
sch = self.make_sch()
worker_kwargs['scheduler'] = sch
self._test_stop_getting_new_work_manual(sch, Worker(**worker_kwargs))
sch = self.make_sch()
worker_kwargs['scheduler'] = sch
self._test_stop_getting_new_work_build(sch, Worker(**worker_kwargs))
def test_stop_getting_new_work_keep_alive(self):
self._test_stop_getting_new_work(keep_alive=True, assistant=False)
def test_stop_getting_new_work_assistant(self):
self._test_stop_getting_new_work(keep_alive=False, assistant=True)
def test_stop_getting_new_work_assistant_keep_alive(self):
self._test_stop_getting_new_work(keep_alive=True, assistant=True)
class DynamicDependenciesTest(unittest.TestCase):
n_workers = 1
timeout = float('inf')
def setUp(self):
self.p = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.p)
def test_dynamic_dependencies(self, use_banana_task=False):
t0 = time.time()
t = DynamicRequires(p=self.p, use_banana_task=use_banana_task)
luigi.build([t], local_scheduler=True, workers=self.n_workers)
self.assertTrue(t.complete())
# loop through output and verify
with t.output().open('r') as f:
for i in range(7):
self.assertEqual(f.readline().strip(), '%d: Done!' % i)
self.assertTrue(time.time() - t0 < self.timeout)
def test_dynamic_dependencies_with_namespace(self):
self.test_dynamic_dependencies(use_banana_task=True)
def test_dynamic_dependencies_other_module(self):
t = DynamicRequiresOtherModule(p=self.p)
luigi.build([t], local_scheduler=True, workers=self.n_workers)
self.assertTrue(t.complete())
class DynamicDependenciesWithMultipleWorkersTest(DynamicDependenciesTest):
n_workers = 100
timeout = 3.0 # We run 7 tasks that take 0.5s each so it should take less than 3.5s
class WorkerPingThreadTests(unittest.TestCase):
def test_ping_retry(self):
""" Worker ping fails once. Ping continues to try to connect to scheduler
Kind of ugly since it uses actual timing with sleep to test the thread
"""
sch = Scheduler(
retry_delay=100,
remove_delay=1000,
worker_disconnect_delay=10,
)
self._total_pings = 0 # class var so it can be accessed from fail_ping
def fail_ping(worker):
# this will be called from within keep-alive thread...
self._total_pings += 1
raise Exception("Some random exception")
sch.ping = fail_ping
with Worker(
scheduler=sch,
worker_id="foo",
ping_interval=0.01 # very short between pings to make test fast
):
# let the keep-alive thread run for a bit...
time.sleep(0.1) # yes, this is ugly but it's exactly what we need to test
self.assertTrue(
self._total_pings > 1,
msg="Didn't retry pings (%d pings performed)" % (self._total_pings,)
)
def test_ping_thread_shutdown(self):
with Worker(ping_interval=0.01) as w:
self.assertTrue(w._keep_alive_thread.is_alive())
self.assertFalse(w._keep_alive_thread.is_alive())
def email_patch(test_func, email_config=None):
EMAIL_CONFIG = {"core": {"error-email": "not-a-real-email-address-for-test-only"}, "email": {"force-send": "true"}}
if email_config is not None:
EMAIL_CONFIG.update(email_config)
emails = []
def mock_send_email(sender, recipients, msg):
emails.append(msg)
@with_config(EMAIL_CONFIG)
@functools.wraps(test_func)
@mock.patch('smtplib.SMTP')
def run_test(self, smtp):
smtp().sendmail.side_effect = mock_send_email
test_func(self, emails)
return run_test
def custom_email_patch(config):
return functools.partial(email_patch, email_config=config)
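# Illustrative usage of the two decorators above (a sketch; the real tests below use
# them the same way). The mocked smtplib.SMTP collects every outgoing message into the
# `emails` list that is passed to the decorated test method:
#
#     class MyEmailTest(LuigiTestCase):
#         @email_patch
#         def test_something(self, emails):
#             ...  # assert on the collected `emails`
#
#         @custom_email_patch({"core": {"email-type": "none"}})
#         def test_with_custom_config(self, emails):
#             ...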
class WorkerEmailTest(LuigiTestCase):
def run(self, result=None):
super(WorkerEmailTest, self).setUp()
sch = Scheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)
with Worker(scheduler=sch, worker_id="foo") as self.worker:
super(WorkerEmailTest, self).run(result)
@email_patch
def test_connection_error(self, emails):
sch = RemoteScheduler('http://tld.invalid:1337', connect_timeout=1)
self.waits = 0
def dummy_wait():
self.waits += 1
sch._wait = dummy_wait
class A(DummyTask):
pass
a = A()
self.assertEqual(emails, [])
with Worker(scheduler=sch) as worker:
try:
worker.add(a)
except RPCError:
self.assertEqual(self.waits, 2) # should attempt to add it 3 times
self.assertNotEqual(emails, [])
self.assertTrue(emails[0].find("Luigi: Framework error while scheduling %s" % (a,)) != -1)
else:
self.fail()
@email_patch
def test_complete_error(self, emails):
class A(DummyTask):
def complete(self):
raise Exception("b0rk")
a = A()
self.assertEqual(emails, [])
self.worker.add(a)
self.assertTrue(emails[0].find("Luigi: %s failed scheduling" % (a,)) != -1)
self.worker.run()
self.assertTrue(emails[0].find("Luigi: %s failed scheduling" % (a,)) != -1)
self.assertFalse(a.has_run)
@with_config({'batch_email': {'email_interval': '0'}, 'worker': {'send_failure_email': 'False'}})
@email_patch
def test_complete_error_email_batch(self, emails):
class A(DummyTask):
def complete(self):
raise Exception("b0rk")
scheduler = Scheduler(batch_emails=True)
worker = Worker(scheduler)
a = A()
self.assertEqual(emails, [])
worker.add(a)
self.assertEqual(emails, [])
worker.run()
self.assertEqual(emails, [])
self.assertFalse(a.has_run)
scheduler.prune()
self.assertTrue("1 scheduling failure" in emails[0])
@with_config({'batch_email': {'email_interval': '0'}, 'worker': {'send_failure_email': 'False'}})
@email_patch
def test_complete_error_email_batch_to_owner(self, emails):
class A(DummyTask):
owner_email = 'a_owner@test.com'
def complete(self):
raise Exception("b0rk")
scheduler = Scheduler(batch_emails=True)
worker = Worker(scheduler)
a = A()
self.assertEqual(emails, [])
worker.add(a)
self.assertEqual(emails, [])
worker.run()
self.assertEqual(emails, [])
self.assertFalse(a.has_run)
scheduler.prune()
self.assertTrue(any(
"1 scheduling failure" in email and 'a_owner@test.com' in email
for email in emails))
@email_patch
def test_requires_error(self, emails):
class A(DummyTask):
def requires(self):
raise Exception("b0rk")
a = A()
self.assertEqual(emails, [])
self.worker.add(a)
self.assertTrue(emails[0].find("Luigi: %s failed scheduling" % (a,)) != -1)
self.worker.run()
self.assertFalse(a.has_run)
@with_config({'batch_email': {'email_interval': '0'}, 'worker': {'send_failure_email': 'False'}})
@email_patch
def test_requires_error_email_batch(self, emails):
class A(DummyTask):
def requires(self):
raise Exception("b0rk")
scheduler = Scheduler(batch_emails=True)
worker = Worker(scheduler)
a = A()
self.assertEqual(emails, [])
worker.add(a)
self.assertEqual(emails, [])
worker.run()
self.assertFalse(a.has_run)
scheduler.prune()
self.assertTrue("1 scheduling failure" in emails[0])
@email_patch
def test_complete_return_value(self, emails):
class A(DummyTask):
def complete(self):
pass # no return value should be an error
a = A()
self.assertEqual(emails, [])
self.worker.add(a)
self.assertTrue(emails[0].find("Luigi: %s failed scheduling" % (a,)) != -1)
self.worker.run()
self.assertTrue(emails[0].find("Luigi: %s failed scheduling" % (a,)) != -1)
self.assertFalse(a.has_run)
@with_config({'batch_email': {'email_interval': '0'}, 'worker': {'send_failure_email': 'False'}})
@email_patch
def test_complete_return_value_email_batch(self, emails):
class A(DummyTask):
def complete(self):
pass # no return value should be an error
scheduler = Scheduler(batch_emails=True)
worker = Worker(scheduler)
a = A()
self.assertEqual(emails, [])
worker.add(a)
self.assertEqual(emails, [])
self.worker.run()
self.assertEqual(emails, [])
self.assertFalse(a.has_run)
scheduler.prune()
self.assertTrue("1 scheduling failure" in emails[0])
@email_patch
def test_run_error(self, emails):
class A(luigi.Task):
def run(self):
raise Exception("b0rk")
a = A()
luigi.build([a], workers=1, local_scheduler=True)
self.assertEqual(1, len(emails))
self.assertTrue(emails[0].find("Luigi: %s FAILED" % (a,)) != -1)
@with_config({'batch_email': {'email_interval': '0'}, 'worker': {'send_failure_email': 'False'}})
@email_patch
def test_run_error_email_batch(self, emails):
class A(luigi.Task):
owner_email = ['a@test.com', 'b@test.com']
def run(self):
raise Exception("b0rk")
scheduler = Scheduler(batch_emails=True)
worker = Worker(scheduler)
worker.add(A())
worker.run()
scheduler.prune()
self.assertEqual(3, len(emails))
self.assertTrue(any('a@test.com' in email for email in emails))
self.assertTrue(any('b@test.com' in email for email in emails))
@with_config({'batch_email': {'email_interval': '0'}, 'worker': {'send_failure_email': 'False'}})
@email_patch
def test_run_error_batch_email_string(self, emails):
class A(luigi.Task):
owner_email = 'a@test.com'
def run(self):
raise Exception("b0rk")
scheduler = Scheduler(batch_emails=True)
worker = Worker(scheduler)
worker.add(A())
worker.run()
scheduler.prune()
self.assertEqual(2, len(emails))
self.assertTrue(any('a@test.com' in email for email in emails))
@with_config({'worker': {'send_failure_email': 'False'}})
@email_patch
def test_run_error_no_email(self, emails):
class A(luigi.Task):
def run(self):
raise Exception("b0rk")
luigi.build([A()], workers=1, local_scheduler=True)
self.assertFalse(emails)
@email_patch
def test_task_process_dies_with_email(self, emails):
a = SendSignalTask(signal.SIGKILL)
luigi.build([a], workers=2, local_scheduler=True)
self.assertEqual(1, len(emails))
self.assertTrue(emails[0].find("Luigi: %s FAILED" % (a,)) != -1)
self.assertTrue(emails[0].find("died unexpectedly with exit code -9") != -1)
@with_config({'worker': {'send_failure_email': 'False'}})
@email_patch
def test_task_process_dies_no_email(self, emails):
luigi.build([SendSignalTask(signal.SIGKILL)], workers=2, local_scheduler=True)
self.assertEqual([], emails)
@email_patch
def test_task_times_out(self, emails):
class A(luigi.Task):
worker_timeout = 0.0001
def run(self):
time.sleep(5)
a = A()
luigi.build([a], workers=2, local_scheduler=True)
self.assertEqual(1, len(emails))
self.assertTrue(emails[0].find("Luigi: %s FAILED" % (a,)) != -1)
self.assertTrue(emails[0].find("timed out after 0.0001 seconds and was terminated.") != -1)
@with_config({'worker': {'send_failure_email': 'False'}})
@email_patch
def test_task_times_out_no_email(self, emails):
class A(luigi.Task):
worker_timeout = 0.0001
def run(self):
time.sleep(5)
luigi.build([A()], workers=2, local_scheduler=True)
self.assertEqual([], emails)
@with_config(dict(worker=dict(retry_external_tasks='true')))
@email_patch
def test_external_task_retries(self, emails):
"""
Test that we do not send error emails on the failures of external tasks
"""
class A(luigi.ExternalTask):
pass
a = A()
luigi.build([a], workers=2, local_scheduler=True)
self.assertEqual(emails, [])
@email_patch
def test_no_error(self, emails):
class A(DummyTask):
pass
a = A()
self.assertEqual(emails, [])
self.worker.add(a)
self.assertEqual(emails, [])
self.worker.run()
self.assertEqual(emails, [])
self.assertTrue(a.complete())
@custom_email_patch({"core": {"error-email": "not-a-real-email-address-for-test-only", 'email-type': 'none'}})
def test_disable_emails(self, emails):
class A(luigi.Task):
def complete(self):
raise Exception("b0rk")
self.worker.add(A())
self.assertEqual(emails, [])
class RaiseSystemExit(luigi.Task):
def run(self):
raise SystemExit("System exit!!")
class SendSignalTask(luigi.Task):
signal = luigi.IntParameter()
def run(self):
os.kill(os.getpid(), self.signal)
class HangTheWorkerTask(luigi.Task):
worker_timeout = luigi.IntParameter(default=None)
def run(self):
while True:
pass
def complete(self):
return False
class MultipleWorkersTest(unittest.TestCase):
@unittest.skip('Always skip. There are many intermittent failures')
# This passes under python3 when run as `nosetests test/worker_test.py`
# but not as `nosetests test`. Probably some side effect from previous tests
@unittest.skipIf(six.PY3, 'This test fails on python3 when run with tox.')
def test_multiple_workers(self):
# Test using multiple workers
# Also test generating classes dynamically since this may reflect issues with
# various platforms and how multiprocessing is implemented. If it's using os.fork
# under the hood it should be fine, but dynamic classes can't be pickled, so
# other implementations of multiprocessing (using spawn etc) may fail
class MyDynamicTask(luigi.Task):
x = luigi.Parameter()
def run(self):
time.sleep(0.1)
t0 = time.time()
luigi.build([MyDynamicTask(i) for i in range(100)], workers=100, local_scheduler=True)
self.assertTrue(time.time() < t0 + 5.0) # should ideally take about 0.1s, but definitely less than 5.0
def test_zero_workers(self):
d = DummyTask()
luigi.build([d], workers=0, local_scheduler=True)
self.assertFalse(d.complete())
def test_system_exit(self):
# This would hang indefinitely before this fix:
# https://github.com/spotify/luigi/pull/439
luigi.build([RaiseSystemExit()], workers=2, local_scheduler=True)
def test_term_worker(self):
luigi.build([SendSignalTask(signal.SIGTERM)], workers=2, local_scheduler=True)
def test_kill_worker(self):
luigi.build([SendSignalTask(signal.SIGKILL)], workers=2, local_scheduler=True)
def test_purge_multiple_workers(self):
w = Worker(worker_processes=2, wait_interval=0.01)
t1 = SendSignalTask(signal.SIGTERM)
t2 = SendSignalTask(signal.SIGKILL)
w.add(t1)
w.add(t2)
w._run_task(t1.task_id)
w._run_task(t2.task_id)
time.sleep(1.0)
w._handle_next_task()
w._handle_next_task()
w._handle_next_task()
def test_stop_worker_kills_subprocesses(self):
with Worker(worker_processes=2) as w:
hung_task = HangTheWorkerTask()
w.add(hung_task)
w._run_task(hung_task.task_id)
pids = [p.pid for p in w._running_tasks.values()]
self.assertEqual(1, len(pids))
pid = pids[0]
def is_running():
return pid in {p.pid for p in psutil.Process().children()}
self.assertTrue(is_running())
self.assertFalse(is_running())
def test_time_out_hung_worker(self):
luigi.build([HangTheWorkerTask(0.1)], workers=2, local_scheduler=True)
def test_time_out_hung_single_worker(self):
luigi.build([HangTheWorkerTask(0.1)], workers=1, local_scheduler=True)
@skipOnTravis('https://travis-ci.org/spotify/luigi/jobs/72953986')
@mock.patch('luigi.worker.time')
def test_purge_hung_worker_default_timeout_time(self, mock_time):
w = Worker(worker_processes=2, wait_interval=0.01, timeout=5)
mock_time.time.return_value = 0
task = HangTheWorkerTask()
w.add(task)
w._run_task(task.task_id)
mock_time.time.return_value = 5
w._handle_next_task()
self.assertEqual(1, len(w._running_tasks))
mock_time.time.return_value = 6
w._handle_next_task()
self.assertEqual(0, len(w._running_tasks))
@skipOnTravis('https://travis-ci.org/spotify/luigi/jobs/76645264')
@mock.patch('luigi.worker.time')
def test_purge_hung_worker_override_timeout_time(self, mock_time):
w = Worker(worker_processes=2, wait_interval=0.01, timeout=5)
mock_time.time.return_value = 0
task = HangTheWorkerTask(worker_timeout=10)
w.add(task)
w._run_task(task.task_id)
mock_time.time.return_value = 10
w._handle_next_task()
self.assertEqual(1, len(w._running_tasks))
mock_time.time.return_value = 11
w._handle_next_task()
self.assertEqual(0, len(w._running_tasks))
class Dummy2Task(Task):
p = luigi.Parameter()
def output(self):
return MockTarget(self.p)
def run(self):
f = self.output().open('w')
f.write('test')
f.close()
class AssistantTest(unittest.TestCase):
def run(self, result=None):
self.sch = Scheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)
self.assistant = Worker(scheduler=self.sch, worker_id='Y', assistant=True)
with Worker(scheduler=self.sch, worker_id='X') as w:
self.w = w
super(AssistantTest, self).run(result)
def test_get_work(self):
d = Dummy2Task('123')
self.w.add(d)
self.assertFalse(d.complete())
self.assistant.run()
self.assertTrue(d.complete())
def test_bad_job_type(self):
class Dummy3Task(Dummy2Task):
task_family = 'UnknownTaskFamily'
d = Dummy3Task('123')
self.w.add(d)
self.assertFalse(d.complete())
self.assertFalse(self.assistant.run())
self.assertFalse(d.complete())
self.assertEqual(list(self.sch.task_list('FAILED', '').keys()), [d.task_id])
def test_unimported_job_type(self):
MODULE_CONTENTS = b'''
import luigi
class UnimportedTask(luigi.Task):
def complete(self):
return False
'''
reg = luigi.task_register.Register._get_reg()
class UnimportedTask(luigi.Task):
task_module = None # Set it here, so it's generally settable
luigi.task_register.Register._set_reg(reg)
task = UnimportedTask()
# verify that it can't run the task without the module info necessary to import it
self.w.add(task)
self.assertFalse(self.assistant.run())
self.assertEqual(list(self.sch.task_list('FAILED', '').keys()), [task.task_id])
# check that it can import with the right module
with temporary_unloaded_module(MODULE_CONTENTS) as task.task_module:
self.w.add(task)
self.assertTrue(self.assistant.run())
self.assertEqual(list(self.sch.task_list('DONE', '').keys()), [task.task_id])
def test_unimported_job_sends_failure_message(self):
class NotInAssistantTask(luigi.Task):
task_family = 'Unknown'
task_module = None
task = NotInAssistantTask()
self.w.add(task)
self.assertFalse(self.assistant.run())
self.assertEqual(list(self.sch.task_list('FAILED', '').keys()), [task.task_id])
self.assertTrue(self.sch.fetch_error(task.task_id)['error'])
class ForkBombTask(luigi.Task):
depth = luigi.IntParameter()
breadth = luigi.IntParameter()
p = luigi.Parameter(default=(0, )) # ehm for some weird reason [0] becomes a tuple...?
def output(self):
return MockTarget('.'.join(map(str, self.p)))
def run(self):
with self.output().open('w') as f:
f.write('Done!')
def requires(self):
if len(self.p) < self.depth:
for i in range(self.breadth):
yield ForkBombTask(self.depth, self.breadth, self.p + (i, ))
class TaskLimitTest(unittest.TestCase):
def tearDown(self):
MockFileSystem().remove('')
@with_config({'core': {'worker-task-limit': '6'}})
def test_task_limit_exceeded(self):
w = Worker()
t = ForkBombTask(3, 2)
w.add(t)
w.run()
self.assertFalse(t.complete())
leaf_tasks = [ForkBombTask(3, 2, branch) for branch in [(0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1)]]
self.assertEqual(3, sum(t.complete() for t in leaf_tasks),
"should have gracefully completed as much as possible even though the single last leaf didn't get scheduled")
@with_config({'core': {'worker-task-limit': '7'}})
def test_task_limit_not_exceeded(self):
w = Worker()
t = ForkBombTask(3, 2)
w.add(t)
w.run()
self.assertTrue(t.complete())
def test_no_task_limit(self):
w = Worker()
t = ForkBombTask(4, 2)
w.add(t)
w.run()
self.assertTrue(t.complete())
class WorkerConfigurationTest(unittest.TestCase):
def test_asserts_for_worker(self):
"""
Test that Worker() asserts that it's sanely configured
"""
Worker(wait_interval=1) # This shouldn't raise
self.assertRaises(AssertionError, Worker, wait_interval=0)
class WorkerWaitJitterTest(unittest.TestCase):
@with_config({'worker': {'wait_jitter': '10.0'}})
@mock.patch("random.uniform")
@mock.patch("time.sleep")
def test_wait_jitter(self, mock_sleep, mock_random):
""" verify configured jitter amount """
mock_random.return_value = 1.0
w = Worker()
x = w._sleeper()
six.next(x)
mock_random.assert_called_with(0, 10.0)
mock_sleep.assert_called_with(2.0)
mock_random.return_value = 2.0
six.next(x)
mock_random.assert_called_with(0, 10.0)
mock_sleep.assert_called_with(3.0)
@mock.patch("random.uniform")
@mock.patch("time.sleep")
def test_wait_jitter_default(self, mock_sleep, mock_random):
""" verify default jitter is as expected """
mock_random.return_value = 1.0
w = Worker()
x = w._sleeper()
six.next(x)
mock_random.assert_called_with(0, 5.0)
mock_sleep.assert_called_with(2.0)
mock_random.return_value = 3.3
six.next(x)
mock_random.assert_called_with(0, 5.0)
mock_sleep.assert_called_with(4.3)
class KeyboardInterruptBehaviorTest(LuigiTestCase):
def test_propagation_when_executing(self):
"""
Ensure that a keyboard interrupt causes luigi to quit when you are
executing tasks.
TODO: Add a test that tests the multiprocessing (--worker >1) case
"""
class KeyboardInterruptTask(luigi.Task):
def run(self):
raise KeyboardInterrupt()
cmd = 'KeyboardInterruptTask --local-scheduler --no-lock'.split(' ')
self.assertRaises(KeyboardInterrupt, luigi_run, cmd)
def test_propagation_when_scheduling(self):
"""
Test that KeyboardInterrupt causes luigi to quit while scheduling.
"""
class KeyboardInterruptTask(luigi.Task):
def complete(self):
raise KeyboardInterrupt()
class ExternalKeyboardInterruptTask(luigi.ExternalTask):
def complete(self):
raise KeyboardInterrupt()
self.assertRaises(KeyboardInterrupt, luigi_run,
['KeyboardInterruptTask', '--local-scheduler', '--no-lock'])
self.assertRaises(KeyboardInterrupt, luigi_run,
['ExternalKeyboardInterruptTask', '--local-scheduler', '--no-lock'])
class WorkerPurgeEventHandlerTest(unittest.TestCase):
@mock.patch('luigi.worker.TaskProcess')
def test_process_killed_handler(self, task_proc):
result = []
@HangTheWorkerTask.event_handler(Event.PROCESS_FAILURE)
def store_task(t, error_msg):
self.assertTrue(error_msg)
result.append(t)
w = Worker()
task = HangTheWorkerTask()
task_process = mock.MagicMock(is_alive=lambda: False, exitcode=-14, task=task)
task_proc.return_value = task_process
w.add(task)
w._run_task(task.task_id)
w._handle_next_task()
self.assertEqual(result, [task])
@mock.patch('luigi.worker.time')
def test_timeout_handler(self, mock_time):
result = []
@HangTheWorkerTask.event_handler(Event.TIMEOUT)
def store_task(t, error_msg):
self.assertTrue(error_msg)
result.append(t)
w = Worker(worker_processes=2, wait_interval=0.01, timeout=5)
mock_time.time.return_value = 0
task = HangTheWorkerTask(worker_timeout=1)
w.add(task)
w._run_task(task.task_id)
mock_time.time.return_value = 3
w._handle_next_task()
self.assertEqual(result, [task])
class PerTaskRetryPolicyBehaviorTest(LuigiTestCase):
def setUp(self):
super(PerTaskRetryPolicyBehaviorTest, self).setUp()
self.per_task_retry_count = 3
self.default_retry_count = 1
self.sch = Scheduler(retry_delay=0.1, retry_count=self.default_retry_count, prune_on_get_work=True)
def test_with_all_disabled_with_single_worker(self):
"""
This test covers a wrapper task (TestWrapperTask) that requires two other tasks (TestErrorTask1, TestErrorTask2), both of which fail.
TestErrorTask1 uses the default retry_count of 1, while TestErrorTask2 overrides retry_count at the task level.
This test runs on a single worker.
"""
class TestErrorTask1(DummyErrorTask):
pass
e1 = TestErrorTask1()
class TestErrorTask2(DummyErrorTask):
retry_count = self.per_task_retry_count
e2 = TestErrorTask2()
class TestWrapperTask(luigi.WrapperTask):
def requires(self):
return [e2, e1]
wt = TestWrapperTask()
with Worker(scheduler=self.sch, worker_id='X', keep_alive=True, wait_interval=0.1, wait_jitter=0.05) as w1:
self.assertTrue(w1.add(wt))
self.assertFalse(w1.run())
self.assertEqual([wt.task_id], list(self.sch.task_list('PENDING', 'UPSTREAM_DISABLED').keys()))
self.assertEqual(sorted([e1.task_id, e2.task_id]), sorted(self.sch.task_list('DISABLED', '').keys()))
self.assertEqual(0, self.sch._state.get_task(wt.task_id).failures.num_failures())
self.assertEqual(self.per_task_retry_count, self.sch._state.get_task(e2.task_id).failures.num_failures())
self.assertEqual(self.default_retry_count, self.sch._state.get_task(e1.task_id).failures.num_failures())
def test_with_all_disabled_with_multiple_worker(self):
"""
This test covers a wrapper task (TestWrapperTask) that requires two other tasks (TestErrorTask1, TestErrorTask2), both of which fail.
TestErrorTask1 uses the default retry_count of 1, while TestErrorTask2 overrides retry_count at the task level.
This test runs on multiple workers.
"""
class TestErrorTask1(DummyErrorTask):
pass
e1 = TestErrorTask1()
class TestErrorTask2(DummyErrorTask):
retry_count = self.per_task_retry_count
e2 = TestErrorTask2()
class TestWrapperTask(luigi.WrapperTask):
def requires(self):
return [e2, e1]
wt = TestWrapperTask()
with Worker(scheduler=self.sch, worker_id='X', keep_alive=True, wait_interval=0.1, wait_jitter=0.05) as w1:
with Worker(scheduler=self.sch, worker_id='Y', keep_alive=True, wait_interval=0.1, wait_jitter=0.05) as w2:
with Worker(scheduler=self.sch, worker_id='Z', keep_alive=True, wait_interval=0.1, wait_jitter=0.05) as w3:
self.assertTrue(w1.add(wt))
self.assertTrue(w2.add(e2))
self.assertTrue(w3.add(e1))
self.assertFalse(w3.run())
self.assertFalse(w2.run())
self.assertTrue(w1.run())
self.assertEqual([wt.task_id], list(self.sch.task_list('PENDING', 'UPSTREAM_DISABLED').keys()))
self.assertEqual(sorted([e1.task_id, e2.task_id]), sorted(self.sch.task_list('DISABLED', '').keys()))
self.assertEqual(0, self.sch._state.get_task(wt.task_id).failures.num_failures())
self.assertEqual(self.per_task_retry_count, self.sch._state.get_task(e2.task_id).failures.num_failures())
self.assertEqual(self.default_retry_count, self.sch._state.get_task(e1.task_id).failures.num_failures())
def test_with_includes_success_with_single_worker(self):
"""
This test covers a wrapper task (TestWrapperTask) that requires one task that fails (TestErrorTask1) and one that succeeds (TestSuccessTask1).
TestSuccessTask1 finishes as DONE, while TestErrorTask1 fails and overrides retry_count at the task level.
This test runs on a single worker.
"""
class TestSuccessTask1(DummyTask):
pass
s1 = TestSuccessTask1()
class TestErrorTask1(DummyErrorTask):
retry_count = self.per_task_retry_count
e1 = TestErrorTask1()
class TestWrapperTask(luigi.WrapperTask):
def requires(self):
return [e1, s1]
wt = TestWrapperTask()
with Worker(scheduler=self.sch, worker_id='X', keep_alive=True, wait_interval=0.1, wait_jitter=0.05) as w1:
self.assertTrue(w1.add(wt))
self.assertFalse(w1.run())
self.assertEqual([wt.task_id], list(self.sch.task_list('PENDING', 'UPSTREAM_DISABLED').keys()))
self.assertEqual([e1.task_id], list(self.sch.task_list('DISABLED', '').keys()))
self.assertEqual([s1.task_id], list(self.sch.task_list('DONE', '').keys()))
self.assertEqual(0, self.sch._state.get_task(wt.task_id).failures.num_failures())
self.assertEqual(self.per_task_retry_count, self.sch._state.get_task(e1.task_id).failures.num_failures())
self.assertEqual(0, self.sch._state.get_task(s1.task_id).failures.num_failures())
def test_with_includes_success_with_multiple_worker(self):
"""
This test covers a wrapper task (TestWrapperTask) that requires one task that fails (TestErrorTask1) and one that succeeds (TestSuccessTask1).
TestSuccessTask1 finishes as DONE, while TestErrorTask1 fails and overrides retry_count at the task level.
This test runs on multiple workers.
"""
class TestSuccessTask1(DummyTask):
pass
s1 = TestSuccessTask1()
class TestErrorTask1(DummyErrorTask):
retry_count = self.per_task_retry_count
e1 = TestErrorTask1()
class TestWrapperTask(luigi.WrapperTask):
def requires(self):
return [e1, s1]
wt = TestWrapperTask()
with Worker(scheduler=self.sch, worker_id='X', keep_alive=True, wait_interval=0.1, wait_jitter=0.05) as w1:
with Worker(scheduler=self.sch, worker_id='Y', keep_alive=True, wait_interval=0.1, wait_jitter=0.05) as w2:
with Worker(scheduler=self.sch, worker_id='Z', keep_alive=True, wait_interval=0.1, wait_jitter=0.05) as w3:
self.assertTrue(w1.add(wt))
self.assertTrue(w2.add(e1))
self.assertTrue(w3.add(s1))
self.assertTrue(w3.run())
self.assertFalse(w2.run())
self.assertTrue(w1.run())
self.assertEqual([wt.task_id], list(self.sch.task_list('PENDING', 'UPSTREAM_DISABLED').keys()))
self.assertEqual([e1.task_id], list(self.sch.task_list('DISABLED', '').keys()))
self.assertEqual([s1.task_id], list(self.sch.task_list('DONE', '').keys()))
self.assertEqual(0, self.sch._state.get_task(wt.task_id).failures.num_failures())
self.assertEqual(self.per_task_retry_count, self.sch._state.get_task(e1.task_id).failures.num_failures())
self.assertEqual(0, self.sch._state.get_task(s1.task_id).failures.num_failures())
def test_with_dynamic_dependencies_with_single_worker(self):
"""
This test covers dynamic dependencies (TestErrorTask1, TestErrorTask2) where both dependency tasks fail.
TestErrorTask1 uses the default retry_count of 1, while TestErrorTask2 overrides retry_count at the task level.
This test runs on a single worker.
"""
class TestErrorTask1(DummyErrorTask):
pass
e1 = TestErrorTask1()
class TestErrorTask2(DummyErrorTask):
retry_count = self.per_task_retry_count
e2 = TestErrorTask2()
class TestSuccessTask1(DummyTask):
pass
s1 = TestSuccessTask1()
class TestWrapperTask(DummyTask):
def requires(self):
return [s1]
def run(self):
super(TestWrapperTask, self).run()
yield e2, e1
wt = TestWrapperTask()
with Worker(scheduler=self.sch, worker_id='X', keep_alive=True, wait_interval=0.1, wait_jitter=0.05) as w1:
self.assertTrue(w1.add(wt))
self.assertFalse(w1.run())
self.assertEqual([wt.task_id], list(self.sch.task_list('PENDING', 'UPSTREAM_DISABLED').keys()))
self.assertEqual(sorted([e1.task_id, e2.task_id]), sorted(self.sch.task_list('DISABLED', '').keys()))
self.assertEqual(0, self.sch._state.get_task(wt.task_id).failures.num_failures())
self.assertEqual(0, self.sch._state.get_task(s1.task_id).failures.num_failures())
self.assertEqual(self.per_task_retry_count, self.sch._state.get_task(e2.task_id).failures.num_failures())
self.assertEqual(self.default_retry_count, self.sch._state.get_task(e1.task_id).failures.num_failures())
def test_with_dynamic_dependencies_with_multiple_workers(self):
"""
This test covers dynamic dependencies (TestErrorTask1, TestErrorTask2) where both dependency tasks fail.
TestErrorTask1 uses the default retry_count of 1, while TestErrorTask2 overrides retry_count at the task level.
This test runs on multiple workers.
"""
class TestErrorTask1(DummyErrorTask):
pass
e1 = TestErrorTask1()
class TestErrorTask2(DummyErrorTask):
retry_count = self.per_task_retry_count
e2 = TestErrorTask2()
class TestSuccessTask1(DummyTask):
pass
s1 = TestSuccessTask1()
class TestWrapperTask(DummyTask):
def requires(self):
return [s1]
def run(self):
super(TestWrapperTask, self).run()
yield e2, e1
wt = TestWrapperTask()
with Worker(scheduler=self.sch, worker_id='X', keep_alive=True, wait_interval=0.1, wait_jitter=0.05) as w1:
with Worker(scheduler=self.sch, worker_id='Y', keep_alive=True, wait_interval=0.1, wait_jitter=0.05) as w2:
self.assertTrue(w1.add(wt))
self.assertTrue(w2.add(s1))
self.assertTrue(w2.run())
self.assertFalse(w1.run())
self.assertEqual([wt.task_id], list(self.sch.task_list('PENDING', 'UPSTREAM_DISABLED').keys()))
self.assertEqual(sorted([e1.task_id, e2.task_id]), sorted(self.sch.task_list('DISABLED', '').keys()))
self.assertEqual(0, self.sch._state.get_task(wt.task_id).failures.num_failures())
self.assertEqual(0, self.sch._state.get_task(s1.task_id).failures.num_failures())
self.assertEqual(self.per_task_retry_count, self.sch._state.get_task(e2.task_id).failures.num_failures())
self.assertEqual(self.default_retry_count, self.sch._state.get_task(e1.task_id).failures.num_failures())
# eva_vid
import tensorflow as tf
import cv2
import numpy as np
from keras.backend import set_image_dim_ordering
from keras.models import load_model
import keras
from PIL import ImageDraw
from PIL import ImageFilter
from PIL import ImageOps
import time
from functools import wraps
from random import randint
import os
import sys
import datetime
import settings # hy: collection of global variables
import tools
from sklearn import datasets
import math
import imutils
from PIL import Image # hy: create video with images
# from keras.models import Model
# from keras.callbacks import ModelCheckpoint, LearningRateScheduler
import seg_arch as u_a
import re
PROJ_DIR = os.path.dirname(os.path.abspath(__file__))
# PROJ_DIR = '/home/occupancy/'
FLAGS = tf.flags.FLAGS
tf.flags.DEFINE_integer("IMAGE_SIZE", "160", "input image size")
tf.flags.DEFINE_string('seg_model_search_p', PROJ_DIR + '/testbench/1/', "path to log file")
tf.flags.DEFINE_string('save_res_path', PROJ_DIR + '/testbench/k_imgs/', "path to save res images")
tf.flags.DEFINE_bool('with_gt', "True", "test with ground truth: True/ False") #
tf.flags.DEFINE_bool('CLOSE_ALL', "False", "suppress all display windows: True/ False")
tf.flags.DEFINE_bool('INFO_0', "False", "print Info level 0 on: True/ False")
tf.flags.DEFINE_bool('DEBUG', "False", "print debug info: True/ False")
print 'Proj dir:', PROJ_DIR
print 'seg_model_search_p', FLAGS.seg_model_search_p
# Seg_MODEL_to_load = '913_10-0.03' + '.hdf5'
Seg_MODEL_to_load = '718_299-0.02' + '.hdf5'
# Seg_MODEL_to_load = 'No26_160_18799_29-0.13' + '.hdf5'
# seg_model_search_p = os.path.join(PROJ_DIR,'testbench/1/1/')
do_classification = True
result_for_table = False
use_pretrained_cl_model = False
thresh_res = 50 #
search_str = 'cmp'
in_ch = 1
border = 0
def get_model_index(path, search_by):
import re
m = os.path.basename(path)
found_index = re.search(search_by, m)
f_wo_ext = os.path.splitext(path)[0]
if found_index:
index = found_index.start() + 1
f_wo_ext = os.path.basename(f_wo_ext)[index:]
else:
f_wo_ext = os.path.basename(f_wo_ext)
return f_wo_ext
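# Rough behaviour of get_model_index (a sketch; the values are only illustrative):
#   get_model_index('718_299-0.02.hdf5', search_by='-')   -> '0.02'
#     ('-' matches at offset 7, so the basename without extension is cut from offset 8)
#   get_model_index('718_299-0.02.hdf5', search_by='xyz') -> '718_299-0.02'
#     (no match: the whole basename without extension is returned)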
def add_blueOverlay(im_base, mask):
# fill the Blue channel (order 0,1,2) of im_base with mask
if len(im_base.shape) < 3:
im_base_new = cv2.cvtColor(im_base, cv2.COLOR_GRAY2RGB)
else:
im_base_new = im_base
if len(mask.shape) > 2:
mask = cv2.cvtColor(mask, cv2.COLOR_RGB2GRAY)
im_base_new[:, :, 2] = mask
return im_base_new
def ROI(pred_thresh, im_crop, w, h, im_i=0, save_file=False):
"""
Finalize the predicted ROI.
:param pred_thresh: prediction map (scaled to 0-255) to analyze
:param im_crop: RGB input image
:param w,h: width and height to resize to
:param im_i: serial ID of the input image
:param save_file: switch for saving the result to file
Available options:
save_file = True/ False
"""
# input
fr = im_crop.copy()
im_crop_rz = cv2.resize(im_crop, (h, w))
blackbg = np.zeros((w, h, 3), np.uint8)
whitebg = np.zeros((w, h, 3), np.uint8)
whitebg.fill(255)
# new_mask = np.zeros((w, h, 3), np.uint8)
###############
def find_contour(obj_area, thresh):
gray = cv2.resize(np.uint8(obj_area), (h, w)) # direct
ret, gray = cv2.threshold(gray, thresh, 255, 0)
contours, hierarchy = cv2.findContours(gray, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE) # 1
if len(contours) > 0:
screen_out = True
else:
screen_out = False
return contours, screen_out
#########################################################
contours, screen_out = find_contour(pred_thresh, thresh_res) # 160/255 = 0.62
if screen_out:
# debug
# time for contour 0.000312089920044
fr_add_cont = cv2.resize(np.uint8(fr), (h, w))
# fr_ori_int8 = fr_add_cont.copy() # 0,1
# initialize
# roi_res = im_crop_rz.copy()
new_roi_res = im_crop_rz.copy()
old_roi_res = im_crop_rz.copy()
######################################
largest_areas = sorted(contours, key=cv2.contourArea)
for i in xrange(len(largest_areas)):
cv2.drawContours(fr_add_cont, [largest_areas[-i]], 0, (255, 255, 255, 255), -1)
cv2.drawContours(blackbg, [largest_areas[-i]], 0, (255, 255, 255, 255), -1)
new_mask = blackbg
x, y, bw, bh = 0, 0, 0, 0
r1, r2, c1, c2 = y, y + bh, x, x + bw
else:
print 'no contour found (1)'
screen_out = False
old_roi_res = np.zeros((w, h, 3), np.uint8)
old_roi_res.fill(255)
new_roi_res = old_roi_res.copy() # no roi
new_mask = np.zeros((w, h, 3), np.uint8) # full black mask
fr_add_cont = im_crop_rz.copy()
x, y, bw, bh = 0, 0, 0, 0
r1, r2, c1, c2 = y, y + bh, x, x + bw
# time for contour,add mask 0.00493407249451
#####################################################################
old_mask = blackbg
return fr_add_cont, old_mask, old_roi_res, new_mask, new_roi_res, r1, r2, c1, c2, screen_out, whitebg
def do_segment_video(model, im_crop_color, im_i, h, w, in_ch, show_bbox=False):
res_pass_list, res_fail_list = [], []
####### convert into the shape for seg model input
if FLAGS.INFO_0:
print w, h # 160,160
im_crop = cv2.resize(cv2.cvtColor(im_crop_color, cv2.COLOR_BGR2GRAY), (h, w))
im_crop = np.float32(im_crop.reshape(h, w))
im_crop = im_crop / 255.0
im_crop = tools.reduce_mean_stdev(im_crop, print_val=False)
image_tensor = np.zeros((3, 1, h, w)) # 3,1,320,320
image_tensor[1, :, :, :] = im_crop
if FLAGS.INFO_0:
print 'image_tensor.shape1:', image_tensor.shape # 1
image_tensor = image_tensor[1:-1, :, :, :] # hy: convert to (1, 1, 320, 320) to fit bg model input shape
# debug
# print '2-shape of test images', images.shape # (1, 1, 320, 320)
######################
images_original = image_tensor.copy()
# image_tensor = np.transpose(image_tensor,(0,2,1,3))
# print 'image_tensor.shape2:', image_tensor.shape #1
for i in range(0, image_tensor.shape[0]):
# print 'i:',i
start_p = time.time()
pred = model.predict(image_tensor[i, :, :, :].reshape(1, in_ch, h, w), batch_size=1) # video
# print 'model direct output,reshape:',result
end_p = time.time()
print 'time elapsed for model.predict:', (end_p - start_p), 's' # 1.24031400681 s
# print 'Test image', im_i, ", Min,Max: %f %f" % (np.min(pred), np.max(pred))
pred_int = pred[0, 0, :, :].reshape((h, w)) #
pred_255 = pred_int * 255 #
if FLAGS.INFO_0:
print '# show segment result for frame', im_i
#############################################################################
# do_segmentVIDEO
fr_add_cont, old_mask, old_roi_res, new_mask, pre_tensor, \
r1, r2, c1, c2, screen_out, roi_whitebg = ROI(pred_255, im_crop_color, h, w, im_i=im_i, save_file=False)
# print 'time for write',tmp_time2-tmp_time1 # 0.00110197067261, 7.58029007912(no write), 7.33648395538(use write)
return pred_int, pred_255, fr_add_cont, old_mask, old_roi_res, new_mask, pre_tensor, r1, r2, c1, c2, screen_out, roi_whitebg
def create_stacked_n_col_images(prefix, fn, list_of_imgs, winname, save_im=False): # 2
width = len(list_of_imgs)
max_r, c_comb, dim_im = 0, 0, 2
for im in list_of_imgs:
if len(im.shape) == 3:
r_im, c_im, dim_im = im.shape
else:
r_im, c_im = im.shape
c_comb += c_im
if r_im > max_r:
max_r = r_im
r_comb = max_r
frame_border = 1
c_comb = c_comb + (width - 1) * frame_border
comb_im = np.zeros(shape=(r_comb, c_comb, dim_im), dtype=np.uint8)
white = np.zeros(shape=(r_comb, frame_border, dim_im), dtype=np.uint8)
white2 = np.zeros(shape=(r_comb, frame_border), dtype=np.uint8)
white.fill(255)
white2.fill(255)
current_column = 0
for im in list_of_imgs:
if len(im.shape) == 3:
comb_im[:(im.shape[0]), current_column:current_column + im.shape[1]] = im
if current_column + im.shape[1] < c_comb:
comb_im[:(im.shape[0]),
current_column + im.shape[1]:current_column + im.shape[1] + frame_border] = white
else:
comb_im[:(im.shape[0]), current_column:current_column + im.shape[1]] = im[:, :, None]
if current_column + im.shape[1] < c_comb:
comb_im[:(im.shape[0]),
current_column + im.shape[1]:current_column + im.shape[1] + frame_border] = white2[:, :, None]
current_column = current_column + im.shape[1] + frame_border
if not FLAGS.CLOSE_ALL:
cv2.imshow(winname, comb_im)
cv2.waitKey(5)
if save_im:
print 'comb im shape:', comb_im.shape
# filename = "images/file_%d.jpg"%d
filename = prefix + "frame%05d.png" % fn
cv2.imwrite(filename, comb_im)
return comb_im
def EVALUATE_VIDEO_seg_and_classify(seg_model_name, VIDEO_FILE, num_class, in_ch, step_show=False, save_imgs=False,
stop=False): # (v)
seg_model = os.path.join(FLAGS.seg_model_search_p, seg_model_name)
# print 'seg model loaded:', seg_model
video = cv2.VideoCapture(VIDEO_FILE) # hy: changed from cv2.VideoCapture()
video.set(1, 2) # hy: changed from 1,2000 which was for wheelchair test video,
# hy: propID=1 means 0-based index of the frame to be decoded/captured next
if not video.isOpened():
print "cannot find or open video file:", VIDEO_FILE
exit(-1)
eva_count = 0
video_frame_i = 0 #
confMat1_TEST = np.zeros((num_class, num_class), dtype=np.float) # hy collect detailed confusion matrix
confMat2_TEST = np.zeros((2, 2), dtype=np.float)
# load seg model
set_image_dim_ordering(dim_ordering='th') #
model = load_model(seg_model)
while True and not stop: # and video_frame_i < 850:
ret, frame = video.read()
if cv2.waitKey(1) & 0xFF == ord('q'): # hy:press key-q to quit
print 'key interrupt,press q again to quit completely'
break
if ret: # time for a loop 7.28790903091 start from here
h, w = frame.shape[0], frame.shape[1]
video_frame_i += 1
# print 'frame',video_frame_i
# print 'frame shape h,w:', h, w # 1536 2304
if video_frame_i % 5 == 0:  # optionally also restrict the frame range, e.g. video_frame_i > 3750, or 1350 < video_frame_i < 1470
eva_count += 1
# time for a loop 7.43529987335,7.09782910347 variously, start from here
crop_x1 = 0
crop_y1 = 0
crop_x2 = 0 + w # 2300 #1920
crop_y2 = 0 + h # 1536 #1080
frame_crop = frame[crop_y1:crop_y2, crop_x1:crop_x2]
frame_crop_color = frame_crop.copy()
# debug
################################################################################################################
pred_int, pred_255, fr_add_cont, old_mask, old_roi_res, new_mask, \
pre_tensor, r1, r2, c1, c2, screen_out, roi_whitebg \
= do_segment_video(model, frame_crop_color, video_frame_i, FLAGS.IMAGE_SIZE, FLAGS.IMAGE_SIZE,
in_ch, show_bbox=True)
# add blue overlay for better demo
tmp_h, tmp_w, tmp_ch = frame_crop_color.shape
base = cv2.resize(frame_crop_color, (tmp_h, tmp_w))
new_mask_display = cv2.resize(new_mask, (tmp_h, tmp_w))
fr_add_cont_overlay = add_blueOverlay(base, new_mask_display)
if screen_out:
# prefix = FLAGS.save_res_path + get_model_index(Seg_MODEL_to_load, search_by='-')
prefix = FLAGS.save_res_path
fn = eva_count
# VIDEO
s_size = (800, 600)  # other candidates to stack: cv2.resize(pred_int, s_size), cv2.resize(old_mask, s_size), cv2.resize(roi_whitebg, s_size), new_mask, frame_crop_color
list_of_imgs = [cv2.resize(fr_add_cont_overlay, s_size)]
winname = 'fr_with_newmask'
stacked_imgs = create_stacked_n_col_images(prefix, fn, list_of_imgs, winname, save_im=True)
# cv2.imshow('stacked_imgs',stacked_imgs)
################################################################################################################
if not step_show:
pass
else:
k = cv2.waitKey(30) & 0xFF
while True and not stop:
if k == ord('n'):
print 'add to fail_list'
# res_fail_list.append(read_path_im + os.path.basename(files_im[i]))
# res_fail_list.append(files[i])
break
elif k == ord('y'):
print 'add to pass_list'
save_ori_frame = True
save_imgs = True
if save_imgs:
if save_ori_frame:
# im_save = cv2.resize(frame_crop_color, (1920, 1080), interpolation=cv2.INTER_CUBIC) # upsampling
im_save = frame_crop_color # upsampling
# or save image_crop_roi
else:
im_save = cv2.resize(frame_crop_color, (h, w))
v_str = os.path.splitext(os.path.basename(VIDEO_FILE))[0]  # splitext returns a tuple; keep only the filename stem
cv2.imwrite(FLAGS.save_res_path + v_str + '_' + str(video_frame_i) + '.jpg', im_save)
print 'saved to', FLAGS.save_res_path + v_str + '_' + str(video_frame_i) + '.jpg'
break
elif k == ord('q'): # ESC
break
else:
k = cv2.waitKey(30) & 0xFF
if k != 255:
print 'k:', k # 81-l, 83-r, 82-u, 84-d
if cv2.waitKey(1) & 0xFF == ord('q'):
print 'key interrupt, press again to close all windows'
break
cv2.waitKey(10) # required for roi_seg
else:
print 'video end'
video.release()
stop = True
stop = True
return stop
def main(_):
step_show = False
print 'Starting evaluation with k model', Seg_MODEL_to_load
# VIDEO_FILE = '/media/sf_shared/vids/2017-09-06_08.09.58.9.cam_55_4.event12.mp4'
# VIDEO_FILE = '/media/sf_shared_win/vids/01_2017-09-06_13.57.41.0.cam_55_3.event42_240_40s_T.mp4'
# VIDEO_FILE = '01_2017-09-06_13.57.41.0.cam_55_3.event42_240_40s_T.mp4'
VIDEO_FILE = '2017-09-06_13.57.41.5.cam_55_3.event56_empty.mp4'
# VIDEO_FILE = '2017-09-06_13.57.41.5.cam_55_4.event56_empty2.mp4'
stop = False
while not stop:
stop = EVALUATE_VIDEO_seg_and_classify(Seg_MODEL_to_load, VIDEO_FILE, num_class=2, in_ch=1, step_show=step_show,
save_imgs=True, stop=False)
if __name__ == '__main__':
tf.app.run()
#!/usr/bin/env python
""" grin searches text files.
"""
from __future__ import print_function
import argparse
import bisect
import fnmatch
import gzip
import io
import itertools
import os
import re
import shlex
import stat
import sys
import colorama
from six import b, int2byte, reraise
from six.moves import map, filter
def int2bytes(ints):
return b('').join(map(int2byte, ints))
#### Constants ####
__version__ = '1.2.1+xy2'
# Maintain the numerical order of these constants. We use them for sorting.
PRE = -1
MATCH = 0
POST = 1
# Use file(1)'s choices for what's text and what's not.
TEXTCHARS = int2bytes((7, 8, 9, 10, 12, 13, 27) + tuple(range(0x20, 0x100)))
ALLBYTES = int2bytes(range(256))
COLOR_TABLE = ['black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan',
'white', 'default']
COLOR_STYLE = {
'filename': dict(fg="green", bold=True),
'searchterm': dict(fg="black", bg="yellow")
}
# gzip magic header bytes.
GZIP_MAGIC = b('\037\213')
# Target amount of data to read into memory at a time.
READ_BLOCKSIZE = 16 * 1024 * 1024
def is_binary_string(b):
"""Determine if a string is classified as binary rather than text.
Parameters
----------
b : byte string
Returns
-------
is_binary : bool
"""
return bool(b.translate(ALLBYTES, TEXTCHARS))
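# Quick illustration (a sketch): any byte outside file(1)'s text set marks the string as binary.
#   is_binary_string(b('hello\n'))    -> False  (every character is in TEXTCHARS, so translate() deletes them all)
#   is_binary_string(b('\x00\x01ab')) -> True   (the NUL/SOH bytes survive the delete pass)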
def get_line_offsets(block):
"""Compute the list of offsets in DataBlock 'block' which correspond to
the beginnings of new lines.
Parameters
----------
block : DataBlock
Returns
-------
ret : (offset list, count of lines in "current block")
"""
# Note: this implementation based on string.find() benchmarks about twice
# as fast as a list comprehension using re.finditer().
line_offsets = [0]
line_count = 0 # Count of lines inside range [block.start, block.end) *only*
s = block.data
while True:
next_newline = s.find('\n', line_offsets[-1])
if next_newline < 0:
# Tack on a final "line start" corresponding to EOF, if not done
# already. This makes it possible to determine the length of each
# line by computing a difference between successive elements.
if line_offsets[-1] < len(s):
line_offsets.append(len(s))
return line_offsets, line_count
else:
line_offsets.append(next_newline + 1)
# Keep track of the count of lines within the "current block"
if next_newline >= block.start and next_newline < block.end:
line_count += 1
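# Small worked example for get_line_offsets (a sketch): for a block whose data is
# 'a\nb\n' with start=0 and end=4, the newlines at offsets 1 and 3 produce line starts
# 2 and 4, the final EOF check adds nothing because offset 4 is already the last entry,
# and both newlines fall inside [start, end), so the result is ([0, 2, 4], 2).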
def colorize(s, fg=None, bg=None, bold=False, underline=False, reverse=False):
"""Wraps a string with ANSI color escape sequences corresponding to the
style parameters given.
All of the color and style parameters are optional.
Parameters
----------
s : str
fg : str
Foreground color of the text. One of (black, red, green, yellow, blue,
magenta, cyan, white, default)
bg : str
Background color of the text. Color choices are the same as for fg.
bold : bool
Whether or not to display the text in bold.
underline : bool
Whether or not to underline the text.
reverse : bool
Whether or not to show the text in reverse video.
Returns
-------
A string with embedded color escape sequences.
"""
style_fragments = []
if fg in COLOR_TABLE:
# Foreground colors go from 30-39
style_fragments.append(COLOR_TABLE.index(fg) + 30)
if bg in COLOR_TABLE:
# Background colors go from 40-49
style_fragments.append(COLOR_TABLE.index(bg) + 40)
if bold:
style_fragments.append(1)
if underline:
style_fragments.append(4)
if reverse:
style_fragments.append(7)
style_start = '\x1b[' + ';'.join(map(str, style_fragments)) + 'm'
style_end = '\x1b[0m'
return style_start + s + style_end
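# Example of the escape sequences produced (a sketch, matching the COLOR_STYLE entries above):
#   colorize('hit', fg='black', bg='yellow')    -> '\x1b[30;43mhit\x1b[0m'
#   colorize('grin.py', fg='green', bold=True)  -> '\x1b[32;1mgrin.py\x1b[0m'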
class Options(dict):
"""Simple options."""
def __init__(self, *args, **kwds):
super(Options, self).__init__(*args, **kwds)
self.__dict__ = self
def default_options():
"""Populate the default options."""
opt = Options(
before_context=0,
after_context=0,
show_line_numbers=True,
show_match=True,
show_filename=True,
show_emacs=False,
skip_hidden_dirs=False,
skip_hidden_files=False,
skip_backup_files=True,
skip_dirs=set(),
skip_exts=set(),
skip_symlink_dirs=True,
skip_symlink_files=True,
binary_bytes=4096,
)
return opt
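# Options is both a dict and an attribute namespace (its __dict__ is itself), so the two
# access styles below refer to the same underlying data (illustrative sketch):
#   opt = default_options()
#   opt.before_context = 2
#   opt['after_context'] == opt.after_context  # -> True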
class DataBlock(object):
"""This class holds a block of data read from a file, along with
some preceding and trailing context.
Attributes
----------
data : byte string
start : int
Offset into 'data' where the "current block" begins; everything
prior to this is 'before' context bytes
end : int
Offset into 'data' where the "current block" ends; everything
after this is 'after' context bytes
before_count : int
Number of lines contained in data[:start]
is_last : bool
True if this is the final block in the file
"""
def __init__(self, data='', start=0, end=0, before_count=0, is_last=False):
self.data = data
self.start = start
self.end = end
self.before_count = before_count
self.is_last = is_last
EMPTY_DATABLOCK = DataBlock()
class GrepText(object):
"""Grep a single file for a regex by iterating over the lines in a file.
Attributes
----------
regex : compiled regex
options : Options or similar
"""
def __init__(self, regex, options=None):
# The compiled regex.
self.regex = regex
# An equivalent regex with multiline enabled.
self.regex_m = re.compile(regex.pattern, regex.flags | re.MULTILINE)
# The options object from parsing the configuration and command line.
if options is None:
options = default_options()
self.options = options
def read_block_with_context(self, prev, fp, fp_size):
"""Read a block of data from the file, along with some surrounding
context.
Parameters
----------
prev : DataBlock, or None
The result of the previous application of
``read_block_with_context()``, or None if this is the first block.
fp : filelike object
The source of block data.
fp_size : int or None
Size of the file in bytes, or None if the size could not be
determined.
Returns
-------
A DataBlock representing the "current" block along with context.
"""
if fp_size is None:
target_io_size = READ_BLOCKSIZE
block_main = fp.read(target_io_size)
is_last_block = len(block_main) < target_io_size
else:
remaining = max(fp_size - fp.tell(), 0)
target_io_size = min(READ_BLOCKSIZE, remaining)
block_main = fp.read(target_io_size)
is_last_block = target_io_size == remaining
if prev is None:
if is_last_block:
# FAST PATH: the entire file fits into a single block, so we
# can avoid the overhead of locating lines of 'before' and
# 'after' context.
result = DataBlock(data=block_main, start=0,
end=len(block_main), before_count=0,
is_last=True)
return result
else:
prev = EMPTY_DATABLOCK
# SLOW PATH: handle the general case of a large file which is split
# across multiple blocks.
# Look back into 'preceding' for some lines of 'before' context.
if prev.end == 0:
before_start = 0
before_count = 0
else:
before_start = prev.end - 1
before_count = 0
for i in range(self.options.before_context):
ofs = prev.data.rfind('\n', 0, before_start)
before_start = ofs
before_count += 1
if ofs < 0:
break
before_start += 1
before_lines = prev.data[before_start:prev.end]
# Using readline() to force this block out to a newline boundary...
curr_block = (prev.data[prev.end:] + block_main + ('' if is_last_block
else fp.readline()))
# Read in some lines of 'after' context.
if is_last_block:
after_lines = ''
else:
after_lines_list = [fp.readline() for i in
range(self.options.after_context)]
after_lines = ''.join(after_lines_list)
result = DataBlock(
data=before_lines + curr_block + after_lines,
start=len(before_lines),
end=len(before_lines) + len(curr_block),
before_count=before_count,
is_last=is_last_block,
)
return result
def do_grep(self, fp):
"""Do a full grep.
Parameters
----------
fp : filelike object
An open filelike object.
Returns
-------
A list of 4-tuples (lineno, type (POST/PRE/MATCH), line, spans). For
each tuple of type MATCH, **spans** is a list of (start,end) positions
of substrings that matched the pattern.
"""
context = []
line_count = 0
if isinstance(fp, gzip.GzipFile):
fp_size = None # gzipped data is usually longer than the file
else:
try:
status = os.fstat(fp.fileno())
if stat.S_ISREG(status.st_mode):
fp_size = status.st_size
else:
fp_size = None
except (AttributeError, io.UnsupportedOperation):
# doesn't support fileno()
fp_size = None
block = self.read_block_with_context(None, fp, fp_size)
while block.end > block.start:
(block_line_count,
block_context) = self.do_grep_block(block, line_count -
block.before_count)
context += block_context
if block.is_last:
break
next_block = self.read_block_with_context(block, fp, fp_size)
if next_block.end > next_block.start:
if block_line_count is None:
# If the file contains N blocks, then in the best case we
# will need to compute line offsets for the first N-1
# blocks. Most files will fit within a single block, so if
# there are no regex matches then we can typically avoid
# computing *any* line offsets.
_, block_line_count = get_line_offsets(block)
line_count += block_line_count
block = next_block
unique_context = self.uniquify_context(context)
return unique_context
def do_grep_block(self, block, line_num_offset):
""" Grep a single block of file content.
Parameters
----------
block : DataBlock
A chunk of file data.
line_num_offset: int
The number of lines preceding block.data.
Returns
-------
Tuple of the form
(line_count, list of (lineno, type (POST/PRE/MATCH), line, spans).
'line_count' is either the number of lines in the block, or None if
the line_count was not computed. For each 4-tuple of type MATCH,
**spans** is a list of (start,end) positions of substrings that matched
the pattern.
"""
before = self.options.before_context
after = self.options.after_context
block_context = []
line_offsets = None
line_count = None
def build_match_context(match):
match_line_num = bisect.bisect(line_offsets, match.start() +
block.start) - 1
before_count = min(before, match_line_num)
after_count = min(after, (len(line_offsets) - 1) - match_line_num -
1)
match_line = \
block.data[line_offsets[match_line_num]:
line_offsets[match_line_num + 1]]
spans = [m.span() for m in self.regex.finditer(match_line)]
before_ctx = [(i + line_num_offset, PRE,
block.data[line_offsets[i]:line_offsets[i + 1]],
None) for i in range(match_line_num - before_count,
match_line_num)]
after_ctx = [(i + line_num_offset, POST,
block.data[line_offsets[i]:line_offsets[i + 1]],
None) for i in range(match_line_num + 1,
match_line_num + after_count +
1)]
match_ctx = [(match_line_num + line_num_offset, MATCH, match_line,
spans)]
return before_ctx + match_ctx + after_ctx
# Using re.MULTILINE here, so ^ and $ will work as expected.
for match in self.regex_m.finditer(block.data[block.start:block.end]):
# Computing line offsets is expensive, so we do it lazily. We
# don't take the extra CPU hit unless there's a regex match in the
# file.
if line_offsets is None:
(line_offsets, line_count) = get_line_offsets(block)
block_context += build_match_context(match)
return (line_count, block_context)
def uniquify_context(self, context):
""" Remove duplicate lines from the list of context lines.
"""
context.sort()
unique_context = []
for group in itertools.groupby(context, lambda ikl: ikl[0]):
for i, kind, line, matches in group[1]:
if kind == MATCH:
# Always use a match.
unique_context.append((i, kind, line, matches))
break
else:
# No match, only PRE and/or POST lines. Use the last one, which
# should be a POST since we've sorted it that way.
unique_context.append((i, kind, line, matches))
return unique_context
def report(self, context_lines, filename=None):
""" Return a string showing the results.
Parameters
----------
context_lines : list of tuples of (int, PRE/MATCH/POST, str, spans)
The lines of matches and context.
filename : str, optional
The name of the file being grepped, if one exists. If not provided,
the filename may not be printed out.
Returns
-------
text : str
This will end in a newline character if there is any text. Otherwise, it
might be an empty string without a newline.
"""
if len(context_lines) == 0:
return ''
lines = []
if not self.options.show_match:
# Just show the filename if we match.
line = '%s\n' % filename
lines.append(line)
else:
if self.options.show_filename and filename is not None and not self.options.show_emacs:
line = '%s:\n' % filename
if self.options.use_color:
line = colorize(line, **COLOR_STYLE.get('filename', {}))
lines.append(line)
if self.options.show_emacs:
template = '%(filename)s:%(lineno)s: %(line)s'
elif self.options.show_line_numbers:
template = '%(lineno)5s %(sep)s %(line)s'
else:
template = '%(line)s'
for i, kind, line, spans in context_lines:
if self.options.use_color and kind == MATCH and 'searchterm' in COLOR_STYLE:
style = COLOR_STYLE['searchterm']
orig_line = line[:]
total_offset = 0
for start, end in spans:
old_substring = orig_line[start:end]
start += total_offset
end += total_offset
color_substring = colorize(old_substring, **style)
line = line[:start] + color_substring + line[end:]
total_offset += len(color_substring) - len(old_substring)
ns = dict(
lineno = i+1,
sep = {PRE: '-', POST: '+', MATCH: ':'}[kind],
line = line,
filename = filename,
)
line = template % ns
lines.append(line)
if not line.endswith('\n'):
lines.append('\n')
text = ''.join(lines)
return text
def grep_a_file(self, filename, opener=open):
""" Grep a single file that actually exists on the file system.
Parameters
----------
filename : str
The file to open.
opener : callable
A function to call which creates a file-like object. It should
accept a filename and a mode argument like the builtin open()
function which is the default.
Returns
-------
report : str
The grep results as text.
"""
# Special-case stdin as "-".
if filename == '-':
f = sys.stdin
filename = '<STDIN>'
else:
# 'r' does the right thing for both open ('rt') and gzip.open
# ('rb')
f = opener(filename, 'r')
try:
unique_context = self.do_grep(f)
finally:
if filename != '-':
f.close()
report = self.report(unique_context, filename)
return report
class FileRecognizer(object):
"""Configurable way to determine what kind of file something is.
Attributes
----------
skip_hidden_dirs : bool
Whether to skip recursing into hidden directories, i.e. those starting
with a "." character.
skip_hidden_files : bool
Whether to skip hidden files.
skip_backup_files : bool
Whether to skip backup files.
skip_dirs : container of str
A list of directory names to skip. For example, one might want to skip
directories named "CVS".
skip_exts : container of str
A list of file extensions to skip. For example, some file names like
".so" are known to be binary and one may want to always skip them.
skip_symlink_dirs : bool
Whether to skip symlinked directories.
skip_symlink_files : bool
Whether to skip symlinked files.
binary_bytes : int
The number of bytes to check at the beginning and end of a file for
binary characters.
"""
def __init__(self, skip_hidden_dirs=False, skip_hidden_files=False,
skip_backup_files=False, skip_dirs=set(), skip_exts=set(),
skip_symlink_dirs=True, skip_symlink_files=True,
binary_bytes=4096):
self.skip_hidden_dirs = skip_hidden_dirs
self.skip_hidden_files = skip_hidden_files
self.skip_backup_files = skip_backup_files
self.skip_dirs = skip_dirs
# For speed, split extensions into the simple ones, that are
# compatible with os.path.splitext and hence can all be
# checked for in a single set-lookup, and the weirdos that
# can't and therefore must be checked for one at a time.
self.skip_exts_simple = set()
self.skip_exts_endswith = list()
for ext in skip_exts:
if os.path.splitext('foo.bar' + ext)[1] == ext:
self.skip_exts_simple.add(ext)
else:
self.skip_exts_endswith.append(ext)
self.skip_symlink_dirs = skip_symlink_dirs
self.skip_symlink_files = skip_symlink_files
self.binary_bytes = binary_bytes
def is_binary(self, filename):
"""Determine if a given file is binary or not.
Parameters
----------
filename : str
Returns
-------
is_binary : bool
"""
with open(filename, 'rb') as f:
return self._is_binary_file(f)
def _is_binary_file(self, f):
"""Determine if a given filelike object has binary data or not.
Parameters
----------
f : filelike object
Returns
-------
is_binary : bool
"""
try:
b = f.read(self.binary_bytes)
except Exception:
# When trying to read from something that looks like a gzipped
# file, it may be corrupt. If we do get an error, assume that the
# file is binary.
return True
return is_binary_string(b)
def is_gzipped_text(self, filename):
"""Determine if a given file is a gzip-compressed text file or not.
If the uncompressed file is binary and not text, then this will return
False.
Parameters
----------
filename : str
Returns
-------
is_gzipped_text : bool
"""
is_gzipped_text = False
with open(filename, 'rb') as f:
marker = f.read(2)
if marker == GZIP_MAGIC:
fp = gzip.open(filename)
try:
try:
is_gzipped_text = not self._is_binary_file(fp)
except IOError:
# We saw the GZIP_MAGIC marker, but it is not actually a gzip
# file.
is_gzipped_text = False
finally:
fp.close()
return is_gzipped_text
def recognize(self, filename):
"""Determine what kind of thing a filename represents.
It will also determine what a directory walker should do with the
file:
'text' :
It should be grepped for the pattern and the matching
lines displayed.
'binary' :
The file is binary and should be either ignored or grepped
without displaying the matching lines depending on the
configuration.
'gzip' :
The file is gzip-compressed and should be grepped while
uncompressing.
'directory' :
The filename refers to a readable and executable directory that
should be recursed into if we are configured to do so.
'link' :
The filename refers to a symlink that should be skipped.
'unreadable' :
The filename cannot be read (and also, in the case of
directories, is not executable either).
'skip' :
The filename, whether a directory or a file, should be skipped
for any other reason.
Parameters
----------
filename : str
Returns
-------
kind : str
"""
try:
st_mode = os.stat(filename).st_mode
if stat.S_ISREG(st_mode):
return self.recognize_file(filename)
elif stat.S_ISDIR(st_mode):
return self.recognize_directory(filename)
else:
# We're only interested in regular files and directories.
# A named pipe in particular would be problematic, because
# it would cause open() to hang indefinitely.
return 'skip'
except OSError:
return 'unreadable'
def recognize_directory(self, filename):
"""Determine what to do with a directory."""
basename = os.path.split(filename)[-1]
if (self.skip_hidden_dirs and basename.startswith('.') and
basename not in (os.curdir, os.pardir)):
return 'skip'
if self.skip_symlink_dirs and os.path.islink(filename):
return 'link'
if basename in self.skip_dirs:
return 'skip'
return 'directory'
def recognize_file(self, filename):
""" Determine what to do with a file.
"""
basename = os.path.split(filename)[-1]
if self.skip_hidden_files and basename.startswith('.'):
return 'skip'
if self.skip_backup_files and basename.endswith('~'):
return 'skip'
if self.skip_symlink_files and os.path.islink(filename):
return 'link'
filename_nc = os.path.normcase(filename)
ext = os.path.splitext(filename_nc)[1]
if ext in self.skip_exts_simple or ext.startswith('.~'):
return 'skip'
for ext in self.skip_exts_endswith:
if filename_nc.endswith(ext):
return 'skip'
try:
if self.is_binary(filename):
if self.is_gzipped_text(filename):
return 'gzip'
else:
return 'binary'
else:
return 'text'
except (OSError, IOError):
return 'unreadable'
def walk(self, startpath):
""" Walk the tree from a given start path yielding all of the files (not
directories) and their kinds underneath it depth first.
Paths which are recognized as 'skip', 'link', or 'unreadable' will
simply be passed over without comment.
Parameters
----------
startpath : str
Yields
------
filename : str
kind : str
"""
kind = self.recognize(startpath)
if kind in ('binary', 'text', 'gzip'):
yield startpath, kind
# Not a directory, so there is no need to recurse.
return
elif kind == 'directory':
try:
basenames = os.listdir(startpath)
except OSError:
return
for basename in sorted(basenames):
path = os.path.join(startpath, basename)
for fn, k in self.walk(path):
yield fn, k
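# Editor's sketch (not part of the original module): enumerate greppable plain
# text files under a directory with FileRecognizer.walk(), the same loop that
# grind_main() below uses, keeping only files recognized as 'text'.
def _example_list_text_files(start='.'):
    fr = FileRecognizer(skip_hidden_files=True, skip_hidden_dirs=True,
                        skip_dirs=set(['CVS', '.svn', 'build']),
                        skip_exts=set(['.pyc', '.so']))
    return [filename for filename, kind in fr.walk(start) if kind == 'text']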
def get_grin_arg_parser(parser=None):
"""Create the command-line parser."""
if parser is None:
parser = argparse.ArgumentParser(
description="Search text files for a given regex pattern.",
epilog="Bug reports to <enthought-dev@mail.enthought.com>.",
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument(
'-v', '--version', action='version',
version='grin %s' % __version__,
help="show program's version number and exit")
parser.add_argument(
'-i', '--ignore-case', action='append_const',
dest='re_flags', const=re.I, default=[],
help="ignore case in the regex")
parser.add_argument(
'-A', '--after-context', default=0, type=int,
help="the number of lines of context to show after the match [default=%(default)r]")
parser.add_argument(
'-B', '--before-context', default=0, type=int,
help="the number of lines of context to show before the match [default=%(default)r]")
parser.add_argument(
'-C', '--context', type=int,
help="the number of lines of context to show on either side of the match")
parser.add_argument(
'-I', '--include', default='*',
help="only search in files matching this glob [default=%(default)r]")
parser.add_argument(
'-n', '--line-number', action='store_true',
dest='show_line_numbers', default=True,
help="show the line numbers [default]")
parser.add_argument(
'-N', '--no-line-number', action='store_false',
dest='show_line_numbers', help="do not show the line numbers")
parser.add_argument(
'-H', '--with-filename', action='store_true',
dest='show_filename', default=True,
help="show the filenames of files that match [default]")
parser.add_argument(
'--without-filename', action='store_false',
dest='show_filename',
help="do not show the filenames of files that match")
parser.add_argument(
'--emacs', action='store_true',
dest='show_emacs',
help="print the filename with every match for easier parsing by e.g. Emacs")
parser.add_argument(
'-l', '--files-with-matches', action='store_false',
dest='show_match',
help="show only the filenames and not the texts of the matches")
parser.add_argument(
'-L', '--files-without-matches', action='store_true',
dest='show_match', default=False,
help="show the matches with the filenames")
parser.add_argument(
'--no-color', action='store_true', default=False,
help="do not use colorized output [default if piping the output]")
parser.add_argument(
'--use-color', action='store_false', dest='no_color',
help="use colorized output [default if outputting to a terminal]")
parser.add_argument(
'--force-color', action='store_true',
help="always use colorized output even when piping to something that "
"may not be able to handle it")
parser.add_argument(
'-s', '--no-skip-hidden-files',
dest='skip_hidden_files', action='store_false',
help="do not skip .hidden files")
parser.add_argument(
'--skip-hidden-files',
dest='skip_hidden_files', action='store_true', default=True,
help="do skip .hidden files [default]")
parser.add_argument(
'-b', '--no-skip-backup-files',
dest='skip_backup_files', action='store_false',
help="do not skip backup~ files [deprecated; edit --skip-exts]")
parser.add_argument(
'--skip-backup-files',
dest='skip_backup_files', action='store_true', default=True,
help="do skip backup~ files [default] [deprecated; edit --skip-exts]")
parser.add_argument('-S', '--no-skip-hidden-dirs', dest='skip_hidden_dirs',
action='store_false',
help="do not skip .hidden directories")
parser.add_argument('--skip-hidden-dirs', dest='skip_hidden_dirs',
default=True, action='store_true',
help="do skip .hidden directories [default]")
parser.add_argument('-d', '--skip-dirs',
default='CVS,RCS,.svn,.hg,.bzr,build,dist',
help="comma-separated list of directory names to skip [default=%(default)r]")
parser.add_argument('-D', '--no-skip-dirs', dest='skip_dirs',
action='store_const', const='',
help="do not skip any directories")
parser.add_argument('-e', '--skip-exts',
default='.pyc,.pyo,.so,.o,.a,.tgz,.tar.gz,.rar,.zip,~,#,.bak,.png,.jpg,.gif,.bmp,.tif,.tiff,.pyd,.dll,.exe,.obj,.lib',
help="comma-separated list of file extensions to skip [default=%(default)r]")
parser.add_argument('-E', '--no-skip-exts', dest='skip_exts',
action='store_const', const='',
help="do not skip any file extensions")
parser.add_argument('--no-follow', action='store_false',
dest='follow_symlinks',
default=False,
help="do not follow symlinks to directories and files [default]")
parser.add_argument('--follow', action='store_true', dest='follow_symlinks',
help="follow symlinks to directories and files")
parser.add_argument('-f', '--files-from-file', metavar="FILE",
help="read files to search from a file, one per line; - for stdin")
parser.add_argument('-0', '--null-separated', action='store_true',
help="filenames specified in --files-from-file are separated by NULs")
parser.add_argument('--sys-path', action='store_true',
help="search the directories on sys.path")
parser.add_argument('regex', help="the regular expression to search for")
parser.add_argument('files', nargs='*', help="the files to search")
return parser
def get_grind_arg_parser(parser=None):
"""Create the command-line parser for the find-like companion program."""
if parser is None:
parser = argparse.ArgumentParser(
description="Find text and binary files using similar rules as grin.",
epilog="Bug reports to <enthought-dev@mail.enthought.com>.",
)
parser.add_argument('-v', '--version', action='version', version='grin %s' % __version__,
help="show program's version number and exit")
parser.add_argument('-s', '--no-skip-hidden-files',
dest='skip_hidden_files', action='store_false',
help="do not skip .hidden files")
parser.add_argument('--skip-hidden-files',
dest='skip_hidden_files', action='store_true', default=True,
help="do skip .hidden files")
parser.add_argument('-b', '--no-skip-backup-files',
dest='skip_backup_files', action='store_false',
help="do not skip backup~ files [deprecated; edit --skip-exts]")
parser.add_argument('--skip-backup-files',
dest='skip_backup_files', action='store_true', default=True,
help="do skip backup~ files [default] [deprecated; edit --skip-exts]")
parser.add_argument('-S', '--no-skip-hidden-dirs', dest='skip_hidden_dirs',
action='store_false',
help="do not skip .hidden directories")
parser.add_argument('--skip-hidden-dirs', dest='skip_hidden_dirs',
default=True, action='store_true',
help="do skip .hidden directories")
parser.add_argument('-d', '--skip-dirs',
default='CVS,RCS,.svn,.hg,.bzr,build,dist',
help="comma-separated list of directory names to skip [default=%(default)r]")
parser.add_argument('-D', '--no-skip-dirs', dest='skip_dirs',
action='store_const', const='',
help="do not skip any directories")
parser.add_argument('-e', '--skip-exts',
default='.pyc,.pyo,.so,.o,.a,.tgz,.tar.gz,.rar,.zip,~,#,.bak,.png,.jpg,.gif,.bmp,.tif,.tiff,.pyd,.dll,.exe,.obj,.lib',
help="comma-separated list of file extensions to skip [default=%(default)r]")
parser.add_argument('-E', '--no-skip-exts', dest='skip_exts',
action='store_const', const='',
help="do not skip any file extensions")
parser.add_argument('--no-follow', action='store_false', dest='follow_symlinks',
default=False,
help="do not follow symlinks to directories and files [default]")
parser.add_argument('--follow', action='store_true', dest='follow_symlinks',
help="follow symlinks to directories and files")
parser.add_argument('-0', '--null-separated', action='store_true',
help="print the filenames separated by NULs")
parser.add_argument('--dirs', nargs='+', default=["."],
help="the directories to start from")
parser.add_argument('--sys-path', action='store_true',
help="search the directories on sys.path")
parser.add_argument('glob', default='*', nargs='?',
help="the glob pattern to match; you may need to quote this to prevent "
"the shell from trying to expand it [default=%(default)r]")
return parser
def get_recognizer(args):
"""Get the file recognizer object from the configured options."""
# Make sure we have empty sets when we have empty strings.
skip_dirs = set(filter(None, args.skip_dirs.split(',')))
skip_exts = set(filter(None, args.skip_exts.split(',')))
fr = FileRecognizer(
skip_hidden_files=args.skip_hidden_files,
skip_backup_files=args.skip_backup_files,
skip_hidden_dirs=args.skip_hidden_dirs,
skip_dirs=skip_dirs,
skip_exts=skip_exts,
skip_symlink_files=not args.follow_symlinks,
skip_symlink_dirs=not args.follow_symlinks,
)
return fr
def get_filenames(args):
"""Generate the filenames to grep.
Parameters
----------
args : Namespace
The commandline arguments object.
Yields
------
filename : str
kind : either 'text' or 'gzip'
What kind of file it is.
Raises
------
IOError if a requested file cannot be found.
"""
files = []
# If the user has given us a file with filenames, consume them first.
if args.files_from_file is not None:
if args.files_from_file == '-':
files_file = sys.stdin
should_close = False
elif os.path.exists(args.files_from_file):
files_file = open(args.files_from_file)
should_close = True
else:
raise IOError(2, 'No such file: %r' % args.files_from_file)
try:
# Remove ''
# XXX: how can I detect bad filenames? One user accidentally ran
# grin -f against a binary file and got an unhelpful error message
# later.
if args.null_separated:
files.extend([x.strip() for x in files_file.read().split('\0')])
else:
files.extend([x.strip() for x in files_file])
finally:
if should_close:
files_file.close()
# Now add the filenames provided on the command line itself.
files.extend(args.files)
if args.sys_path:
files.extend(sys.path)
# Make sure we don't have any empty strings lying around.
# Also skip certain special null files which may be added by programs like
# Emacs.
if sys.platform == 'win32':
upper_bad = set(['NUL:', 'NUL'])
raw_bad = set([''])
else:
upper_bad = set()
raw_bad = set(['', '/dev/null'])
files = [fn for fn in files if fn not in raw_bad and fn.upper() not in upper_bad]
if len(files) == 0:
# Add the current directory at least.
files = ['.']
# Go over our list of filenames and see if we can recognize each as
# something we want to grep.
fr = get_recognizer(args)
for fn in files:
# Special case text stdin.
if fn == '-':
yield fn, 'text'
continue
kind = fr.recognize(fn)
if kind in ('text', 'gzip') and fnmatch.fnmatch(os.path.basename(fn), args.include):
yield fn, kind
elif kind == 'directory':
for filename, k in fr.walk(fn):
if k in ('text', 'gzip') and fnmatch.fnmatch(os.path.basename(filename), args.include):
yield filename, k
# XXX: warn about other files?
# XXX: handle binary?
def get_regex(args):
"""Get the compiled regex object to search with."""
# Combine all of the flags.
flags = 0
for flag in args.re_flags:
flags |= flag
return re.compile(args.regex, flags)
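# Editor's note: because -i is declared with action='append_const', repeated
# uses simply append re.I to args.re_flags; get_regex() ORs the collected
# flags together, so get_regex(argparse.Namespace(regex='todo', re_flags=[re.I]))
# is equivalent to re.compile('todo', re.I).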
def grin_main(argv=None):
colorama.init()
try:
if argv is None:
# Look at the GRIN_ARGS environment variable for more arguments.
env_args = shlex.split(os.getenv('GRIN_ARGS', ''))
argv = [sys.argv[0]] + env_args + sys.argv[1:]
parser = get_grin_arg_parser()
args = parser.parse_args(argv[1:])
if args.context is not None:
args.before_context = args.context
args.after_context = args.context
args.use_color = args.force_color or (not args.no_color and
sys.stdout.isatty() and
(os.environ.get('TERM') !=
'dumb'))
regex = get_regex(args)
g = GrepText(regex, args)
openers = dict(text=open, gzip=gzip.open)
for filename, kind in get_filenames(args):
report = g.grep_a_file(filename, opener=openers[kind])
sys.stdout.write(report)
except KeyboardInterrupt:
raise SystemExit(0)
except IOError as e:
if 'Broken pipe' in str(e):
# The user is probably piping to a pager like less(1) and has
# exited it. Just exit.
raise SystemExit(0)
reraise(IOError, e)
finally:
colorama.deinit()
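# Editor's note: when calling grin_main() programmatically, argv must include a
# program-name slot, because only argv[1:] is handed to the parser. For
# example, grin_main(['grin', '-i', 'todo', 'example.py']) searches the
# (hypothetical) file example.py case-insensitively for "todo".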
def print_null(filename):
# Note that the final filename will have a trailing NUL, just like
# "find -print0" does.
sys.stdout.write(filename)
sys.stdout.write('\0')
def grind_main(argv=None):
colorama.init()
try:
if argv is None:
# Look at the GRIND_ARGS environment variable for more arguments.
env_args = shlex.split(os.getenv('GRIND_ARGS', ''))
argv = [sys.argv[0]] + env_args + sys.argv[1:]
parser = get_grind_arg_parser()
args = parser.parse_args(argv[1:])
# Define the output function.
output = print_null if args.null_separated else print
if args.sys_path:
args.dirs.extend(sys.path)
fr = get_recognizer(args)
for dir in args.dirs:
for filename, k in fr.walk(dir):
if fnmatch.fnmatch(os.path.basename(filename), args.glob):
output(filename)
except KeyboardInterrupt:
raise SystemExit(0)
except IOError as e:
if 'Broken pipe' in str(e):
# The user is probably piping to a pager like less(1) and has
# exited it. Just exit.
raise SystemExit(0)
raise
finally:
colorama.deinit()
if __name__ == '__main__':
grin_main()
|
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import itertools
import os
import unittest
from collections import defaultdict
from tempfile import mkdtemp
from textwrap import dedent
from pants.base.build_file import BuildFile
from pants.base.build_root import BuildRoot
from pants.base.cmd_line_spec_parser import CmdLineSpecParser
from pants.base.exceptions import TaskError
from pants.base.file_system_project_tree import FileSystemProjectTree
from pants.build_graph.address import Address
from pants.build_graph.build_configuration import BuildConfiguration
from pants.build_graph.build_file_address_mapper import BuildFileAddressMapper
from pants.build_graph.build_file_aliases import BuildFileAliases
from pants.build_graph.build_file_parser import BuildFileParser
from pants.build_graph.mutable_build_graph import MutableBuildGraph
from pants.build_graph.target import Target
from pants.init.util import clean_global_runtime_state
from pants.source.source_root import SourceRootConfig
from pants.subsystem.subsystem import Subsystem
from pants.util.dirutil import safe_mkdir, safe_open, safe_rmtree
from pants_test.base.context_utils import create_context
from pants_test.option.util.fakes import create_options_for_optionables
class TestGenerator(object):
"""A mixin that facilitates test generation at runtime."""
@classmethod
def generate_tests(cls):
"""Generate tests for a given class.
This should be called against the composing class in its defining module, e.g.
class ThingTest(TestGenerator):
...
ThingTest.generate_tests()
"""
raise NotImplementedError()
@classmethod
def add_test(cls, method_name, method):
"""A classmethod that adds dynamic test methods to a given class.
:param string method_name: The name of the test method (e.g. `test_thing_x`).
:param callable method: A callable representing the method. This should take a 'self' argument
as its first parameter for instance method binding.
"""
assert not hasattr(cls, method_name), (
'a test with name `{}` already exists on `{}`!'.format(method_name, cls.__name__)
)
assert method_name.startswith('test_'), '{} is not a valid test name!'.format(method_name)
setattr(cls, method_name, method)
# TODO: Rename to 'TestBase', for uniformity, and also for logic: This is a baseclass
# for tests, not a test of a thing called 'Base'.
class BaseTest(unittest.TestCase):
"""A baseclass useful for tests requiring a temporary buildroot.
:API: public
"""
def build_path(self, relpath):
"""Returns the canonical BUILD file path for the given relative build path.
:API: public
"""
if os.path.basename(relpath).startswith('BUILD'):
return relpath
else:
return os.path.join(relpath, 'BUILD')
def create_dir(self, relpath):
"""Creates a directory under the buildroot.
:API: public
relpath: The relative path to the directory from the build root.
"""
path = os.path.join(self.build_root, relpath)
safe_mkdir(path)
return path
def create_workdir_dir(self, relpath):
"""Creates a directory under the work directory.
:API: public
relpath: The relative path to the directory from the work directory.
"""
path = os.path.join(self.pants_workdir, relpath)
safe_mkdir(path)
return path
def create_file(self, relpath, contents='', mode='wb'):
"""Writes to a file under the buildroot.
:API: public
relpath: The relative path to the file from the build root.
contents: A string containing the contents of the file - '' by default.
mode: The mode to write to the file in - over-write by default.
"""
path = os.path.join(self.build_root, relpath)
with safe_open(path, mode=mode) as fp:
fp.write(contents)
return path
def create_workdir_file(self, relpath, contents='', mode='wb'):
"""Writes to a file under the work directory.
:API: public
relpath: The relative path to the file from the work directory.
contents: A string containing the contents of the file - '' by default.
mode: The mode to write to the file in - over-write by default.
"""
path = os.path.join(self.pants_workdir, relpath)
with safe_open(path, mode=mode) as fp:
fp.write(contents)
return path
def add_to_build_file(self, relpath, target):
"""Adds the given target specification to the BUILD file at relpath.
:API: public
relpath: The relative path to the BUILD file from the build root.
target: A string containing the target definition as it would appear in a BUILD file.
"""
self.create_file(self.build_path(relpath), target, mode='a')
return BuildFile(self.address_mapper._project_tree, relpath=self.build_path(relpath))
def make_target(self,
spec='',
target_type=Target,
dependencies=None,
derived_from=None,
synthetic=False,
**kwargs):
"""Creates a target and injects it into the test's build graph.
:API: public
:param string spec: The target address spec that locates this target.
:param type target_type: The concrete target subclass to create this new target from.
:param list dependencies: A list of target instances this new target depends on.
:param derived_from: The target this new target was derived from.
:type derived_from: :class:`pants.build_graph.target.Target`
"""
address = Address.parse(spec)
target = target_type(name=address.target_name,
address=address,
build_graph=self.build_graph,
**kwargs)
dependencies = dependencies or []
self.build_graph.inject_target(target,
dependencies=[dep.address for dep in dependencies],
derived_from=derived_from,
synthetic=synthetic)
# TODO(John Sirois): This re-creates a little bit too much work done by the BuildGraph.
# Fixup the BuildGraph to deal with non BuildFileAddresses better and just leverage it.
for traversable_dependency_spec in target.traversable_dependency_specs:
traversable_dependency_address = Address.parse(traversable_dependency_spec,
relative_to=address.spec_path)
traversable_dependency_target = self.build_graph.get_target(traversable_dependency_address)
if not traversable_dependency_target:
raise ValueError('Tests must make targets for traversable dependency specs ahead of them '
'being traversed, {} tried to traverse {} which does not exist.'
.format(target, traversable_dependency_address))
if traversable_dependency_target not in target.dependencies:
self.build_graph.inject_dependency(dependent=target.address,
dependency=traversable_dependency_address)
target.mark_transitive_invalidation_hash_dirty()
return target
@property
def alias_groups(self):
"""
:API: public
"""
return BuildFileAliases(targets={'target': Target})
@property
def build_ignore_patterns(self):
"""
:API: public
"""
return None
def setUp(self):
"""
:API: public
"""
super(BaseTest, self).setUp()
# Avoid resetting the Runtracker here, as that is specific to fork'd process cleanup.
clean_global_runtime_state(reset_runtracker=False, reset_subsystem=True)
self.real_build_root = BuildRoot().path
self.build_root = os.path.realpath(mkdtemp(suffix='_BUILD_ROOT'))
self.subprocess_dir = os.path.join(self.build_root, '.pids')
self.addCleanup(safe_rmtree, self.build_root)
self.pants_workdir = os.path.join(self.build_root, '.pants.d')
safe_mkdir(self.pants_workdir)
self.options = defaultdict(dict) # scope -> key-value mapping.
self.options[''] = {
'pants_workdir': self.pants_workdir,
'pants_supportdir': os.path.join(self.build_root, 'build-support'),
'pants_distdir': os.path.join(self.build_root, 'dist'),
'pants_configdir': os.path.join(self.build_root, 'config'),
'pants_subprocessdir': self.subprocess_dir,
'cache_key_gen_version': '0-test',
}
self.options['cache'] = {
'read_from': [],
'write_to': [],
}
BuildRoot().path = self.build_root
self.addCleanup(BuildRoot().reset)
self._build_configuration = BuildConfiguration()
self._build_configuration.register_aliases(self.alias_groups)
self.build_file_parser = BuildFileParser(self._build_configuration, self.build_root)
self.project_tree = FileSystemProjectTree(self.build_root)
self.reset_build_graph()
def buildroot_files(self, relpath=None):
"""Returns the set of all files under the test build root.
:API: public
:param string relpath: If supplied, only collect files from this subtree.
:returns: All file paths found.
:rtype: set
"""
def scan():
for root, dirs, files in os.walk(os.path.join(self.build_root, relpath or '')):
for f in files:
yield os.path.relpath(os.path.join(root, f), self.build_root)
return set(scan())
def reset_build_graph(self):
"""Start over with a fresh build graph with no targets in it."""
self.address_mapper = BuildFileAddressMapper(self.build_file_parser, self.project_tree,
build_ignore_patterns=self.build_ignore_patterns)
self.build_graph = MutableBuildGraph(address_mapper=self.address_mapper)
def set_options_for_scope(self, scope, **kwargs):
self.options[scope].update(kwargs)
def context(self, for_task_types=None, options=None, passthru_args=None, target_roots=None,
console_outstream=None, workspace=None, for_subsystems=None):
"""
:API: public
"""
# Many tests use source root functionality via the SourceRootConfig.global_instance().
# (typically accessed via Target.target_base), so we always set it up, for convenience.
optionables = {SourceRootConfig}
extra_scopes = set()
for_subsystems = for_subsystems or ()
for subsystem in for_subsystems:
if subsystem.options_scope is None:
raise TaskError('You must set a scope on your subsystem type before using it in tests.')
optionables.add(subsystem)
for_task_types = for_task_types or ()
for task_type in for_task_types:
scope = task_type.options_scope
if scope is None:
raise TaskError('You must set a scope on your task type before using it in tests.')
optionables.add(task_type)
extra_scopes.update([si.scope for si in task_type.known_scope_infos()])
optionables.update(Subsystem.closure(
set([dep.subsystem_cls for dep in task_type.subsystem_dependencies_iter()]) |
self._build_configuration.subsystems()))
# Now default the option values and override with any caller-specified values.
# TODO(benjy): Get rid of the options arg, and require tests to call set_options.
options = options.copy() if options else {}
for s, opts in self.options.items():
scoped_opts = options.setdefault(s, {})
scoped_opts.update(opts)
options = create_options_for_optionables(optionables,
extra_scopes=extra_scopes,
options=options)
Subsystem.reset(reset_options=True)
Subsystem.set_options(options)
context = create_context(options=options,
passthru_args=passthru_args,
target_roots=target_roots,
build_graph=self.build_graph,
build_file_parser=self.build_file_parser,
address_mapper=self.address_mapper,
console_outstream=console_outstream,
workspace=workspace)
return context
def tearDown(self):
"""
:API: public
"""
super(BaseTest, self).tearDown()
BuildFile.clear_cache()
Subsystem.reset()
def target(self, spec):
"""Resolves the given target address to a Target object.
:API: public
spec: The BUILD target address to resolve.
Returns the corresponding Target or else None if the address does not point to a defined Target.
"""
address = Address.parse(spec)
self.build_graph.inject_address_closure(address)
return self.build_graph.get_target(address)
def targets(self, spec):
"""Resolves a target spec to one or more Target objects.
:API: public
spec: Either BUILD target address or else a target glob using the siblings ':' or
descendants '::' suffixes.
Returns the set of all Targets found.
"""
spec = CmdLineSpecParser(self.build_root).parse_spec(spec)
addresses = list(self.address_mapper.scan_specs([spec]))
for address in addresses:
self.build_graph.inject_address_closure(address)
targets = [self.build_graph.get_target(address) for address in addresses]
return targets
def create_files(self, path, files):
"""Writes to a file under the buildroot with contents same as file name.
:API: public
path: The relative path to the file from the build root.
files: List of file names.
"""
for f in files:
self.create_file(os.path.join(path, f), contents=f)
def create_library(self, path, target_type, name, sources=None, **kwargs):
"""Creates a library target of given type at the BUILD file at path with sources
:API: public
path: The relative path to the BUILD file from the build root.
target_type: valid pants target type.
name: Name of the library target.
sources: List of source files, relative to path.
**kwargs: Optional attributes that can be set for any library target.
Currently it includes support for resources, java_sources, provides
and dependencies.
"""
if sources:
self.create_files(path, sources)
self.add_to_build_file(path, dedent('''
%(target_type)s(name='%(name)s',
%(sources)s
%(resources)s
%(java_sources)s
%(provides)s
%(dependencies)s
)
''' % dict(target_type=target_type,
name=name,
sources=('sources=%s,' % repr(sources)
if sources else ''),
resources=('resources=["%s"],' % kwargs.get('resources')
if 'resources' in kwargs else ''),
java_sources=('java_sources=[%s],'
% ','.join(map(lambda str_target: '"%s"' % str_target,
kwargs.get('java_sources')))
if 'java_sources' in kwargs else ''),
provides=('provides=%s,' % kwargs.get('provides')
if 'provides' in kwargs else ''),
dependencies=('dependencies=%s,' % kwargs.get('dependencies')
if 'dependencies' in kwargs else ''),
)))
return self.target('%s:%s' % (path, name))
def create_resources(self, path, name, *sources):
"""
:API: public
"""
return self.create_library(path, 'resources', name, sources)
def assertUnorderedPrefixEqual(self, expected, actual_iter):
"""Consumes len(expected) items from the given iter, and asserts that they match, unordered.
:API: public
"""
actual = list(itertools.islice(actual_iter, len(expected)))
self.assertEqual(sorted(expected), sorted(actual))
def assertPrefixEqual(self, expected, actual_iter):
"""Consumes len(expected) items from the given iter, and asserts that they match, in order.
:API: public
"""
self.assertEqual(expected, list(itertools.islice(actual_iter, len(expected))))
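# Editor's sketch (hypothetical test, not part of the original file): the
# add_to_build_file() / target() round trip described in the docstrings above,
# using the plain 'target' alias registered by the default alias_groups.
class ExampleTargetResolutionTest(BaseTest):
  def test_resolves_simple_target(self):
    self.add_to_build_file('src/example', "target(name='example')")
    resolved = self.target('src/example:example')
    self.assertEqual('example', resolved.name)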
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import absolute_import
import os
from .common import Test, Skipped, free_tcp_ports, \
MessengerReceiverC, MessengerSenderC, \
ReactorReceiverC, ReactorSenderC, \
isSSLPresent
#
# Tests that run the apps
#
class AppTests(Test):
def __init__(self, *args):
Test.__init__(self, *args)
self.is_valgrind = False
def default(self, name, value, **kwargs):
if self.is_valgrind:
default = kwargs.get("valgrind", value)
else:
default = value
return Test.default(self, name, default, **kwargs)
@property
def iterations(self):
return int(self.default("iterations", 2, fast=1, valgrind=2))
@property
def send_count(self):
return int(self.default("send_count", 17, fast=1, valgrind=2))
@property
def target_count(self):
return int(self.default("target_count", 5, fast=1, valgrind=2))
@property
def send_batch(self):
return int(self.default("send_batch", 7, fast=1, valgrind=2))
@property
def forward_count(self):
return int(self.default("forward_count", 5, fast=1, valgrind=2))
@property
def port_count(self):
return int(self.default("port_count", 3, fast=1, valgrind=2))
@property
def sender_count(self):
return int(self.default("sender_count", 3, fast=1, valgrind=2))
def valgrind_test(self):
self.is_valgrind = True
def setUp(self):
self.senders = []
self.receivers = []
def tearDown(self):
pass
def _do_test(self, iterations=1):
verbose = self.verbose
for R in self.receivers:
R.start( verbose )
for j in range(iterations):
for S in self.senders:
S.start( verbose )
for S in self.senders:
S.wait()
#print("SENDER OUTPUT:")
#print( S.stdout() )
assert S.status() == 0, ("Command '%s' failed status=%d: '%s' '%s'"
% (str(S.cmdline()),
S.status(),
S.stdout(),
S.stderr()))
for R in self.receivers:
R.wait()
#print("RECEIVER OUTPUT")
#print( R.stdout() )
assert R.status() == 0, ("Command '%s' failed status=%d: '%s' '%s'"
% (str(R.cmdline()),
R.status(),
R.stdout(),
R.stderr()))
#
# Traffic passing tests based on the Messenger apps
#
class MessengerTests(AppTests):
_timeout = 60
def _ssl_check(self):
if not isSSLPresent():
raise Skipped("No SSL libraries found.")
if os.name=="nt":
raise Skipped("Windows SChannel lacks anonymous cipher support.")
def __init__(self, *args):
AppTests.__init__(self, *args)
def _do_oneway_test(self, receiver, sender, domain="amqp"):
""" Send N messages to a receiver.
Parameters:
iterations - repeat the senders this many times
target_count - # of targets to send to
send_count - # of messages sent to each target
"""
iterations = self.iterations
send_count = self.send_count
target_count = self.target_count
send_total = send_count * target_count
receive_total = send_total * iterations
port = free_tcp_ports()[0]
receiver.subscriptions = ["%s://~0.0.0.0:%s" % (domain, port)]
receiver.receive_count = receive_total
receiver.timeout = MessengerTests._timeout
self.receivers.append( receiver )
sender.targets = ["%s://0.0.0.0:%s/X%d" % (domain, port, j) for j in range(target_count)]
sender.send_count = send_total
sender.timeout = MessengerTests._timeout
self.senders.append( sender )
self._do_test(iterations)
def _do_echo_test(self, receiver, sender, domain="amqp"):
""" Send N messages to a receiver, which responds to each.
Parameters:
iterations - repeat the senders this many times
target_count - # of targets to send to
send_count - # of messages sent to each target
send_batch - wait for replies after this many messages sent
"""
iterations = self.iterations
send_count = self.send_count
target_count = self.target_count
send_batch = self.send_batch
send_total = send_count * target_count
receive_total = send_total * iterations
port = free_tcp_ports()[0]
receiver.subscriptions = ["%s://~0.0.0.0:%s" % (domain, port)]
receiver.receive_count = receive_total
receiver.send_reply = True
receiver.timeout = MessengerTests._timeout
self.receivers.append( receiver )
sender.targets = ["%s://0.0.0.0:%s/%dY" % (domain, port, j) for j in range(target_count)]
sender.send_count = send_total
sender.get_reply = True
sender.send_batch = send_batch
sender.timeout = MessengerTests._timeout
self.senders.append( sender )
self._do_test(iterations)
# Removed messenger "relay" tests. The test start-up is faulty:
# msgr-recv prints its -X ready message when it starts to open a
# connection but it does not wait for the remote open. The relay
# tests depend on mapping a container name from an incoming
# connection, so they can fail if the sender starts before the
# connection is complete (esp. valgrind with SSL connections). We
# could fix the tests, but since messenger is deprecated it does
# not seem worthwhile.
def _do_star_topology_test(self, r_factory, s_factory, domain="amqp"):
"""
A star-like topology, with a central receiver at the hub, and senders at
the spokes. Each sender will connect to each of the ports the receiver is
listening on. Each sender will then create N links per each connection.
Each sender will send X messages per link, waiting for a response.
Parameters:
iterations - repeat the senders this many times
port_count - # of ports the receiver will listen on. Each sender connects
to all ports.
sender_count - # of senders
target_count - # of targets per connection
send_count - # of messages sent to each target
send_batch - # of messages to send before waiting for response
"""
iterations = self.iterations
port_count = self.port_count
sender_count = self.sender_count
target_count = self.target_count
send_count = self.send_count
send_batch = self.send_batch
send_total = port_count * target_count * send_count
receive_total = send_total * sender_count * iterations
ports = free_tcp_ports(port_count)
receiver = r_factory()
receiver.subscriptions = ["%s://~0.0.0.0:%s" % (domain, port) for port in ports]
receiver.receive_count = receive_total
receiver.send_reply = True
receiver.timeout = MessengerTests._timeout
self.receivers.append( receiver )
for i in range(sender_count):
sender = s_factory()
sender.targets = ["%s://0.0.0.0:%s/%d" % (domain, port, j) for port in ports for j in range(target_count)]
sender.send_count = send_total
sender.send_batch = send_batch
sender.get_reply = True
sender.timeout = MessengerTests._timeout
self.senders.append( sender )
self._do_test(iterations)
def test_oneway_C(self):
self._do_oneway_test(MessengerReceiverC(), MessengerSenderC())
def test_oneway_C_SSL(self):
self._ssl_check()
self._do_oneway_test(MessengerReceiverC(), MessengerSenderC(), "amqps")
def test_echo_C(self):
self._do_echo_test(MessengerReceiverC(), MessengerSenderC())
def test_echo_C_SSL(self):
self._ssl_check()
self._do_echo_test(MessengerReceiverC(), MessengerSenderC(), "amqps")
def test_star_topology_C(self):
self._do_star_topology_test( MessengerReceiverC, MessengerSenderC )
def test_star_topology_C_SSL(self):
self._ssl_check()
self._do_star_topology_test( MessengerReceiverC, MessengerSenderC, "amqps" )
def test_oneway_reactor(self):
self._do_oneway_test(ReactorReceiverC(), ReactorSenderC())
|
|
#!/usr/bin/env python
#
## Licensed to the .NET Foundation under one or more agreements.
## The .NET Foundation licenses this file to you under the MIT license.
## See the LICENSE file in the project root for more information.
#
##
# Title : coreclr_arguments.py
#
# Notes:
#
# Setup script, to avoid re-writing argument validation between different
# coreclr scripts.
#
################################################################################
################################################################################
import argparse
import datetime
import json
import os
import platform
import shutil
import subprocess
import sys
import tempfile
import time
import re
import string
import xml.etree.ElementTree
from collections import defaultdict
from sys import platform as _platform
################################################################################
################################################################################
class CoreclrArguments:
############################################################################
# ctor
############################################################################
def __init__(self,
args,
require_built_test_dir=False,
require_built_core_root=False,
require_built_product_dir=False,
default_build_type="Debug"):
""" Setup the args based on the argparser obj
Args:
args(ArgParser): Parsed arguments
Notes:
If there is no core_root, or test location passed, create a default
location using the build type and the arch.
"""
# Default values. Note that these are extensible.
self.host_os = None
self.arch = None
self.build_type = None
self.core_root = None
self.coreclr_repo_location = None
self.default_build_type = default_build_type
self.require_built_product_dir = require_built_product_dir
self.require_built_core_root = require_built_core_root
self.require_built_test_dir = require_built_test_dir
self.valid_arches = ["x64", "x86", "arm", "arm64"]
self.valid_build_types = ["Debug", "Checked", "Release"]
self.valid_host_os = ["Windows", "Windows_NT", "OSX", "Linux"]
self.__initialize__(args)
############################################################################
# Instance Methods
############################################################################
def check_build_type(self, build_type):
if build_type is None or len(build_type) == 0:
    return self.default_build_type
# Force the build type to be capitalized
build_type = build_type.capitalize()
if build_type not in self.valid_build_types:
    return False
return build_type
def verify(self,
args,
arg_name,
verify,
failure_str,
arg_value=None,
modify_arg=None,
modify_after_validation=False):
""" Verify an arg
Args:
args (argParser) : arg parser args
arg_name (String) : argument to verify
verify (lambda: arg -> bool) : verify method
failure_str (String) : failure output if not verified
modify_arg (lambda: arg -> arg) : modify the argument before assigning
Returns:
verified (bool)
"""
verified = False
arg_value = None
if isinstance(args, argparse.Namespace):
try:
arg_value = getattr(args, arg_name)
except:
pass
else:
arg_value = args
if modify_arg != None and not modify_after_validation:
arg_value = modify_arg(arg_value)
try:
verified = verify(arg_value)
except:
pass
if verified == False and isinstance(failure_str, str):
print(failure_str)
sys.exit(1)
elif verified == False:
print(failure_str(arg_value))
sys.exit(1)
if modify_arg != None and modify_after_validation:
arg_value = modify_arg(arg_value)
if verified != True and arg_value is None:
arg_value = verified
# Add a new member variable based on the verified arg
setattr(self, arg_name, arg_value)
############################################################################
# Helper Methods
############################################################################
def __initialize__(self, args):
def check_host_os(host_os):
if host_os is None:
host_os = provide_default_host_os()
assert(host_os != None)
return host_os
else:
return host_os in self.valid_host_os
def check_arch(arch):
if arch is None:
arch = provide_default_arch()
assert(arch in self.valid_arches)
return arch
else:
return arch in self.valid_arches
def provide_default_arch():
platform_machine = platform.machine()
if platform_machine == "x86_64":
return "x64"
elif platform_machine == "i386":
return "x86"
elif platform_machine == "armhf":
return "arm"
elif platform_machine == "armel":
return "armel"
elif platform_machine == "aarch64" or platform_machine == "arm64":
return "arm64"
else:
raise RuntimeError("Unsupported platform")
def provide_default_host_os():
if _platform == "linux" or _platform == "linux2":
return "Linux"
elif _platform == "darwin":
return "OSX"
elif _platform == "win32":
return "Windows_NT"
else:
print("Unknown OS: %s" % self.host_os)
sys.exit(1)
return None
def check_and_return_test_location(test_location):
default_test_location = os.path.join(self.coreclr_repo_location, "bin", "tests", "%s.%s.%s" % (self.host_os, self.arch, self.build_type))
if os.path.isdir(default_test_location) or not self.require_built_test_dir:
return default_test_location
elif not os.path.isdir(test_location) and self.require_built_test_dir:
return False
return test_location
def check_and_return_default_core_root(core_root):
default_core_root = os.path.join(self.test_location, "Tests", "Core_Root")
if os.path.isdir(default_core_root) or not self.require_built_core_root:
return default_core_root
elif not os.path.isdir(core_root) and self.require_built_core_root:
return False
return core_root
def check_and_return_default_product_location(product_location):
default_product_location = os.path.join(self.bin_location, "Product", "%s.%s.%s" % (self.host_os, self.arch, self.build_type))
if os.path.isdir(default_product_location) or not self.require_built_product_dir:
return default_product_location
elif not os.path.isdir(product_location) and self.require_built_product_dir:
return False
return product_location
self.coreclr_repo_location = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
self.bin_location = os.path.join(self.coreclr_repo_location, "bin")
self.verify(args,
"host_os",
check_host_os,
"Unsupported host_os",
modify_arg=lambda host_os: provide_default_host_os() if host_os is None else host_os)
self.verify(args,
"arch",
check_arch,
"Unsupported architecture: %s.\nSupported architectures: %s" % (args.arch, ", ".join(self.valid_arches)))
self.verify(args,
"build_type",
self.check_build_type,
"Unsupported configuration: %s.\nSupported configurations: %s" % (args.build_type, ", ".join(self.valid_build_types)),
modify_arg=lambda arg: arg.capitalize())
self.verify(args,
"test_location",
check_and_return_test_location,
"Error, incorrect test location.")
self.verify(args,
"core_root",
check_and_return_default_core_root,
"Error, incorrect core_root location.")
self.verify(args,
"product_location",
check_and_return_default_product_location,
"Error, incorrect product_location.")
|
|
import datetime
import logging
import os
from itertools import groupby
from math import ceil
from django.db.models import Sum
from le_utils.constants import content_kinds
from sqlalchemy import and_
from sqlalchemy import cast
from sqlalchemy import exists
from sqlalchemy import false
from sqlalchemy import func
from sqlalchemy import Integer
from sqlalchemy import or_
from sqlalchemy import select
from .paths import get_content_file_name
from .paths import get_content_storage_file_path
from .sqlalchemybridge import Bridge
from .sqlalchemybridge import filter_by_uuids
from kolibri.core.content.apps import KolibriContentConfig
from kolibri.core.content.errors import InvalidStorageFilenameError
from kolibri.core.content.models import ChannelMetadata
from kolibri.core.content.models import ContentNode
from kolibri.core.content.models import File
from kolibri.core.content.models import LocalFile
from kolibri.core.device.models import ContentCacheKey
logger = logging.getLogger(__name__)
CONTENT_APP_NAME = KolibriContentConfig.label
CHUNKSIZE = 10000
def _generate_MPTT_descendants_statement(mptt_values, ContentNodeTable):
"""
This logic is modified from:
https://github.com/django-mptt/django-mptt/blob/38d46c26ca362c471b097ab96a3616b9b20fb883/mptt/managers.py#L137
in order to render the result as a SQL Alchemy expression that we can use
in other queries.
"""
queries = []
# Group the resultant mptt data by tree_id and parent_id,
# this will allow us to consolidate contiguous siblings to reduce
# the total number of constraints.
# This logic is verbatim from Django MPTT, only the query construction
# has been translated from Django Q statements to SQL Alchemy and_ statements.
for group in groupby(
mptt_values,
key=lambda n: (
# tree id
n[0],
# parent id
n[1],
),
):
next_lft = None
for node in list(group[1]):
tree = node[0]
lft = min_val = node[2]
rght = max_val = node[3]
if next_lft is None:
next_lft = rght + 1
min_max = {"min": min_val, "max": max_val}
elif lft == next_lft:
if min_val < min_max["min"]:
min_max["min"] = min_val
if max_val > min_max["max"]:
min_max["max"] = max_val
next_lft = rght + 1
elif lft != next_lft:
queries.append(
and_(
ContentNodeTable.c.tree_id == tree,
ContentNodeTable.c.lft >= min_max["min"],
ContentNodeTable.c.rght <= min_max["max"],
)
)
min_max = {"min": min_val, "max": max_val}
next_lft = rght + 1
queries.append(
and_(
ContentNodeTable.c.tree_id == tree,
ContentNodeTable.c.lft >= min_max["min"],
ContentNodeTable.c.rght <= min_max["max"],
)
)
return queries
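# Editor's worked example: given mptt_values of
# [(1, 10, 2, 5), (1, 10, 6, 9)] as (tree_id, parent_id, lft, rght), both rows
# fall in the same (tree_id, parent_id) group and the second row's lft (6)
# equals next_lft, so the two contiguous siblings collapse into the single
# constraint tree_id == 1 AND lft >= 2 AND rght <= 9.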
def _MPTT_descendant_ids_statement(bridge, node_ids, min_boundary, max_boundary):
ContentNodeTable = bridge.get_table(ContentNode)
connection = bridge.get_connection()
# Setup list to collect queries
or_queries = []
# First we fetch a list of non-topic ids from the specified node ids
# that match the specified tree boundary ranges
non_topic_results = connection.execute(
select([ContentNodeTable.c.id]).where(
and_(
filter_by_uuids(ContentNodeTable.c.id, node_ids),
# Also filter by the boundary conditions
# We are only interested in non-topic nodes that
# are inside the range
ContentNodeTable.c.rght >= min_boundary,
ContentNodeTable.c.rght <= max_boundary,
# Produce an id list for non topics
ContentNodeTable.c.kind != content_kinds.TOPIC,
)
)
).fetchall()
non_topic_node_ids = [result[0] for result in non_topic_results]
# If we have any node ids that are for non-topics, then we add an explicit query
# to match against those node ids
if non_topic_node_ids:
or_queries.append(filter_by_uuids(ContentNodeTable.c.id, non_topic_node_ids))
# Now get the relevant MPTT values from the database for the specified node_ids
# for topic nodes in the specified lft/rght range.
# Query modified from:
# https://github.com/django-mptt/django-mptt/blob/38d46c26ca362c471b097ab96a3616b9b20fb883/mptt/managers.py#L123
mptt_values = connection.execute(
select(
[
ContentNodeTable.c.tree_id,
ContentNodeTable.c.parent_id,
ContentNodeTable.c.lft,
ContentNodeTable.c.rght,
]
)
.order_by(
ContentNodeTable.c.tree_id,
ContentNodeTable.c.parent_id,
ContentNodeTable.c.lft,
)
.where(
and_(
filter_by_uuids(ContentNodeTable.c.id, node_ids),
# Add constraints specific to our requirements, in terms of batching:
# Also filter by the boundary conditions
# We are only interested in nodes that are ancestors of
# the nodes in the range, but they could be ancestors of any node
# in this range, so we filter the lft value by being less than
# or equal to the max_boundary, and the rght value by being
# greater than or equal to the min_boundary.
ContentNodeTable.c.lft <= max_boundary,
ContentNodeTable.c.rght >= min_boundary,
# And topics:
# Only select values for descendant constraints from topics
ContentNodeTable.c.kind == content_kinds.TOPIC,
)
)
).fetchall()
# Extend the constraints we are filtering by with ones generated from the relevant
# MPTT values we have queried above.
or_queries.extend(
_generate_MPTT_descendants_statement(mptt_values, ContentNodeTable)
)
if not or_queries:
# No constraints that apply in this range, so therefore this query should always
# evaluate to False, because nothing can match it.
return select([ContentNodeTable.c.id]).where(false())
# Return a query that ors each of the constraints
return select([ContentNodeTable.c.id]).where(or_(*or_queries))
def _create_batch_update_statement(
bridge, channel_id, min_boundary, max_boundary, node_ids, exclude_node_ids
):
ContentNodeTable = bridge.get_table(ContentNode)
# Restrict the update statement to nodes falling within the boundaries
batch_statement = ContentNodeTable.update().where(
and_(
# Only update leaf nodes (non topics)
ContentNodeTable.c.kind != content_kinds.TOPIC,
# Only update nodes in the channel we specified
ContentNodeTable.c.channel_id == channel_id,
# Only select nodes inside the boundary conditions
ContentNodeTable.c.rght >= min_boundary,
ContentNodeTable.c.rght <= max_boundary,
)
)
if node_ids is not None:
# Construct a statement that restricts which nodes we update
# in this batch by the specified inclusion constraints
node_ids_statement = _MPTT_descendant_ids_statement(
bridge, node_ids, min_boundary, max_boundary
)
# Add this statement to the query
batch_statement = batch_statement.where(
ContentNodeTable.c.id.in_(node_ids_statement)
)
if exclude_node_ids is not None:
# Construct a statement that restricts nodes we update
# in this batch by the specified exclusion constraints
exclude_node_ids_statement = _MPTT_descendant_ids_statement(
bridge, exclude_node_ids, min_boundary, max_boundary
)
# Add this statement to the query
batch_statement = batch_statement.where(
~ContentNodeTable.c.id.in_(exclude_node_ids_statement)
)
return batch_statement
def _calculate_batch_params(bridge, channel_id, node_ids, exclude_node_ids):
ContentNodeTable = bridge.get_table(ContentNode)
connection = bridge.get_connection()
# To chunk the tree, we first find the full extent of the tree - this gives the
# highest rght value for this channel.
max_rght = connection.execute(
select([func.max(ContentNodeTable.c.rght)]).where(
ContentNodeTable.c.channel_id == channel_id
)
).scalar()
# Count the total number of constraints
constraint_count = len(node_ids or []) + len(exclude_node_ids or [])
# Aim for a constraint per batch count of about 250 on average
# This means that there will be at most 750 parameters from the constraints
# and should therefore also limit the overall SQL expression size.
dynamic_chunksize = int(
min(CHUNKSIZE, ceil(250 * max_rght / (constraint_count or 1)))
)
return max_rght, dynamic_chunksize
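# Editor's worked example: with max_rght = 40000 and 100 constraint ids,
# ceil(250 * 40000 / 100) = 100000, so the batch size is capped at CHUNKSIZE
# (10000) and the channel is processed in ceil(40000 / 10000) = 4 batches;
# with 5000 constraints the chunk size drops to 2000, giving 20 batches.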
def set_leaf_nodes_invisible(channel_id, node_ids=None, exclude_node_ids=None):
"""
Set nodes in a channel as unavailable.
With no additional arguments, this will hide an entire channel.
With the additional nodes arguments, it will selectively flag nodes
as unavailable, based on the passed in ids, setting them as unavailable if
they are in node_ids, or descendants of those nodes, but not in
exclude_node_ids or descendants of those nodes.
"""
bridge = Bridge(app_name=CONTENT_APP_NAME)
connection = bridge.get_connection()
# Start a counter for the while loop
min_boundary = 1
# Calculate batch parameters
max_rght, dynamic_chunksize = _calculate_batch_params(
bridge, channel_id, node_ids, exclude_node_ids
)
logger.info(
"Removing availability of non-topic ContentNode objects in {} batches of {}".format(
int(ceil(max_rght / dynamic_chunksize)), dynamic_chunksize
)
)
while min_boundary < max_rght:
batch_statement = _create_batch_update_statement(
bridge,
channel_id,
min_boundary,
min_boundary + dynamic_chunksize,
node_ids,
exclude_node_ids,
)
# Execute the update for this batch
connection.execute(
batch_statement.values(available=False).execution_options(autocommit=True)
)
min_boundary += dynamic_chunksize
bridge.end()
def set_leaf_node_availability_from_local_file_availability(
channel_id, node_ids=None, exclude_node_ids=None
):
"""
Set nodes in a channel as available, based on their required files.
With no additional arguments, this will make every node in the channel
available or unavailable based on whether the files needed to render
those nodes are present on disk.
With the additional nodes arguments, it will selectively flag nodes
based on the passed in ids, marking their availability if
they are in node_ids, or descendants of those nodes, but not in
exclude_node_ids or descendants of those nodes.
Nodes in the channel not captured by the constraints will not have
their availability changed either way.
"""
bridge = Bridge(app_name=CONTENT_APP_NAME)
# SQL Alchemy reference to the content node table
ContentNodeTable = bridge.get_table(ContentNode)
# SQL Alchemy reference to the file table - a mapping from
# contentnodes to the files that they use
FileTable = bridge.get_table(File)
# SQL Alchemy reference to the localfile table which tracks
# information about the files on disk, such as availability
LocalFileTable = bridge.get_table(LocalFile)
connection = bridge.get_connection()
    # This statement defines the update condition for the contentnode.
    # Running EXISTS on it (as is done below) will produce either
    # True, when the contentnode has the required files available
    # for rendering, or False otherwise.
    contentnode_statement = (
        # We could select any column here, as it is only the existence
        # of matching rows that matters.
select([1]).select_from(
# This does the first step in the many to many lookup for File
# and LocalFile.
FileTable.join(
LocalFileTable,
and_(
# This does the actual correlation between file and local file
FileTable.c.local_file_id == LocalFileTable.c.id,
# This only joins on LocalFile objects that we know
# have associated files on disk.
LocalFileTable.c.available == True, # noqa
),
)
)
# Only look at files that are required (not supplementary)
.where(FileTable.c.supplementary == False)
# Correlate between the contentnode id and the foreign key
# to the content node on the file table to complete the
# many to many lookup
.where(ContentNodeTable.c.id == FileTable.c.contentnode_id)
)
# Start a counter for the while loop
min_boundary = 1
# Calculate batch parameters
max_rght, dynamic_chunksize = _calculate_batch_params(
bridge, channel_id, node_ids, exclude_node_ids
)
logger.info(
"Setting availability of non-topic ContentNode objects based on LocalFile availability in {} batches of {}".format(
int(ceil(max_rght / dynamic_chunksize)), dynamic_chunksize
)
)
while min_boundary < max_rght:
batch_statement = _create_batch_update_statement(
bridge,
channel_id,
min_boundary,
min_boundary + dynamic_chunksize,
node_ids,
exclude_node_ids,
)
# Execute the update for this batch
connection.execute(
batch_statement.values(
available=exists(contentnode_statement)
).execution_options(autocommit=True)
)
min_boundary += dynamic_chunksize
bridge.end()
def mark_local_files_as_unavailable(checksums, destination=None):
mark_local_files_availability(checksums, False, destination=destination)
def mark_local_files_as_available(checksums, destination=None):
"""
    Shortcut method to update the database when we are sure that the files are available.
    Can be used after successful downloads to flag availability without having to do expensive disk reads.
"""
mark_local_files_availability(checksums, True, destination=destination)
def mark_local_files_availability(checksums, availability, destination=None):
if checksums:
bridge = Bridge(app_name=CONTENT_APP_NAME, sqlite_file_path=destination)
LocalFileClass = bridge.get_class(LocalFile)
logger.info(
"Setting availability to {availability} of {number} LocalFile objects based on passed in checksums".format(
number=len(checksums), availability=availability
)
)
for i in range(0, len(checksums), CHUNKSIZE):
bridge.session.bulk_update_mappings(
LocalFileClass,
(
{"id": checksum, "available": availability}
for checksum in checksums[i : i + CHUNKSIZE]
),
)
bridge.session.flush()
bridge.session.commit()
bridge.end()
def set_local_file_availability_from_disk(checksums=None, destination=None):
bridge = Bridge(app_name=CONTENT_APP_NAME, sqlite_file_path=destination)
LocalFileClass = bridge.get_class(LocalFile)
if checksums is None:
logger.info(
"Setting availability of LocalFile objects based on disk availability"
)
files = bridge.session.query(
LocalFileClass.id, LocalFileClass.available, LocalFileClass.extension
).all()
elif type(checksums) == list:
logger.info(
"Setting availability of {number} LocalFile objects based on disk availability".format(
number=len(checksums)
)
)
files = (
bridge.session.query(
LocalFileClass.id, LocalFileClass.available, LocalFileClass.extension
)
.filter(LocalFileClass.id.in_(checksums))
.all()
)
else:
logger.info(
"Setting availability of LocalFile object with checksum {checksum} based on disk availability".format(
checksum=checksums
)
)
files = [bridge.session.query(LocalFileClass).get(checksums)]
checksums_to_set_available = []
checksums_to_set_unavailable = []
for file in files:
try:
# Update if the file exists, *and* the localfile is set as unavailable.
if os.path.exists(
get_content_storage_file_path(get_content_file_name(file))
):
if not file.available:
checksums_to_set_available.append(file.id)
# Update if the file does not exist, *and* the localfile is set as available.
else:
if file.available:
checksums_to_set_unavailable.append(file.id)
except InvalidStorageFilenameError:
continue
bridge.end()
mark_local_files_as_available(checksums_to_set_available, destination=destination)
mark_local_files_as_unavailable(
checksums_to_set_unavailable, destination=destination
)
def recurse_annotation_up_tree(channel_id):
bridge = Bridge(app_name=CONTENT_APP_NAME)
ContentNodeClass = bridge.get_class(ContentNode)
ContentNodeTable = bridge.get_table(ContentNode)
connection = bridge.get_connection()
node_depth = (
bridge.session.query(func.max(ContentNodeClass.level))
.filter_by(channel_id=channel_id)
.scalar()
)
logger.info(
"Annotating ContentNode objects with children for {levels} levels".format(
levels=node_depth
)
)
child = ContentNodeTable.alias()
# start a transaction
trans = connection.begin()
start = datetime.datetime.now()
    # Update all leaf ContentNodes to have num_coach_contents set to 1 or 0
    # Update all leaf ContentNodes to have on_device_resources set to 1 or 0
connection.execute(
ContentNodeTable.update()
.where(
and_(
# In this channel
ContentNodeTable.c.channel_id == channel_id,
# That are not topics
ContentNodeTable.c.kind != content_kinds.TOPIC,
)
)
.values(
num_coach_contents=cast(ContentNodeTable.c.coach_content, Integer()),
on_device_resources=cast(ContentNodeTable.c.available, Integer()),
)
)
# Before starting set availability to False on all topics.
connection.execute(
ContentNodeTable.update()
.where(
and_(
# In this channel
ContentNodeTable.c.channel_id == channel_id,
# That are topics
ContentNodeTable.c.kind == content_kinds.TOPIC,
)
)
.values(available=False)
)
# Expression to capture all available child nodes of a contentnode
available_nodes = select([child.c.available]).where(
and_(
child.c.available == True, # noqa
ContentNodeTable.c.id == child.c.parent_id,
)
)
# Expressions for annotation of coach content
    # Expression that resolves to a boolean over all the available children
    # of a content node: True if they all have coach_content flagged on them,
    # False otherwise.
# Everything after the select statement should be identical to the available_nodes expression above.
if bridge.engine.name == "sqlite":
# Use a min function to simulate an AND.
coach_content_nodes = select([func.min(child.c.coach_content)]).where(
and_(
child.c.available == True, # noqa
ContentNodeTable.c.id == child.c.parent_id,
)
)
elif bridge.engine.name == "postgresql":
# Use the postgres boolean AND operator
coach_content_nodes = select([func.bool_and(child.c.coach_content)]).where(
and_(
child.c.available == True, # noqa
ContentNodeTable.c.id == child.c.parent_id,
)
)
# Expression that sums the total number of coach contents for each child node
# of a contentnode
coach_content_num = select([func.sum(child.c.num_coach_contents)]).where(
and_(
child.c.available == True, # noqa
ContentNodeTable.c.id == child.c.parent_id,
)
)
# Expression that sums the total number of on_device_resources for each child node
# of a contentnode
on_device_num = select([func.sum(child.c.on_device_resources)]).where(
and_(
child.c.available == True, # noqa
ContentNodeTable.c.id == child.c.parent_id,
)
)
# Go from the deepest level to the shallowest
for level in range(node_depth, 0, -1):
logger.info(
"Annotating ContentNode objects with children for level {level}".format(
level=level
)
)
# Only modify topic availability here
connection.execute(
ContentNodeTable.update()
.where(
and_(
ContentNodeTable.c.level == level - 1,
ContentNodeTable.c.channel_id == channel_id,
ContentNodeTable.c.kind == content_kinds.TOPIC,
)
)
# Because we have set availability to False on all topics as a starting point
# we only need to make updates to topics with available children.
.where(exists(available_nodes))
.values(
available=exists(available_nodes),
coach_content=coach_content_nodes,
num_coach_contents=coach_content_num,
on_device_resources=on_device_num,
)
)
# commit the transaction
trans.commit()
elapsed = datetime.datetime.now() - start
logger.debug(
"Recursive topic tree annotation took {} seconds".format(elapsed.seconds)
)
bridge.end()
def calculate_dummy_progress_for_annotation(node_ids, exclude_node_ids, total_progress):
num_annotation_constraints = len(node_ids or []) + len(exclude_node_ids or [])
    # Calculate a percentage of the total progress to devote to annotation,
    # between 1 and 10
    annotation_proportion = min(10, max(1, int(num_annotation_constraints / 500)))
    # Create some progress proportional to the annotation task
return int(annotation_proportion * total_progress / (100 - annotation_proportion))
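# Illustrative example of the calculation above (not part of the original
# module): with 2000 constraint ids and total_progress = 100, the proportion
# is min(10, max(1, int(2000 / 500))) = 4, so the dummy progress returned is
# int(4 * 100 / 96) = 4.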
def propagate_forced_localfile_removal(localfiles):
files = File.objects.filter(supplementary=False, local_file__in=localfiles)
ContentNode.objects.filter(files__in=files).update(available=False)
for channel_id in ChannelMetadata.objects.all().values_list("id", flat=True):
recurse_annotation_up_tree(channel_id)
def update_content_metadata(
channel_id, node_ids=None, exclude_node_ids=None, public=None
):
set_leaf_node_availability_from_local_file_availability(
channel_id, node_ids=node_ids, exclude_node_ids=exclude_node_ids
)
recurse_annotation_up_tree(channel_id)
set_channel_metadata_fields(channel_id, public=public)
ContentCacheKey.update_cache_key()
def set_content_visibility(
channel_id, checksums, node_ids=None, exclude_node_ids=None, public=None
):
mark_local_files_as_available(checksums)
update_content_metadata(
channel_id, node_ids=node_ids, exclude_node_ids=exclude_node_ids, public=public
)
def set_content_visibility_from_disk(channel_id):
set_local_file_availability_from_disk()
update_content_metadata(channel_id)
def set_content_invisible(channel_id, node_ids, exclude_node_ids):
set_leaf_nodes_invisible(channel_id, node_ids, exclude_node_ids)
recurse_annotation_up_tree(channel_id)
set_channel_metadata_fields(channel_id)
ContentCacheKey.update_cache_key()
def set_channel_metadata_fields(channel_id, public=None):
channel = ChannelMetadata.objects.get(id=channel_id)
calculate_published_size(channel)
calculate_total_resource_count(channel)
calculate_included_languages(channel)
calculate_next_order(channel)
if public is not None:
channel.public = public
channel.save()
def files_for_nodes(nodes):
return LocalFile.objects.filter(files__contentnode__in=nodes)
def total_file_size(files_or_nodes):
if issubclass(files_or_nodes.model, LocalFile):
localfiles = files_or_nodes
elif issubclass(files_or_nodes.model, ContentNode):
localfiles = files_for_nodes(files_or_nodes)
else:
raise TypeError("Expected queryset for LocalFile or ContentNode")
return localfiles.distinct().aggregate(Sum("file_size"))["file_size__sum"] or 0
def calculate_published_size(channel):
content_nodes = ContentNode.objects.filter(channel_id=channel.id)
channel.published_size = total_file_size(
files_for_nodes(content_nodes).filter(available=True)
)
channel.save()
def calculate_total_resource_count(channel):
content_nodes = ContentNode.objects.filter(channel_id=channel.id)
channel.total_resource_count = (
content_nodes.filter(available=True)
.exclude(kind=content_kinds.TOPIC)
.dedupe_by_content_id()
.count()
)
channel.save()
def calculate_included_languages(channel):
content_nodes = ContentNode.objects.filter(
channel_id=channel.id, available=True
).exclude(lang=None)
languages = content_nodes.order_by("lang").values_list("lang", flat=True).distinct()
channel.included_languages.add(*list(languages))
def calculate_next_order(channel, model=ChannelMetadata):
latest_order = model.objects.latest("order").order
if latest_order is None:
channel.order = 1
if channel.order is None or channel.order == 0:
channel.order = latest_order + 1
channel.save()
|
|
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.file_utils import cached_property
from transformers.testing_utils import require_tf, require_tokenizers, slow
from ..test_configuration_common import ConfigTester
from ..test_modeling_tf_common import TFModelTesterMixin, ids_tensor
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class TFBlenderbotSmallModelTester:
config_cls = BlenderbotSmallConfig
config_updates = {}
hidden_act = "gelu"
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_labels=False,
vocab_size=99,
hidden_size=32,
num_hidden_layers=5,
num_attention_heads=4,
intermediate_size=37,
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=20,
eos_token_id=2,
pad_token_id=1,
bos_token_id=0,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.eos_token_id = eos_token_id
self.pad_token_id = pad_token_id
self.bos_token_id = bos_token_id
def prepare_config_and_inputs_for_common(self):
input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
input_ids = tf.concat([input_ids, eos_tensor], axis=1)
decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
config = self.config_cls(
vocab_size=self.vocab_size,
d_model=self.hidden_size,
encoder_layers=self.num_hidden_layers,
decoder_layers=self.num_hidden_layers,
encoder_attention_heads=self.num_attention_heads,
decoder_attention_heads=self.num_attention_heads,
encoder_ffn_dim=self.intermediate_size,
decoder_ffn_dim=self.intermediate_size,
dropout=self.hidden_dropout_prob,
attention_dropout=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
eos_token_ids=[2],
bos_token_id=self.bos_token_id,
pad_token_id=self.pad_token_id,
decoder_start_token_id=self.pad_token_id,
**self.config_updates,
)
inputs_dict = prepare_blenderbot_small_inputs_dict(config, input_ids, decoder_input_ids)
return config, inputs_dict
def check_decoder_model_past_large_inputs(self, config, inputs_dict):
model = TFBlenderbotSmallModel(config=config).get_decoder()
input_ids = inputs_dict["input_ids"]
input_ids = input_ids[:1, :]
attention_mask = inputs_dict["attention_mask"][:1, :]
head_mask = inputs_dict["head_mask"]
self.batch_size = 1
# first forward pass
outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
output, past_key_values = outputs.to_tuple()
        # create hypothetical next tokens and extend to next_input_ids
next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)
        # append to next input_ids and attention_mask
next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)
output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]
self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])
# select random slice
random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
output_from_past_slice = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_blenderbot_small_inputs_dict(
config,
input_ids,
decoder_input_ids,
attention_mask=None,
decoder_attention_mask=None,
head_mask=None,
decoder_head_mask=None,
cross_attn_head_mask=None,
):
if attention_mask is None:
attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
if decoder_attention_mask is None:
decoder_attention_mask = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
],
axis=-1,
)
if head_mask is None:
head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
if decoder_head_mask is None:
decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
if cross_attn_head_mask is None:
cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFBlenderbotSmallModelTest(TFModelTesterMixin, unittest.TestCase):
all_model_classes = (
(TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
)
all_generative_model_classes = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
is_encoder_decoder = True
test_pruning = False
test_onnx = False
def setUp(self):
self.model_tester = TFBlenderbotSmallModelTester(self)
self.config_tester = ConfigTester(self, config_class=BlenderbotSmallConfig)
def test_config(self):
self.config_tester.run_common_tests()
def test_decoder_model_past_large_inputs(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
def test_model_common_attributes(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
if model_class in self.all_generative_model_classes:
x = model.get_output_embeddings()
assert isinstance(x, tf.keras.layers.Layer)
name = model.get_bias()
assert isinstance(name, dict)
for k, v in name.items():
assert isinstance(v, tf.Variable)
else:
x = model.get_output_embeddings()
assert x is None
name = model.get_bias()
assert name is None
def test_resize_token_embeddings(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(model, embedding_layer):
if hasattr(embedding_layer, "weight"):
return embedding_layer.weight
else:
                # Build the word embedding weights if they do not exist yet,
                # then retry getting the attribute once built.
model(model.dummy_inputs)
if hasattr(embedding_layer, "weight"):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 10, config.vocab_size + 10, None]:
# build the embeddings
model = model_class(config=config)
old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())
old_final_logits_bias = model.get_bias()
# reshape the embeddings
model.resize_token_embeddings(size)
new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())
new_final_logits_bias = model.get_bias()
# check that the resized embeddings size matches the desired size.
assert_size = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0], assert_size)
# check that weights remain the same after resizing
models_equal = True
for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
models_equal = False
self.assertTrue(models_equal)
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0], assert_size)
models_equal = True
for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
models_equal = False
self.assertTrue(models_equal)
if old_final_logits_bias is not None and new_final_logits_bias is not None:
old_final_logits_bias = old_final_logits_bias["final_logits_bias"]
new_final_logits_bias = new_final_logits_bias["final_logits_bias"]
self.assertEqual(new_final_logits_bias.shape[0], 1)
self.assertEqual(new_final_logits_bias.shape[1], assert_size)
models_equal = True
for old, new in zip(old_final_logits_bias.value(), new_final_logits_bias.value()):
for p1, p2 in zip(old, new):
if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
models_equal = False
self.assertTrue(models_equal)
def test_saved_model_creation(self):
        # This test is too long (>30sec) and makes the CI fail
pass
def _assert_tensors_equal(a, b, atol=1e-12, prefix=""):
"""If tensors not close, or a and b arent both tensors, raise a nice Assertion error."""
if a is None and b is None:
return True
try:
if tf.debugging.assert_near(a, b, atol=atol):
return True
raise
except Exception:
if len(prefix) > 0:
prefix = f"{prefix}: "
raise AssertionError(f"{prefix}{a} != {b}")
def _long_tensor(tok_lst):
return tf.constant(tok_lst, dtype=tf.int32)
@require_tokenizers
@require_tf
class TFBlenderbot90MIntegrationTests(unittest.TestCase):
src_text = [
"Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like i'm going to throw up.\nand why is that?"
]
model_name = "facebook/blenderbot_small-90M"
@cached_property
def tokenizer(self):
# use "old" tokenizer here because of bug when downloading new tokenizer
return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
@cached_property
def model(self):
model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
return model
@slow
def test_90_generation_from_long_input(self):
model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
generated_ids = self.model.generate(
model_inputs.input_ids,
attention_mask=model_inputs.attention_mask,
num_beams=2,
use_cache=True,
)
generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
|
|
import operator
from typing import Callable, Iterable, Tuple
from pypcode import OpCode
import claripy
from claripy.ast.bv import BV
# pylint:disable=abstract-method
def make_bv_sizes_equal(bv1: BV, bv2: BV) -> Tuple[BV, BV]:
"""
Makes two BVs equal in length through sign extension.
"""
if bv1.size() < bv2.size():
return (bv1.sign_extend(bv2.size() - bv1.size()), bv2)
elif bv1.size() > bv2.size():
return (bv1, bv2.sign_extend(bv1.size() - bv2.size()))
else:
return (bv1, bv2)
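# Illustrative example (not part of the original module): given an 8-bit and a
# 32-bit BV, the 8-bit operand is sign-extended by 24 bits so both are 32 bits
# wide, e.g. make_bv_sizes_equal(claripy.BVV(0xFF, 8), claripy.BVV(0, 32)).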
# FIXME: Unimplemented ops (mostly floating point related) have associated C++
# reference code from Ghidra which will need to be ported.
class OpBehavior:
"""
Base class for all operation behaviors.
"""
__slots__ = ("opcode", "is_unary", "is_special")
opcode: int
is_unary: bool
is_special: bool
def __init__(self, opcode: int, is_unary: bool, is_special: bool = False) -> None:
self.opcode = opcode
self.is_unary = is_unary
self.is_special = is_special
def evaluate_unary(self, size_out: int, size_in: int, in1: BV) -> BV:
raise NotImplementedError("Not implemented!")
def evaluate_binary(self, size_out: int, size_in: int, in1: BV, in2: BV) -> BV:
raise NotImplementedError("Not implemented!")
@staticmethod
def generic_compare(args: Iterable[BV], comparison: Callable[[BV, BV], BV]) -> BV:
return claripy.If(
comparison(args[0], args[1]), claripy.BVV(1, 1), claripy.BVV(0, 1)
)
class OpBehaviorCopy(OpBehavior):
"""
Behavior for the COPY operation.
"""
def __init__(self):
super().__init__(OpCode.COPY, True)
def evaluate_unary(self, size_out: int, size_in: int, in1: BV) -> BV:
return in1
class OpBehaviorEqual(OpBehavior):
"""
Behavior for the INT_EQUAL operation.
"""
def __init__(self):
super().__init__(OpCode.INT_EQUAL, False)
def evaluate_binary(self, size_out: int, size_in: int, in1: BV, in2: BV) -> BV:
return self.generic_compare((in1, in2), operator.eq)
class OpBehaviorNotEqual(OpBehavior):
"""
Behavior for the INT_NOTEQUAL operation.
"""
def __init__(self):
super().__init__(OpCode.INT_NOTEQUAL, False)
def evaluate_binary(self, size_out: int, size_in: int, in1: BV, in2: BV) -> BV:
return self.generic_compare((in1, in2), operator.ne)
class OpBehaviorIntSless(OpBehavior):
"""
Behavior for the INT_SLESS operation.
"""
def __init__(self):
super().__init__(OpCode.INT_SLESS, False)
def evaluate_binary(self, size_out: int, size_in: int, in1: BV, in2: BV) -> BV:
return self.generic_compare((in1, in2), claripy.SLT)
class OpBehaviorIntSlessEqual(OpBehavior):
"""
Behavior for the INT_SLESSEQUAL operation.
"""
def __init__(self):
super().__init__(OpCode.INT_SLESSEQUAL, False)
def evaluate_binary(self, size_out: int, size_in: int, in1: BV, in2: BV) -> BV:
return self.generic_compare((in1, in2), claripy.SLE)
class OpBehaviorIntLess(OpBehavior):
"""
Behavior for the INT_LESS operation.
"""
def __init__(self):
super().__init__(OpCode.INT_LESS, False)
def evaluate_binary(self, size_out: int, size_in: int, in1: BV, in2: BV) -> BV:
return self.generic_compare((in1, in2), claripy.ULT)
class OpBehaviorIntLessEqual(OpBehavior):
"""
Behavior for the INT_LESSEQUAL operation.
"""
def __init__(self):
super().__init__(OpCode.INT_LESSEQUAL, False)
def evaluate_binary(self, size_out: int, size_in: int, in1: BV, in2: BV) -> BV:
return self.generic_compare((in1, in2), claripy.ULE)
class OpBehaviorIntZext(OpBehavior):
"""
Behavior for the INT_ZEXT operation.
"""
def __init__(self):
super().__init__(OpCode.INT_ZEXT, True)
def evaluate_unary(self, size_out: int, size_in: int, in1: BV) -> BV:
return in1.zero_extend((size_out-size_in)*8)
class OpBehaviorIntSext(OpBehavior):
"""
Behavior for the INT_SEXT operation.
"""
def __init__(self):
super().__init__(OpCode.INT_SEXT, True)
def evaluate_unary(self, size_out: int, size_in: int, in1: BV) -> BV:
return in1.sign_extend((size_out-size_in)*8)
class OpBehaviorIntAdd(OpBehavior):
"""
Behavior for the INT_ADD operation.
"""
def __init__(self):
super().__init__(OpCode.INT_ADD, False)
def evaluate_binary(self, size_out: int, size_in: int, in1: BV, in2: BV) -> BV:
return in1 + in2
class OpBehaviorIntSub(OpBehavior):
"""
Behavior for the INT_SUB operation.
"""
def __init__(self):
super().__init__(OpCode.INT_SUB, False)
def evaluate_binary(self, size_out: int, size_in: int, in1: BV, in2: BV) -> BV:
return in1 - in2
class OpBehaviorIntCarry(OpBehavior):
"""
Behavior for the INT_CARRY operation.
"""
def __init__(self):
super().__init__(OpCode.INT_CARRY, False)
def evaluate_binary(self, size_out: int, size_in: int, in1: BV, in2: BV) -> BV:
# origin: ccall.py pc_actions_ADD
res = in1 + in2
return claripy.If(claripy.ULT(res, in1), claripy.BVV(1, 1), claripy.BVV(0, 1))
class OpBehaviorIntScarry(OpBehavior):
"""
Behavior for the INT_SCARRY operation.
"""
def __init__(self):
super().__init__(OpCode.INT_SCARRY, False)
def evaluate_binary(self, size_out: int, size_in: int, in1: BV, in2: BV) -> BV:
res = in1 + in2
a = (in1>>(size_in*8-1))&1
b = (in2>>(size_in*8-1))&1
r = (res>>(size_in*8-1))&1
r ^= a
a ^= b
a ^= 1
r &= a
return r
class OpBehaviorIntSborrow(OpBehavior):
"""
Behavior for the INT_SBORROW operation.
"""
def __init__(self):
super().__init__(OpCode.INT_SBORROW, False)
def evaluate_binary(self, size_out: int, size_in: int, in1: BV, in2: BV) -> BV:
res = in1 - in2
a = (in1 >> (size_in * 8 - 1)) & 1 # Grab sign bit
b = (in2 >> (size_in * 8 - 1)) & 1 # Grab sign bit
r = (res >> (size_in * 8 - 1)) & 1 # Grab sign bit
a ^= r
r ^= b
r ^= 1
a &= r
return a
class OpBehaviorInt2Comp(OpBehavior):
"""
Behavior for the INT_2COMP operation.
"""
def __init__(self):
super().__init__(OpCode.INT_2COMP, True)
# uintb OpBehaviorInt2Comp::evaluateUnary(int4 size_out,int4 size_in,uintb in1) const
#
# {
# uintb res = uintb_negate(in1-1,size_in);
# return res;
# }
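    # A sketch of how the reference above might be ported (not part of the
    # original source): INT_2COMP is two's complement negation, which
    # claripy's unary minus already implements for fixed-width bitvectors.
    def evaluate_unary(self, size_out: int, size_in: int, in1: BV) -> BV:
        return -in1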
class OpBehaviorIntNegate(OpBehavior):
"""
Behavior for the INT_NEGATE operation.
"""
def __init__(self):
super().__init__(OpCode.INT_NEGATE, True)
def evaluate_unary(self, size_out: int, size_in: int, in1: BV) -> BV:
return ~in1
class OpBehaviorIntXor(OpBehavior):
"""
Behavior for the INT_XOR operation.
"""
def __init__(self):
super().__init__(OpCode.INT_XOR, False)
def evaluate_binary(self, size_out: int, size_in: int, in1: BV, in2: BV) -> BV:
return in1 ^ in2
class OpBehaviorIntAnd(OpBehavior):
"""
Behavior for the INT_AND operation.
"""
def __init__(self):
super().__init__(OpCode.INT_AND, False)
def evaluate_binary(self, size_out: int, size_in: int, in1: BV, in2: BV) -> BV:
return in1 & in2
class OpBehaviorIntOr(OpBehavior):
"""
Behavior for the INT_OR operation.
"""
def __init__(self):
super().__init__(OpCode.INT_OR, False)
def evaluate_binary(self, size_out: int, size_in: int, in1: BV, in2: BV) -> BV:
return in1 | in2
class OpBehaviorIntLeft(OpBehavior):
"""
Behavior for the INT_LEFT operation.
"""
def __init__(self):
super().__init__(OpCode.INT_LEFT, False)
def evaluate_binary(self, size_out: int, size_in: int, in1: BV, in2: BV) -> BV:
in1, in2 = make_bv_sizes_equal(in1, in2)
return in1 << in2
class OpBehaviorIntRight(OpBehavior):
"""
Behavior for the INT_RIGHT operation.
"""
def __init__(self):
super().__init__(OpCode.INT_RIGHT, False)
def evaluate_binary(self, size_out: int, size_in: int, in1: BV, in2: BV) -> BV:
in1, in2 = make_bv_sizes_equal(in1, in2)
return in1.LShR(in2)
class OpBehaviorIntSright(OpBehavior):
"""
Behavior for the INT_SRIGHT operation.
"""
def __init__(self):
super().__init__(OpCode.INT_SRIGHT, False)
def evaluate_binary(self, size_out: int, size_in: int, in1: BV, in2: BV) -> BV:
in1, in2 = make_bv_sizes_equal(in1, in2)
return in1 >> in2
class OpBehaviorIntMult(OpBehavior):
"""
Behavior for the INT_MULT operation.
"""
def __init__(self):
super().__init__(OpCode.INT_MULT, False)
def evaluate_binary(self, size_out: int, size_in: int, in1: BV, in2: BV) -> BV:
return in1 * in2
class OpBehaviorIntDiv(OpBehavior):
"""
Behavior for the INT_DIV operation.
"""
def __init__(self):
super().__init__(OpCode.INT_DIV, False)
def evaluate_binary(self, size_out: int, size_in: int, in1: BV, in2: BV) -> BV:
return in1 / in2
class OpBehaviorIntSdiv(OpBehavior):
"""
Behavior for the INT_SDIV operation.
"""
def __init__(self):
super().__init__(OpCode.INT_SDIV, False)
# uintb OpBehaviorIntSdiv::evaluateBinary(int4 size_out,int4 size_in,uintb in1,uintb in2) const
#
# {
# if (in2 == 0)
# throw EvaluationError("Divide by 0");
# intb num = in1; // Convert to signed
# intb denom = in2;
# sign_extend(num,8*size_in-1);
# sign_extend(denom,8*size_in-1);
# intb sres = num/denom; // Do the signed division
# zero_extend(sres,8*size_out-1); // Cut to appropriate size
# return (uintb)sres; // Recast as unsigned
# }
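    # A sketch of how the reference above might be ported (not part of the
    # original source). It assumes claripy.SDiv implements truncated signed
    # division like the reference; the explicit divide-by-zero check is
    # omitted because claripy builds a symbolic expression instead.
    def evaluate_binary(self, size_out: int, size_in: int, in1: BV, in2: BV) -> BV:
        return claripy.SDiv(in1, in2)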
class OpBehaviorIntRem(OpBehavior):
"""
Behavior for the INT_REM operation.
"""
def __init__(self):
super().__init__(OpCode.INT_REM, False)
def evaluate_binary(self, size_out: int, size_in: int, in1: BV, in2: BV) -> BV:
return in1 % in2
class OpBehaviorIntSrem(OpBehavior):
"""
Behavior for the INT_SREM operation.
"""
def __init__(self):
super().__init__(OpCode.INT_SREM, False)
# uintb OpBehaviorIntSrem::evaluateBinary(int4 size_out,int4 size_in,uintb in1,uintb in2) const
#
# {
# if (in2 == 0)
# throw EvaluationError("Remainder by 0");
# intb val = in1;
# intb mod = in2;
# sign_extend(val,8*size_in-1); // Convert inputs to signed values
# sign_extend(mod,8*size_in-1);
# intb sres = in1 % in2; // Do the remainder
# zero_extend(sres,8*size_out-1); // Convert back to unsigned
# return (uintb)sres;
# }
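    # A sketch of how the reference above might be ported (not part of the
    # original source), assuming claripy.SMod computes the truncated-division
    # remainder (sign of the dividend) like the reference; the remainder-by-zero
    # check is omitted for the same reason as in the INT_SDIV sketch above.
    def evaluate_binary(self, size_out: int, size_in: int, in1: BV, in2: BV) -> BV:
        return claripy.SMod(in1, in2)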
class OpBehaviorBoolNegate(OpBehavior):
"""
Behavior for the BOOL_NEGATE operation.
"""
def __init__(self):
super().__init__(OpCode.BOOL_NEGATE, True)
def evaluate_unary(self, size_out: int, size_in: int, in1: BV) -> BV:
return in1 ^ 1
class OpBehaviorBoolXor(OpBehavior):
"""
Behavior for the BOOL_XOR operation.
"""
def __init__(self):
super().__init__(OpCode.BOOL_XOR, False)
def evaluate_binary(self, size_out: int, size_in: int, in1: BV, in2: BV) -> BV:
return in1 ^ in2
class OpBehaviorBoolAnd(OpBehavior):
"""
Behavior for the BOOL_AND operation.
"""
def __init__(self):
super().__init__(OpCode.BOOL_AND, False)
def evaluate_binary(self, size_out: int, size_in: int, in1: BV, in2: BV) -> BV:
return in1 & in2
class OpBehaviorBoolOr(OpBehavior):
"""
Behavior for the BOOL_OR operation.
"""
def __init__(self):
super().__init__(OpCode.BOOL_OR, False)
def evaluate_binary(self, size_out: int, size_in: int, in1: BV, in2: BV) -> BV:
return in1 | in2
class OpBehaviorFloatEqual(OpBehavior):
"""
Behavior for the FLOAT_EQUAL operation.
"""
def __init__(self):
super().__init__(OpCode.FLOAT_EQUAL, False)
# uintb OpBehaviorFloatEqual::evaluateBinary(int4 size_out,int4 size_in,uintb in1,uintb in2) const
#
# {
# const FloatFormat *format = translate->getFloatFormat(size_in);
# if (format == (const FloatFormat *)0)
# return OpBehavior::evaluateBinary(size_out,size_in,in1,in2);
#
# return format->opEqual(in1,in2);
# }
class OpBehaviorFloatNotEqual(OpBehavior):
"""
Behavior for the FLOAT_NOTEQUAL operation.
"""
def __init__(self):
super().__init__(OpCode.FLOAT_NOTEQUAL, False)
# uintb OpBehaviorFloatNotEqual::evaluateBinary(int4 size_out,int4 size_in,uintb in1,uintb in2) const
#
# {
# const FloatFormat *format = translate->getFloatFormat(size_in);
# if (format == (const FloatFormat *)0)
# return OpBehavior::evaluateBinary(size_out,size_in,in1,in2);
#
# return format->opNotEqual(in1,in2);
# }
class OpBehaviorFloatLess(OpBehavior):
"""
Behavior for the FLOAT_LESS operation.
"""
def __init__(self):
super().__init__(OpCode.FLOAT_LESS, False)
# uintb OpBehaviorFloatLess::evaluateBinary(int4 size_out,int4 size_in,uintb in1,uintb in2) const
#
# {
# const FloatFormat *format = translate->getFloatFormat(size_in);
# if (format == (const FloatFormat *)0)
# return OpBehavior::evaluateBinary(size_out,size_in,in1,in2);
#
# return format->opLess(in1,in2);
# }
class OpBehaviorFloatLessEqual(OpBehavior):
"""
Behavior for the FLOAT_LESSEQUAL operation.
"""
def __init__(self):
super().__init__(OpCode.FLOAT_LESSEQUAL, False)
# uintb OpBehaviorFloatLessEqual::evaluateBinary(int4 size_out,int4 size_in,uintb in1,uintb in2) const
#
# {
# const FloatFormat *format = translate->getFloatFormat(size_in);
# if (format == (const FloatFormat *)0)
# return OpBehavior::evaluateBinary(size_out,size_in,in1,in2);
#
# return format->opLessEqual(in1,in2);
# }
class OpBehaviorFloatNan(OpBehavior):
"""
Behavior for the FLOAT_NAN operation.
"""
def __init__(self):
super().__init__(OpCode.FLOAT_NAN, True)
# uintb OpBehaviorFloatNan::evaluateUnary(int4 size_out,int4 size_in,uintb in1) const
#
# {
# const FloatFormat *format = translate->getFloatFormat(size_in);
# if (format == (const FloatFormat *)0)
# return OpBehavior::evaluateUnary(size_out,size_in,in1);
#
# return format->opNan(in1);
# }
class OpBehaviorFloatAdd(OpBehavior):
"""
Behavior for the FLOAT_ADD operation.
"""
def __init__(self):
super().__init__(OpCode.FLOAT_ADD, False)
# uintb OpBehaviorFloatAdd::evaluateBinary(int4 size_out,int4 size_in,uintb in1,uintb in2) const
#
# {
# const FloatFormat *format = translate->getFloatFormat(size_in);
# if (format == (const FloatFormat *)0)
# return OpBehavior::evaluateBinary(size_out,size_in,in1,in2);
#
# return format->opAdd(in1,in2);
# }
class OpBehaviorFloatDiv(OpBehavior):
"""
Behavior for the FLOAT_DIV operation.
"""
def __init__(self):
super().__init__(OpCode.FLOAT_DIV, False)
# uintb OpBehaviorFloatDiv::evaluateBinary(int4 size_out,int4 size_in,uintb in1,uintb in2) const
#
# {
# const FloatFormat *format = translate->getFloatFormat(size_in);
# if (format == (const FloatFormat *)0)
# return OpBehavior::evaluateBinary(size_out,size_in,in1,in2);
#
# return format->opDiv(in1,in2);
# }
class OpBehaviorFloatMult(OpBehavior):
"""
Behavior for the FLOAT_MULT operation.
"""
def __init__(self):
super().__init__(OpCode.FLOAT_MULT, False)
# uintb OpBehaviorFloatMult::evaluateBinary(int4 size_out,int4 size_in,uintb in1,uintb in2) const
#
# {
# const FloatFormat *format = translate->getFloatFormat(size_in);
# if (format == (const FloatFormat *)0)
# return OpBehavior::evaluateBinary(size_out,size_in,in1,in2);
#
# return format->opMult(in1,in2);
# }
class OpBehaviorFloatSub(OpBehavior):
"""
Behavior for the FLOAT_SUB operation.
"""
def __init__(self):
super().__init__(OpCode.FLOAT_SUB, False)
# uintb OpBehaviorFloatSub::evaluateBinary(int4 size_out,int4 size_in,uintb in1,uintb in2) const
#
# {
# const FloatFormat *format = translate->getFloatFormat(size_in);
# if (format == (const FloatFormat *)0)
# return OpBehavior::evaluateBinary(size_out,size_in,in1,in2);
#
# return format->opSub(in1,in2);
# }
class OpBehaviorFloatNeg(OpBehavior):
"""
Behavior for the FLOAT_NEG operation.
"""
def __init__(self):
super().__init__(OpCode.FLOAT_NEG, True)
# uintb OpBehaviorFloatNeg::evaluateUnary(int4 size_out,int4 size_in,uintb in1) const
#
# {
# const FloatFormat *format = translate->getFloatFormat(size_in);
# if (format == (const FloatFormat *)0)
# return OpBehavior::evaluateUnary(size_out,size_in,in1);
#
# return format->opNeg(in1);
# }
class OpBehaviorFloatAbs(OpBehavior):
"""
Behavior for the FLOAT_ABS operation.
"""
def __init__(self):
super().__init__(OpCode.FLOAT_ABS, True)
# uintb OpBehaviorFloatAbs::evaluateUnary(int4 size_out,int4 size_in,uintb in1) const
#
# {
# const FloatFormat *format = translate->getFloatFormat(size_in);
# if (format == (const FloatFormat *)0)
# return OpBehavior::evaluateUnary(size_out,size_in,in1);
#
# return format->opAbs(in1);
# }
class OpBehaviorFloatSqrt(OpBehavior):
"""
Behavior for the FLOAT_SQRT operation.
"""
def __init__(self):
super().__init__(OpCode.FLOAT_SQRT, True)
# uintb OpBehaviorFloatSqrt::evaluateUnary(int4 size_out,int4 size_in,uintb in1) const
#
# {
# const FloatFormat *format = translate->getFloatFormat(size_in);
# if (format == (const FloatFormat *)0)
# return OpBehavior::evaluateUnary(size_out,size_in,in1);
#
# return format->opSqrt(in1);
# }
class OpBehaviorFloatInt2Float(OpBehavior):
"""
Behavior for the FLOAT_INT2FLOAT operation.
"""
def __init__(self):
super().__init__(OpCode.FLOAT_INT2FLOAT, True)
# uintb OpBehaviorFloatInt2Float::evaluateUnary(int4 size_out,int4 size_in,uintb in1) const
#
# {
# const FloatFormat *format = translate->getFloatFormat(size_out);
# if (format == (const FloatFormat *)0)
# return OpBehavior::evaluateUnary(size_out,size_in,in1);
#
# return format->opInt2Float(in1,size_in);
# }
class OpBehaviorFloatFloat2Float(OpBehavior):
"""
Behavior for the FLOAT_FLOAT2FLOAT operation.
"""
def __init__(self):
super().__init__(OpCode.FLOAT_FLOAT2FLOAT, True)
# uintb OpBehaviorFloatFloat2Float::evaluateUnary(int4 size_out,int4 size_in,uintb in1) const
#
# {
# const FloatFormat *formatout = translate->getFloatFormat(size_out);
# if (formatout == (const FloatFormat *)0)
# return OpBehavior::evaluateUnary(size_out,size_in,in1);
# const FloatFormat *formatin = translate->getFloatFormat(size_in);
# if (formatin == (const FloatFormat *)0)
# return OpBehavior::evaluateUnary(size_out,size_in,in1);
#
# return formatin->opFloat2Float(in1,*formatout);
# }
class OpBehaviorFloatTrunc(OpBehavior):
"""
Behavior for the FLOAT_TRUNC operation.
"""
def __init__(self):
super().__init__(OpCode.FLOAT_TRUNC, True)
# uintb OpBehaviorFloatTrunc::evaluateUnary(int4 size_out,int4 size_in,uintb in1) const
#
# {
# const FloatFormat *format = translate->getFloatFormat(size_in);
# if (format == (const FloatFormat *)0)
# return OpBehavior::evaluateUnary(size_out,size_in,in1);
#
# return format->opTrunc(in1,size_out);
# }
class OpBehaviorFloatCeil(OpBehavior):
"""
Behavior for the FLOAT_CEIL operation.
"""
def __init__(self):
super().__init__(OpCode.FLOAT_CEIL, True)
# uintb OpBehaviorFloatCeil::evaluateUnary(int4 size_out,int4 size_in,uintb in1) const
#
# {
# const FloatFormat *format = translate->getFloatFormat(size_in);
# if (format == (const FloatFormat *)0)
# return OpBehavior::evaluateUnary(size_out,size_in,in1);
#
# return format->opCeil(in1);
# }
class OpBehaviorFloatFloor(OpBehavior):
"""
Behavior for the FLOAT_FLOOR operation.
"""
def __init__(self):
super().__init__(OpCode.FLOAT_FLOOR, True)
# uintb OpBehaviorFloatFloor::evaluateUnary(int4 size_out,int4 size_in,uintb in1) const
#
# {
# const FloatFormat *format = translate->getFloatFormat(size_in);
# if (format == (const FloatFormat *)0)
# return OpBehavior::evaluateUnary(size_out,size_in,in1);
#
# return format->opFloor(in1);
# }
class OpBehaviorFloatRound(OpBehavior):
"""
Behavior for the FLOAT_ROUND operation.
"""
def __init__(self):
super().__init__(OpCode.FLOAT_ROUND, True)
# uintb OpBehaviorFloatRound::evaluateUnary(int4 size_out,int4 size_in,uintb in1) const
#
# {
# const FloatFormat *format = translate->getFloatFormat(size_in);
# if (format == (const FloatFormat *)0)
# return OpBehavior::evaluateUnary(size_out,size_in,in1);
#
# return format->opRound(in1);
# }
class OpBehaviorPiece(OpBehavior):
"""
Behavior for the PIECE operation.
"""
def __init__(self):
super().__init__(OpCode.PIECE, False)
# uintb OpBehaviorPiece::evaluateBinary(int4 size_out,int4 size_in,uintb in1,uintb in2) const
#
# {
# uintb res = ( in1<<((size_out-size_in)*8)) | in2;
# return res;
# }
class OpBehaviorSubpiece(OpBehavior):
"""
Behavior for the SUBPIECE operation.
"""
def __init__(self):
super().__init__(OpCode.SUBPIECE, False)
def evaluate_binary(self, size_out: int, size_in: int, in1: BV, in2: BV) -> BV:
if in2.size() < in1.size():
in2 = in2.sign_extend(in1.size() - in2.size())
return (in1>>(in2*8)) & (2**(size_out*8)-1)
class OpBehaviorPopcount(OpBehavior):
"""
Behavior for the POPCOUNT operation.
"""
def __init__(self):
super().__init__(OpCode.POPCOUNT, True)
def evaluate_unary(self, size_out: int, size_in: int, in1: BV) -> BV:
expr = claripy.BVV(0, size_out*8)
for a in range(len(in1)):
expr += claripy.Extract(a, a, in1).zero_extend(size_out*8-1)
return expr
class BehaviorFactory:
"""
Returns the behavior object for a given opcode.
"""
def __init__(self):
self._behaviors = {}
self._register_behaviors()
def get_behavior_for_opcode(self, opcode: int) -> OpBehavior:
return self._behaviors[opcode]
def _register_behaviors(self) -> None:
self._behaviors.update({
OpCode.COPY : OpBehaviorCopy(),
OpCode.LOAD : OpBehavior(OpCode.LOAD, False, True),
OpCode.STORE : OpBehavior(OpCode.STORE, False, True),
OpCode.BRANCH : OpBehavior(OpCode.BRANCH, False, True),
OpCode.CBRANCH : OpBehavior(OpCode.CBRANCH, False, True),
OpCode.BRANCHIND : OpBehavior(OpCode.BRANCHIND, False, True),
OpCode.CALL : OpBehavior(OpCode.CALL, False, True),
OpCode.CALLIND : OpBehavior(OpCode.CALLIND, False, True),
OpCode.CALLOTHER : OpBehavior(OpCode.CALLOTHER, False, True),
OpCode.RETURN : OpBehavior(OpCode.RETURN, False, True),
OpCode.MULTIEQUAL : OpBehavior(OpCode.MULTIEQUAL, False, True),
OpCode.INDIRECT : OpBehavior(OpCode.INDIRECT, False, True),
OpCode.PIECE : OpBehaviorPiece(),
OpCode.SUBPIECE : OpBehaviorSubpiece(),
OpCode.INT_EQUAL : OpBehaviorEqual(),
OpCode.INT_NOTEQUAL : OpBehaviorNotEqual(),
OpCode.INT_SLESS : OpBehaviorIntSless(),
OpCode.INT_SLESSEQUAL : OpBehaviorIntSlessEqual(),
OpCode.INT_LESS : OpBehaviorIntLess(),
OpCode.INT_LESSEQUAL : OpBehaviorIntLessEqual(),
OpCode.INT_ZEXT : OpBehaviorIntZext(),
OpCode.INT_SEXT : OpBehaviorIntSext(),
OpCode.INT_ADD : OpBehaviorIntAdd(),
OpCode.INT_SUB : OpBehaviorIntSub(),
OpCode.INT_CARRY : OpBehaviorIntCarry(),
OpCode.INT_SCARRY : OpBehaviorIntScarry(),
OpCode.INT_SBORROW : OpBehaviorIntSborrow(),
OpCode.INT_2COMP : OpBehaviorInt2Comp(),
OpCode.INT_NEGATE : OpBehaviorIntNegate(),
OpCode.INT_XOR : OpBehaviorIntXor(),
OpCode.INT_AND : OpBehaviorIntAnd(),
OpCode.INT_OR : OpBehaviorIntOr(),
OpCode.INT_LEFT : OpBehaviorIntLeft(),
OpCode.INT_RIGHT : OpBehaviorIntRight(),
OpCode.INT_SRIGHT : OpBehaviorIntSright(),
OpCode.INT_MULT : OpBehaviorIntMult(),
OpCode.INT_DIV : OpBehaviorIntDiv(),
OpCode.INT_SDIV : OpBehaviorIntSdiv(),
OpCode.INT_REM : OpBehaviorIntRem(),
OpCode.INT_SREM : OpBehaviorIntSrem(),
OpCode.BOOL_NEGATE : OpBehaviorBoolNegate(),
OpCode.BOOL_XOR : OpBehaviorBoolXor(),
OpCode.BOOL_AND : OpBehaviorBoolAnd(),
OpCode.BOOL_OR : OpBehaviorBoolOr(),
OpCode.CAST : OpBehavior(OpCode.CAST, False, True),
OpCode.PTRADD : OpBehavior(OpCode.PTRADD, False, True),
OpCode.PTRSUB : OpBehavior(OpCode.PTRSUB, False, True),
OpCode.FLOAT_EQUAL : OpBehaviorFloatEqual(),
OpCode.FLOAT_NOTEQUAL : OpBehaviorFloatNotEqual(),
OpCode.FLOAT_LESS : OpBehaviorFloatLess(),
OpCode.FLOAT_LESSEQUAL : OpBehaviorFloatLessEqual(),
OpCode.FLOAT_NAN : OpBehaviorFloatNan(),
OpCode.FLOAT_ADD : OpBehaviorFloatAdd(),
OpCode.FLOAT_DIV : OpBehaviorFloatDiv(),
OpCode.FLOAT_MULT : OpBehaviorFloatMult(),
OpCode.FLOAT_SUB : OpBehaviorFloatSub(),
OpCode.FLOAT_NEG : OpBehaviorFloatNeg(),
OpCode.FLOAT_ABS : OpBehaviorFloatAbs(),
OpCode.FLOAT_SQRT : OpBehaviorFloatSqrt(),
OpCode.FLOAT_INT2FLOAT : OpBehaviorFloatInt2Float(),
OpCode.FLOAT_FLOAT2FLOAT : OpBehaviorFloatFloat2Float(),
OpCode.FLOAT_TRUNC : OpBehaviorFloatTrunc(),
OpCode.FLOAT_CEIL : OpBehaviorFloatCeil(),
OpCode.FLOAT_FLOOR : OpBehaviorFloatFloor(),
OpCode.FLOAT_ROUND : OpBehaviorFloatRound(),
OpCode.SEGMENTOP : OpBehavior(OpCode.SEGMENTOP, False, True),
OpCode.CPOOLREF : OpBehavior(OpCode.CPOOLREF, False, True),
OpCode.NEW : OpBehavior(OpCode.NEW, False, True),
OpCode.INSERT : OpBehavior(OpCode.INSERT, False, True),
OpCode.EXTRACT : OpBehavior(OpCode.EXTRACT, False, True),
OpCode.POPCOUNT : OpBehaviorPopcount(),
})
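# Illustrative usage of the factory (not part of the original module); the
# 4-byte operand sizes and concrete bitvector values below are arbitrary:
#
#   behaviors = BehaviorFactory()
#   int_add = behaviors.get_behavior_for_opcode(OpCode.INT_ADD)
#   result = int_add.evaluate_binary(4, 4, claripy.BVV(1, 32), claripy.BVV(2, 32))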
|
|
from unittest.mock import patch
import graphene
from saleor.core.utils import get_country_name_by_code
from saleor.graphql.payment.enums import (
OrderAction, PaymentChargeStatusEnum, PaymentGatewayEnum)
from saleor.payment.models import ChargeStatus, Payment, TransactionKind
from tests.api.utils import get_graphql_content
VOID_QUERY = """
mutation PaymentVoid($paymentId: ID!) {
paymentVoid(paymentId: $paymentId) {
payment {
id,
chargeStatus
}
errors {
field
message
}
}
}
"""
def test_payment_void_success(
staff_api_client, permission_manage_orders, payment_txn_preauth):
assert payment_txn_preauth.charge_status == ChargeStatus.NOT_CHARGED
payment_id = graphene.Node.to_global_id(
'Payment', payment_txn_preauth.pk)
variables = {'paymentId': payment_id}
response = staff_api_client.post_graphql(
VOID_QUERY, variables, permissions=[permission_manage_orders])
content = get_graphql_content(response)
data = content['data']['paymentVoid']
assert not data['errors']
payment_txn_preauth.refresh_from_db()
assert payment_txn_preauth.is_active is False
assert payment_txn_preauth.transactions.count() == 2
txn = payment_txn_preauth.transactions.last()
assert txn.kind == TransactionKind.VOID
def test_payment_void_gateway_error(
staff_api_client, permission_manage_orders, payment_txn_preauth,
monkeypatch):
assert payment_txn_preauth.charge_status == ChargeStatus.NOT_CHARGED
payment_id = graphene.Node.to_global_id(
'Payment', payment_txn_preauth.pk)
variables = {'paymentId': payment_id}
monkeypatch.setattr(
'saleor.payment.gateways.dummy.dummy_success', lambda: False)
response = staff_api_client.post_graphql(
VOID_QUERY, variables, permissions=[permission_manage_orders])
content = get_graphql_content(response)
data = content['data']['paymentVoid']
assert data['errors']
assert data['errors'][0]['field'] is None
assert data['errors'][0]['message'] == 'Unable to void the transaction.'
payment_txn_preauth.refresh_from_db()
assert payment_txn_preauth.charge_status == ChargeStatus.NOT_CHARGED
assert payment_txn_preauth.is_active is True
assert payment_txn_preauth.transactions.count() == 2
txn = payment_txn_preauth.transactions.last()
assert txn.kind == TransactionKind.VOID
assert not txn.is_success
CREATE_QUERY = """
mutation CheckoutPaymentCreate($checkoutId: ID!, $input: PaymentInput!) {
checkoutPaymentCreate(checkoutId: $checkoutId, input: $input) {
payment {
transactions {
kind,
token
}
chargeStatus
}
errors {
field
message
}
}
}
"""
def test_checkout_add_payment(
user_api_client, checkout_with_item, graphql_address_data):
checkout = checkout_with_item
checkout_id = graphene.Node.to_global_id('Checkout', checkout.pk)
variables = {
'checkoutId': checkout_id,
'input': {
'gateway': 'DUMMY',
'token': 'sample-token',
'amount': str(checkout.get_total().gross.amount),
'billingAddress': graphql_address_data}}
response = user_api_client.post_graphql(CREATE_QUERY, variables)
content = get_graphql_content(response)
data = content['data']['checkoutPaymentCreate']
assert not data['errors']
transactions = data['payment']['transactions']
assert not transactions
payment = Payment.objects.get()
assert payment.checkout == checkout
assert payment.is_active
assert payment.token == 'sample-token'
total = checkout.get_total().gross
assert payment.total == total.amount
assert payment.currency == total.currency
assert payment.charge_status == ChargeStatus.NOT_CHARGED
def test_use_checkout_billing_address_as_payment_billing(
user_api_client, checkout_with_item, address):
checkout = checkout_with_item
checkout_id = graphene.Node.to_global_id('Checkout', checkout.pk)
variables = {
'checkoutId': checkout_id,
'input': {
'gateway': 'DUMMY',
'token': 'sample-token',
'amount': str(checkout.get_total().gross.amount)}}
response = user_api_client.post_graphql(CREATE_QUERY, variables)
content = get_graphql_content(response)
data = content['data']['checkoutPaymentCreate']
# check if proper error is returned if address is missing
assert data['errors'][0]['field'] == 'billingAddress'
# assign the address and try again
address.street_address_1 = 'spanish-inqusition'
address.save()
checkout.billing_address = address
checkout.save()
response = user_api_client.post_graphql(CREATE_QUERY, variables)
content = get_graphql_content(response)
data = content['data']['checkoutPaymentCreate']
checkout.refresh_from_db()
assert checkout.payments.count() == 1
payment = checkout.payments.first()
assert payment.billing_address_1 == address.street_address_1
CAPTURE_QUERY = """
mutation PaymentCapture($paymentId: ID!, $amount: Decimal!) {
paymentCapture(paymentId: $paymentId, amount: $amount) {
payment {
id,
chargeStatus
}
errors {
field
message
}
}
}
"""
def test_payment_capture_success(
staff_api_client, permission_manage_orders, payment_txn_preauth):
payment = payment_txn_preauth
assert payment.charge_status == ChargeStatus.NOT_CHARGED
payment_id = graphene.Node.to_global_id(
'Payment', payment_txn_preauth.pk)
variables = {
'paymentId': payment_id,
'amount': str(payment_txn_preauth.total)}
response = staff_api_client.post_graphql(
CAPTURE_QUERY, variables, permissions=[permission_manage_orders])
content = get_graphql_content(response)
data = content['data']['paymentCapture']
assert not data['errors']
payment_txn_preauth.refresh_from_db()
assert payment.charge_status == ChargeStatus.FULLY_CHARGED
assert payment.transactions.count() == 2
txn = payment.transactions.last()
assert txn.kind == TransactionKind.CAPTURE
def test_payment_capture_with_invalid_argument(
staff_api_client, permission_manage_orders, payment_txn_preauth):
payment = payment_txn_preauth
assert payment.charge_status == ChargeStatus.NOT_CHARGED
payment_id = graphene.Node.to_global_id(
'Payment', payment_txn_preauth.pk)
variables = {
'paymentId': payment_id,
'amount': 0}
response = staff_api_client.post_graphql(
CAPTURE_QUERY, variables, permissions=[permission_manage_orders])
content = get_graphql_content(response)
data = content['data']['paymentCapture']
assert len(data['errors']) == 1
assert data['errors'][0]['message'] == \
'Amount should be a positive number.'
def test_payment_capture_gateway_error(
staff_api_client, permission_manage_orders, payment_txn_preauth,
monkeypatch):
payment = payment_txn_preauth
assert payment.charge_status == ChargeStatus.NOT_CHARGED
payment_id = graphene.Node.to_global_id(
'Payment', payment_txn_preauth.pk)
variables = {
'paymentId': payment_id,
'amount': str(payment_txn_preauth.total)}
monkeypatch.setattr(
'saleor.payment.gateways.dummy.dummy_success', lambda: False)
response = staff_api_client.post_graphql(
CAPTURE_QUERY, variables, permissions=[permission_manage_orders])
content = get_graphql_content(response)
data = content['data']['paymentCapture']
assert data['errors']
assert data['errors'][0]['field'] is None
assert data['errors'][0]['message']
payment_txn_preauth.refresh_from_db()
assert payment.charge_status == ChargeStatus.NOT_CHARGED
assert payment.transactions.count() == 2
txn = payment.transactions.last()
assert txn.kind == TransactionKind.CAPTURE
assert not txn.is_success
REFUND_QUERY = """
mutation PaymentRefund($paymentId: ID!, $amount: Decimal!) {
paymentRefund(paymentId: $paymentId, amount: $amount) {
payment {
id,
chargeStatus
}
errors {
field
message
}
}
}
"""
def test_payment_refund_success(
staff_api_client, permission_manage_orders, payment_txn_captured):
payment = payment_txn_captured
payment.charge_status = ChargeStatus.FULLY_CHARGED
payment.captured_amount = payment.total
payment.save()
payment_id = graphene.Node.to_global_id(
'Payment', payment.pk)
variables = {
'paymentId': payment_id,
'amount': str(payment.total)}
response = staff_api_client.post_graphql(
REFUND_QUERY, variables, permissions=[permission_manage_orders])
content = get_graphql_content(response)
data = content['data']['paymentRefund']
assert not data['errors']
payment.refresh_from_db()
assert payment.charge_status == ChargeStatus.FULLY_REFUNDED
assert payment.transactions.count() == 2
txn = payment.transactions.last()
assert txn.kind == TransactionKind.REFUND
def test_payment_refund_with_invalid_argument(
staff_api_client, permission_manage_orders, payment_txn_captured):
payment = payment_txn_captured
payment.charge_status = ChargeStatus.FULLY_CHARGED
payment.captured_amount = payment.total
payment.save()
payment_id = graphene.Node.to_global_id(
'Payment', payment.pk)
variables = {
'paymentId': payment_id,
'amount': 0}
response = staff_api_client.post_graphql(
REFUND_QUERY, variables, permissions=[permission_manage_orders])
content = get_graphql_content(response)
data = content['data']['paymentRefund']
assert len(data['errors']) == 1
assert data['errors'][0]['message'] == \
'Amount should be a positive number.'
def test_payment_refund_error(
staff_api_client, permission_manage_orders, payment_txn_captured,
monkeypatch):
payment = payment_txn_captured
payment.charge_status = ChargeStatus.FULLY_CHARGED
payment.captured_amount = payment.total
payment.save()
payment_id = graphene.Node.to_global_id(
'Payment', payment.pk)
variables = {
'paymentId': payment_id,
'amount': str(payment.total)}
monkeypatch.setattr(
'saleor.payment.gateways.dummy.dummy_success', lambda: False)
response = staff_api_client.post_graphql(
REFUND_QUERY, variables, permissions=[permission_manage_orders])
content = get_graphql_content(response)
data = content['data']['paymentRefund']
assert data['errors']
assert data['errors'][0]['field'] is None
assert data['errors'][0]['message']
payment.refresh_from_db()
assert payment.charge_status == ChargeStatus.FULLY_CHARGED
assert payment.transactions.count() == 2
txn = payment.transactions.last()
assert txn.kind == TransactionKind.REFUND
assert not txn.is_success
def test_payments_query(
payment_txn_captured, permission_manage_orders, staff_api_client):
query = """ {
payments(first: 20) {
edges {
node {
id
gateway
capturedAmount {
amount
currency
}
total {
amount
currency
}
actions
chargeStatus
billingAddress {
country {
code
country
}
firstName
lastName
cityArea
countryArea
city
companyName
streetAddress1
streetAddress2
postalCode
}
transactions {
amount {
currency
amount
}
}
creditCard {
expMonth
expYear
brand
firstDigits
lastDigits
}
}
}
}
}
"""
response = staff_api_client.post_graphql(
query, permissions=[permission_manage_orders])
content = get_graphql_content(response)
data = content['data']['payments']['edges'][0]['node']
pay = payment_txn_captured
assert data['gateway'] == pay.gateway
assert data['capturedAmount'] == {
'amount': pay.captured_amount, 'currency': pay.currency}
assert data['total'] == {'amount': pay.total, 'currency': pay.currency}
assert data['chargeStatus'] == PaymentChargeStatusEnum.FULLY_CHARGED.name
assert data['billingAddress'] == {
'firstName': pay.billing_first_name,
'lastName': pay.billing_last_name,
'city': pay.billing_city,
'cityArea': pay.billing_city_area,
'countryArea': pay.billing_country_area,
'companyName': pay.billing_company_name,
'streetAddress1': pay.billing_address_1,
'streetAddress2': pay.billing_address_2,
'postalCode': pay.billing_postal_code,
'country': {
'code': pay.billing_country_code,
'country': get_country_name_by_code(pay.billing_country_code)
}
}
assert data['actions'] == [OrderAction.REFUND.name]
txn = pay.transactions.get()
assert data['transactions'] == [{
'amount': {
'currency': pay.currency,
'amount': float(str(txn.amount))}}]
assert data['creditCard'] == {
'expMonth': pay.cc_exp_month,
'expYear': pay.cc_exp_year,
'brand': pay.cc_brand,
'firstDigits': pay.cc_first_digits,
'lastDigits': pay.cc_last_digits}
def test_query_payment(
payment_dummy, user_api_client, permission_manage_orders):
query = """
query payment($id: ID) {
payment(id: $id) {
id
}
}
"""
payment = payment_dummy
payment_id = graphene.Node.to_global_id('Payment', payment.pk)
variables = {'id': payment_id}
response = user_api_client.post_graphql(
query, variables, permissions=[permission_manage_orders])
content = get_graphql_content(response)
received_id = content['data']['payment']['id']
assert received_id == payment_id
def test_query_payments(
payment_dummy, permission_manage_orders, staff_api_client):
query = """
{
payments(first: 20) {
edges {
node {
id
}
}
}
}
"""
payment = payment_dummy
payment_id = graphene.Node.to_global_id('Payment', payment.pk)
response = staff_api_client.post_graphql(
query, {}, permissions=[permission_manage_orders])
content = get_graphql_content(response)
edges = content['data']['payments']['edges']
payment_ids = [edge['node']['id'] for edge in edges]
assert payment_ids == [payment_id]
@patch('saleor.graphql.payment.resolvers.gateway_get_client_token')
def test_query_payment_client_token(mock_get_client_token, user_api_client):
query = """
query paymentClientToken($gateway: GatewaysEnum) {
paymentClientToken(gateway: $gateway)
}
"""
example_token = 'example-token'
mock_get_client_token.return_value = example_token
variables = {'gateway': PaymentGatewayEnum.BRAINTREE.name}
response = user_api_client.post_graphql(query, variables)
content = get_graphql_content(response)
assert mock_get_client_token.called_once_with(
PaymentGatewayEnum.BRAINTREE.name)
token = content['data']['paymentClientToken']
assert token == example_token
|
|
import re
from django import forms
from django.forms.util import ErrorList
from django.forms.widgets import HiddenInput, SelectMultiple
from crits.core import form_consts
from crits.core.handlers import get_source_names, get_item_names, ui_themes
from crits.core.user_role import UserRole
from crits.core.user_tools import get_user_organization
from crits.config.config import CRITsConfig
from crits import settings
def add_bucketlist_to_form(input_form):
"""
Add a bucket_list field to a form.
:param input_form: The form to add to.
:type input_form: :class:`django.forms.Form`
:returns: :class:`django.forms.Form`
"""
input_form.fields[form_consts.Common.BUCKET_LIST_VARIABLE_NAME] = \
forms.CharField(widget=forms.TextInput,
required=False,
label=form_consts.Common.BUCKET_LIST,
help_text="Use comma separated values.")
def add_ticket_to_form(input_form):
"""
Add a tickets field to a form.
:param input_form: The form to add to.
:type input_form: :class:`django.forms.Form`
:returns: :class:`django.forms.Form`
"""
input_form.fields[form_consts.Common.TICKET_VARIABLE_NAME] = \
forms.CharField(widget=forms.TextInput,
required=False,
label=form_consts.Common.TICKET,
help_text="Use comma separated values.")
class AddSourceForm(forms.Form):
"""
Django form for adding a new source to CRITs.
"""
error_css_class = 'error'
required_css_class = 'required'
source = forms.CharField(widget=forms.TextInput, required=True)
class AddReleasabilityForm(forms.Form):
"""
Django form for adding a new releasability instance to a top-level object.
"""
error_css_class = 'error'
required_css_class = 'required'
source = forms.ChoiceField(required=True, widget=forms.Select)
def __init__(self, username, *args, **kwargs):
super(AddReleasabilityForm, self).__init__(*args, **kwargs)
self.fields['source'].choices = [(c.name,
c.name) for c in get_source_names(True,
True,
username)]
class NavMenuForm(forms.Form):
"""
Django form for the user preferences navigation menu.
"""
error_css_class = 'error'
required_css_class = 'required'
    DEFAULT_TEXT_COLOR = "#FFF"
DEFAULT_BACKGROUND_COLOR = '#464646'
DEFAULT_HOVER_TEXT_COLOR = '#39F'
DEFAULT_HOVER_BACKGROUND_COLOR = '#6F6F6F'
nav_menu = forms.ChoiceField(widget=forms.RadioSelect(), initial="default",
help_text="Colors currently only work with topmenu. \
Examples of valid color codes: #39F or #9AAED8.")
    text_color = forms.CharField(label="Text Color", initial=DEFAULT_TEXT_COLOR,
                                 help_text="Default: " + DEFAULT_TEXT_COLOR)
background_color = forms.CharField(label="Background Color", initial=DEFAULT_BACKGROUND_COLOR,
help_text="Default: " + DEFAULT_BACKGROUND_COLOR)
hover_text_color = forms.CharField(label="Hover Text Color", initial=DEFAULT_HOVER_TEXT_COLOR,
help_text="Default: " + DEFAULT_HOVER_TEXT_COLOR)
hover_background_color = forms.CharField(label="Hover Background Color", initial=DEFAULT_HOVER_BACKGROUND_COLOR,
help_text="Default: " + DEFAULT_HOVER_BACKGROUND_COLOR)
def __init__(self, request, *args, **kwargs):
super(NavMenuForm, self).__init__(*args, **kwargs)
prefs = request.user.prefs
for k in prefs.nav:
if k in self.fields:
self.fields[k].initial = prefs.nav[k]
self.fields['nav_menu'].choices = [('default','default'),
('topmenu','topmenu')]
def clean(self):
cleaned_data = super(NavMenuForm, self).clean()
        def check_hex_color(color_code, field_name):
            if not color_code or not re.match(
                    '^#([A-Fa-f0-9]{6}|[A-Fa-f0-9]{3})$', color_code):
                self._errors.setdefault(field_name, ErrorList())
                self._errors[field_name].append(
                    "This is not a valid color code. "
                    "Valid examples: #39F or #9AAED8")
        check_hex_color(cleaned_data.get('text_color'), 'text_color')
        check_hex_color(cleaned_data.get('background_color'),
                        'background_color')
        check_hex_color(cleaned_data.get('hover_text_color'),
                        'hover_text_color')
        check_hex_color(cleaned_data.get('hover_background_color'),
                        'hover_background_color')
return cleaned_data
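# Quick illustration (for reference only, not part of CRITs) of the color-code
# pattern used by NavMenuForm.clean() above: a '#' followed by three or six
# hex digits. The helper name below is hypothetical.
def _hex_color_examples():
    pattern = '^#([A-Fa-f0-9]{6}|[A-Fa-f0-9]{3})$'
    assert re.match(pattern, '#39F')             # short form accepted
    assert re.match(pattern, '#9AAED8')          # long form accepted
    assert re.match(pattern, '#39FF') is None    # four digits rejected
    assert re.match(pattern, '464646') is None   # missing '#' rejected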
class PrefUIForm(forms.Form):
"""
Django form for the user preferences interface.
"""
error_css_class = 'error'
required_css_class = 'required'
theme = forms.ChoiceField(required=True, widget=forms.Select,
initial="default")
# layeredthemes = forms.MultipleChoiceField(required=True,
# label="Layer Themes",
# help_text="Pick Themes to use",
# widget=forms.SelectMultiple)
table_page_size = forms.IntegerField(required=True, min_value = 2, max_value = 10000,
initial=25)
def __init__(self, request, *args, **kwargs):
super(PrefUIForm, self).__init__(*args, **kwargs)
prefs = request.user.prefs
for k in prefs.ui:
if k in self.fields:
self.fields[k].initial = prefs.ui[k]
# self.fields['layeredthemes'].choices = self.fields['theme'].choices
self.fields['theme'].choices = [(t,
t) for t in ui_themes()]
class ToastNotificationConfigForm(forms.Form):
"""
Django form for the user toast notifications.
"""
error_css_class = 'error'
required_css_class = 'required'
enabled = forms.BooleanField(initial=True, required=False)
max_visible_notifications = forms.IntegerField(min_value = 1,
max_value = 10,
initial=5,
required=False,
label="Max Visible Notifications")
acknowledgement_type = forms.ChoiceField(widget=forms.Select,
initial="sticky",
required=False,
label="Acknowledgement Type")
notification_anchor_location = forms.ChoiceField(widget=forms.Select,
initial="bottom_right",
required=False,
label="Anchor Location")
newer_notifications_location = forms.ChoiceField(widget=forms.Select,
initial="top",
required=False,
label="Newer Notifications Located")
initial_notifications_display = forms.ChoiceField(widget=forms.Select,
initial="show",
required=False,
label="On New Notifications")
timeout = forms.IntegerField(min_value = 5,
max_value = 3600,
initial=30,
required=False,
label="Timeout (in seconds)",
help_text="Used only if Acknowledgement Type is set to 'timeout'")
def __init__(self, request, *args, **kwargs):
super(ToastNotificationConfigForm, self).__init__(*args, **kwargs)
prefs = request.user.prefs
if hasattr(prefs, 'toast_notifications'):
for k in prefs.toast_notifications:
if k in self.fields:
self.fields[k].initial = prefs.toast_notifications[k]
self.fields['acknowledgement_type'].choices = [("sticky", "sticky"),
("timeout", "timeout")]
self.fields['notification_anchor_location'].choices = [("top_right", "top_right"),
("bottom_right", "bottom_right")]
self.fields['newer_notifications_location'].choices = [("top", "top"),
("bottom", "bottom")]
self.fields['initial_notifications_display'].choices = [("show", "show"),
("hide", "hide")]
class AddUserRoleForm(forms.Form):
"""
Django form for adding a new user role.
"""
error_css_class = 'error'
required_css_class = 'required'
role = forms.CharField(widget=forms.TextInput, required=True)
class DownloadFileForm(forms.Form):
"""
Django form for downloading a top-level object.
"""
error_css_class = 'error'
required_css_class = 'required'
obj_type = forms.CharField(widget=HiddenInput)
obj_id = forms.CharField(widget=HiddenInput)
objects = forms.MultipleChoiceField(required=True, label="Objects",
help_text="Objects to collect",
widget=forms.SelectMultiple)
depth_limit = forms.CharField(widget=forms.TextInput, required=False,
label="Depth",
initial=0,
help_text="Depth levels to traverse.<br />" +
"0 for this object only. Max: %i")
total_limit = forms.CharField(widget=forms.TextInput, required=False,
label="Maximum",
help_text="Total objects to return. Max: %i")
rel_limit = forms.CharField(widget=forms.TextInput, required=False,
label="Relationships",
help_text="If an object has more relationships<br />" +
"than this, ignore it. Max: %i")
rst_fmt = forms.ChoiceField(choices=[("zip", "zip"),
("stix", "STIX"),
("stix_no_bin", "STIX (no binaries)")],
label="Result format")
bin_fmt = forms.ChoiceField(choices=[("raw", "raw"),
("base64", "base64"),
("zlib", "zlib")],
label="Binary format")
def __init__(self, *args, **kwargs):
crits_config = CRITsConfig.objects().first()
depth_max = getattr(crits_config, 'depth_max', settings.DEPTH_MAX)
total_max = getattr(crits_config, 'total_max', settings.TOTAL_MAX)
rel_max = getattr(crits_config, 'rel_max', settings.REL_MAX)
super(DownloadFileForm, self).__init__(*args, **kwargs)
self.fields['objects'].choices = [('Actor', 'Actors'),
('Certificate', 'Certificates'),
('Domain', 'Domains'),
('Email', 'Emails'),
('Indicator', 'Indicators'),
('PCAP', 'PCAPs'),
('RawData', 'Raw Data'),
('Sample', 'Samples')]
self.fields['total_limit'].initial = total_max
self.fields['rel_limit'].initial = rel_max
self.fields['depth_limit'].help_text = self.fields['depth_limit'].help_text % depth_max
self.fields['total_limit'].help_text = self.fields['total_limit'].help_text % total_max
self.fields['rel_limit'].help_text = self.fields['rel_limit'].help_text % rel_max
class TLDUpdateForm(forms.Form):
"""
Django form to update the TLD list.
"""
error_css_class = 'error'
required_css_class = 'required'
filedata = forms.FileField()
class SourceAccessForm(forms.Form):
"""
Django form for updating a user's profile and source access.
"""
error_css_class = 'error'
required_css_class = 'required'
username = forms.CharField(widget=forms.TextInput(attrs={'size': '50'}),
required=True)
first_name = forms.CharField(widget=forms.TextInput(attrs={'size': '50'}),
required=True)
last_name = forms.CharField(widget=forms.TextInput(attrs={'size': '50'}),
required=True)
email = forms.CharField(widget=forms.TextInput(attrs={'size': '50'}),
required=True)
sources = forms.MultipleChoiceField(required=True,
widget=SelectMultiple(attrs={'class':'multiselect',
'style': 'height: auto;'}))
organization = forms.ChoiceField(required=True, widget=forms.Select)
role = forms.ChoiceField(required=True, widget=forms.Select)
password = forms.CharField(widget=forms.PasswordInput(attrs={'class': 'textbox'}),
required=False)
totp = forms.BooleanField(initial=False, required=False)
secret = forms.CharField(widget=forms.TextInput(attrs={'size': '50'}),
required=False)
subscriptions = forms.CharField(required=False, widget=forms.HiddenInput())
def __init__(self, *args, **kwargs):
super(SourceAccessForm, self).__init__(*args, **kwargs)
self.fields['sources'].choices = [(c.name,
c.name) for c in get_source_names(False,
False,
None)]
self.fields['role'].choices = [(c.name,
c.name) for c in get_item_names(UserRole,
True)]
self.fields['organization'].choices = [(c.name,
c.name) for c in get_source_names(True,
False,
None)]
class SourceForm(forms.Form):
"""
Django form to add source information to a top-level object.
"""
error_css_class = 'error'
required_css_class = 'required'
name = forms.ChoiceField(required=True, widget=forms.Select)
date = forms.CharField(widget=HiddenInput(attrs={'readonly': 'readonly',
'id': 'source_added_date'}),
required=False)
method = forms.CharField(widget=forms.TextInput(attrs={'size': '90'}),
required=False)
reference = forms.CharField(widget=forms.TextInput(attrs={'size': '90'}),
required=False)
analyst = forms.CharField(widget=forms.TextInput(attrs={'readonly': 'readonly'}))
def __init__(self, username, *args, **kwargs):
super(SourceForm, self).__init__(*args, **kwargs)
self.fields['name'].choices = [(c.name,
c.name) for c in get_source_names(True,
True,
username)]
self.fields['name'].initial = get_user_organization(username)
class TicketForm(forms.Form):
"""
Django form to add a ticket to a top-level object.
"""
error_css_class = 'error'
required_css_class = 'required'
ticket_number = forms.CharField(widget=forms.TextInput(attrs={'size': '50'}),
required=True)
date = forms.CharField( widget=forms.HiddenInput(attrs={'size': '50',
'readonly':'readonly',
'id':'id_indicator_ticket_date'}))
|
|
class index_h(object):
    kinds = {
# Declarations
1: 't', # CXCursor_UnexposedDecl # A declaration whose specific kind
# is not exposed via this interface
2: 't', # CXCursor_StructDecl (A C or C++ struct)
3: 't', # CXCursor_UnionDecl (A C or C++ union)
4: 't', # CXCursor_ClassDecl (A C++ class)
5: 't', # CXCursor_EnumDecl (An enumeration)
6: 'm', # CXCursor_FieldDecl (A field (in C) or non-static data member
# (in C++) in a struct, union, or C++ class)
7: 'e', # CXCursor_EnumConstantDecl (An enumerator constant)
8: 'f', # CXCursor_FunctionDecl (A function)
9: 'v', # CXCursor_VarDecl (A variable)
10: 'a', # CXCursor_ParmDecl (A function or method parameter)
11: '11', # CXCursor_ObjCInterfaceDecl (An Objective-C @interface)
12: '12', # CXCursor_ObjCCategoryDecl (An Objective-C @interface for a
# category)
13: '13', # CXCursor_ObjCProtocolDecl
# (An Objective-C @protocol declaration)
14: '14', # CXCursor_ObjCPropertyDecl (An Objective-C @property declaration)
15: '15', # CXCursor_ObjCIvarDecl (An Objective-C instance variable)
16: '16', # CXCursor_ObjCInstanceMethodDecl
# (An Objective-C instance method)
17: '17', # CXCursor_ObjCClassMethodDecl
# (An Objective-C class method)
        18: '18', # CXCursor_ObjCImplementationDecl
# (An Objective-C @implementation)
        19: '19', # CXCursor_ObjCCategoryImplDecl
# (An Objective-C @implementation for a category)
20: 't', # CXCursor_TypedefDecl (A typedef)
21: 'f', # CXCursor_CXXMethod (A C++ class method)
22: 'n', # CXCursor_Namespace (A C++ namespace)
        23: '23', # CXCursor_LinkageSpec (A linkage specification, e.g. extern "C")
24: '+', # CXCursor_Constructor (A C++ constructor)
25: '~', # CXCursor_Destructor (A C++ destructor)
26: '26', # CXCursor_ConversionFunction (A C++ conversion function)
27: 'a', # CXCursor_TemplateTypeParameter (A C++ template type parameter)
28: 'a', # CXCursor_NonTypeTemplateParameter (A C++ non-type template parameter)
29: 'a', # CXCursor_TemplateTemplateParameter (A C++ template template parameter)
30: 'f', # CXCursor_FunctionTemplate (A C++ function template)
31: 'p', # CXCursor_ClassTemplate (A C++ class template)
32: '32', # CXCursor_ClassTemplatePartialSpecialization
# (A C++ class template partial specialization)
33: 'n', # CXCursor_NamespaceAlias (A C++ namespace alias declaration)
34: '34', # CXCursor_UsingDirective (A C++ using directive)
35: '35', # CXCursor_UsingDeclaration (A C++ using declaration)
36: 't', # CXCursor_TypeAliasDecl (A C++ alias declaration)
37: '37', # CXCursor_ObjCSynthesizeDecl (An Objective-C synthesize definition)
38: '38', # CXCursor_ObjCDynamicDecl (An Objective-C dynamic definition)
39: '39', # CXCursor_CXXAccessSpecifier (An access specifier)
# References
40: '40', # CXCursor_ObjCSuperClassRef
41: '41', # CXCursor_ObjCProtocolRef
42: '42', # CXCursor_ObjCClassRef
43: '43', # CXCursor_TypeRef
44: '44', # CXCursor_CXXBaseSpecifier
45: '45', # CXCursor_TemplateRef
# (A reference to a class template, function template, template
# template parameter, or class template partial specialization)
46: '46', # CXCursor_NamespaceRef (A ref to a namespace or namespace alias)
47: '47', # CXCursor_MemberRef (A reference to a member of a struct, union,
# or class that occurs in some non-expression context,
# e.g., a designated initializer)
48: '48', # CXCursor_LabelRef (A reference to a labeled statement)
49: '49', # CXCursor_OverloadedDeclRef
# (A reference to a set of overloaded functions or function
# templates that has not yet been resolved to a specific
# function or function template)
50: '50', # CXCursor_VariableRef
# Error conditions
# 70: '70', # CXCursor_FirstInvalid
70: '70', # CXCursor_InvalidFile
71: '71', # CXCursor_NoDeclFound
72: 'u', # CXCursor_NotImplemented
73: '73', # CXCursor_InvalidCode
# Expressions
100: '100', # CXCursor_UnexposedExpr (An expression whose specific kind is
# not exposed via this interface)
101: '101', # CXCursor_DeclRefExpr (An expression that refers to some value
                    # declaration, such as a function, variable, or enumerator)
102: '102', # CXCursor_MemberRefExpr (An expression that refers to a member
# of a struct, union, class, Objective-C class, etc)
103: '103', # CXCursor_CallExpr (An expression that calls a function)
104: '104', # CXCursor_ObjCMessageExpr (An expression that sends a message
# to an Objective-C object or class)
105: '105', # CXCursor_BlockExpr (An expression that represents a block
# literal)
106: '106', # CXCursor_IntegerLiteral (An integer literal)
107: '107', # CXCursor_FloatingLiteral (A floating point number literal)
108: '108', # CXCursor_ImaginaryLiteral (An imaginary number literal)
109: '109', # CXCursor_StringLiteral (A string literal)
110: '110', # CXCursor_CharacterLiteral (A character literal)
111: '111', # CXCursor_ParenExpr (A parenthesized expression, e.g. "(1)")
112: '112', # CXCursor_UnaryOperator (This represents the unary-expression's
# (except sizeof and alignof))
113: '113', # CXCursor_ArraySubscriptExpr ([C99 6.5.2.1] Array Subscripting)
114: '114', # CXCursor_BinaryOperator (A builtin binary operation expression
# such as "x + y" or "x <= y")
115: '115', # CXCursor_CompoundAssignOperator (Compound assignment such as
# "+=")
116: '116', # CXCursor_ConditionalOperator (The ?: ternary operator)
117: '117', # CXCursor_CStyleCastExpr (An explicit cast in C (C99 6.5.4) or
# C-style cast in C++ (C++ [expr.cast]), which uses the syntax
# (Type)expr)
118: '118', # CXCursor_CompoundLiteralExpr ([C99 6.5.2.5])
119: '119', # CXCursor_InitListExpr (Describes an C or C++ initializer list)
120: '120', # CXCursor_AddrLabelExpr (The GNU address of label extension,
# representing &&label)
121: '121', # CXCursor_StmtExpr (This is the GNU Statement Expression
# extension: ({int X=4; X;})
122: '122', # CXCursor_GenericSelectionExpr (brief Represents a C11 generic
# selection)
123: '123', # CXCursor_GNUNullExpr (Implements the GNU __null extension)
124: '124', # CXCursor_CXXStaticCastExpr (C++'s static_cast<> expression)
125: '125', # CXCursor_CXXDynamicCastExpr (C++'s dynamic_cast<> expression)
126: '126', # CXCursor_CXXReinterpretCastExpr (C++'s reinterpret_cast<>
# expression)
127: '127', # CXCursor_CXXConstCastExpr (C++'s const_cast<> expression)
128: '128', # CXCursor_CXXFunctionalCastExpr (Represents an explicit C++ type
# conversion that uses "functional" notion
# (C++ [expr.type.conv]))
129: '129', # CXCursor_CXXTypeidExpr (A C++ typeid expression
# (C++ [expr.typeid]))
130: '130', # CXCursor_CXXBoolLiteralExpr (brief [C++ 2.13.5] C++ Boolean
# Literal)
131: '131', # CXCursor_CXXNullPtrLiteralExpr ([C++0x 2.14.7] C++ Pointer
# Literal)
        132: '132', # CXCursor_CXXThisExpr (Represents the "this" expression in C++)
133: '133', # CXCursor_CXXThrowExpr ([C++ 15] C++ Throw Expression)
134: '134', # CXCursor_CXXNewExpr (A new expression for memory allocation
# and constructor calls)
135: '135', # CXCursor_CXXDeleteExpr (A delete expression for memory
# deallocation and destructor calls)
136: '136', # CXCursor_UnaryExpr (A unary expression)
137: '137', # CXCursor_ObjCStringLiteral (An Objective-C string literal
# i.e. @"foo")
138: '138', # CXCursor_ObjCEncodeExpr (An Objective-C @encode expression)
139: '139', # CXCursor_ObjCSelectorExpr (An Objective-C @selector expression)
140: '140', # CXCursor_ObjCProtocolExpr (An Objective-C @protocol expression)
141: '141', # CXCursor_ObjCBridgedCastExpr (An Objective-C "bridged" cast
# expression, which casts between Objective-C pointers and C
# pointers, transferring ownership in the process)
142: '142', # CXCursor_PackExpansionExpr (Represents a C++0x pack expansion
# that produces a sequence of expressions)
143: '143', # CXCursor_SizeOfPackExpr (Represents an expression that computes
# the length of a parameter pack)
144: '144', # CXCursor_LambdaExpr (Represents a C++ lambda expression that
# produces a local function object)
145: '145', # CXCursor_ObjCBoolLiteralExpr (Objective-c Boolean Literal)
# Statements
200: '200', # CXCursor_UnexposedStmt (A statement whose specific kind is not
# exposed via this interface)
201: '201', # CXCursor_LabelStmt (A labelled statement in a function)
202: '202', # CXCursor_CompoundStmt (A group of statements like
                    # { stmt stmt })
        203: '203', # CXCursor_CaseStmt (A case statement)
        204: '204', # CXCursor_DefaultStmt (A default statement)
        205: '205', # CXCursor_IfStmt (An if statement)
206: '206', # CXCursor_SwitchStmt (A switch statement)
207: '207', # CXCursor_WhileStmt (A while statement)
208: '208', # CXCursor_DoStmt (A do statement)
209: '209', # CXCursor_ForStmt (A for statement)
210: '210', # CXCursor_GotoStmt (A goto statement)
211: '211', # CXCursor_IndirectGotoStmt (An indirect goto statement)
212: '212', # CXCursor_ContinueStmt (A continue statement)
213: '213', # CXCursor_BreakStmt (A break statement)
214: '214', # CXCursor_ReturnStmt (A return statement)
215: '215', # CXCursor_GCCAsmStmt (A GCC inline assembly statement extension)
216: '216', # CXCursor_ObjCAtTryStmt (Objective-C's overall try-catch-finally
# statement.
217: '217', # CXCursor_ObjCAtCatchStmt (Objective-C's catch statement)
218: '218', # CXCursor_ObjCAtFinallyStmt (Objective-C's finally statement)
219: '219', # CXCursor_ObjCAtThrowStmt (Objective-C's throw statement)
220: '220', # CXCursor_ObjCAtSynchronizedStmt (Objective-C's synchronized
# statement)
221: '221', # CXCursor_ObjCAutoreleasePoolStmt (Objective-C's autorelease
# pool statement)
222: '222', # CXCursor_ObjCForCollectionStmt (Objective-C's collection
# statement)
223: '223', # CXCursor_CXXCatchStmt (C++'s catch statement)
224: '224', # CXCursor_CXXTryStmt (C++'s try statement)
225: '225', # CXCursor_CXXForRangeStmt (C++'s for (*: *) statement)
226: '226', # CXCursor_SEHTryStmt (Windows Structured Exception Handling's
# try statement)
227: '227', # CXCursor_SEHExceptStmt (Windows Structured Exception Handling's
# except statement.
228: '228', # CXCursor_SEHFinallyStmt (Windows Structured Exception
# Handling's finally statement)
229: '229', # CXCursor_MSAsmStmt (A MS inline assembly statement extension)
        230: '230', # CXCursor_NullStmt (The null statement ";": C99 6.8.3p3)
231: '231', # CXCursor_DeclStmt (Adaptor class for mixing declarations with
# statements and expressions)
# Translation unit
300: '300', # CXCursor_TranslationUnit (Cursor that represents the
# translation unit itself)
# Attributes
400: '400', # CXCursor_UnexposedAttr (An attribute whose specific kind is
# not exposed via this interface)
401: '401', # CXCursor_IBActionAttr
402: '402', # CXCursor_IBOutletAttr
403: '403', # CXCursor_IBOutletCollectionAttr
404: '404', # CXCursor_CXXFinalAttr
405: '405', # CXCursor_CXXOverrideAttr
406: '406', # CXCursor_AnnotateAttr
407: '407', # CXCursor_AsmLabelAttr
# Preprocessing
500: '500', # CXCursor_PreprocessingDirective
501: 'd', # CXCursor_MacroDefinition
502: '502', # CXCursor_MacroInstantiation
503: '503', # CXCursor_InclusionDirective
# Modules
600: '600', # CXCursor_ModuleImportDecl (A module import declaration)
    }
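# Minimal lookup sketch (an assumption about intent: the table above maps
# libclang CXCursor kind values to single-character tag kinds, with numeric
# strings standing in where no letter has been assigned yet).
def _kind_letter(cursor_kind_value):
    # Fall back to 'u' (not implemented/unknown) for kinds missing from the table.
    return index_h.kinds.get(cursor_kind_value, 'u')
# _kind_letter(8)   -> 'f'  (CXCursor_FunctionDecl)
# _kind_letter(22)  -> 'n'  (CXCursor_Namespace)
# _kind_letter(501) -> 'd'  (CXCursor_MacroDefinition)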
|
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class capolicy(base_resource) :
""" Configuration for contentadaptation policy resource. """
def __init__(self) :
self._name = ""
self._rule = ""
self._action = ""
self._undefaction = ""
self._comment = ""
self._logaction = ""
self._newname = ""
self._hits = 0
self._undefhits = 0
self._isdefault = False
self.___count = 0
@property
def name(self) :
ur"""Name for the content adaptation policy. Must begin with an ASCII alphabetic or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at (@), equals (=), and hyphen (-) characters. Can be changed after the policy is created.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
ur"""Name for the content adaptation policy. Must begin with an ASCII alphabetic or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at (@), equals (=), and hyphen (-) characters. Can be changed after the policy is created.
"""
try :
self._name = name
except Exception as e:
raise e
@property
def rule(self) :
ur"""Expression that determines which requests or responses match the content adaptation policy. When specifying the rule in the CLI, the description must be enclosed within double quotes.
"""
try :
return self._rule
except Exception as e:
raise e
@rule.setter
def rule(self, rule) :
ur"""Expression that determines which requests or responses match the content adaptation policy. When specifying the rule in the CLI, the description must be enclosed within double quotes.
"""
try :
self._rule = rule
except Exception as e:
raise e
@property
def action(self) :
ur"""Name of content adaptation action to be executed when the rule is evaluated to true.
"""
try :
return self._action
except Exception as e:
raise e
@action.setter
def action(self, action) :
ur"""Name of content adaptation action to be executed when the rule is evaluated to true.
"""
try :
self._action = action
except Exception as e:
raise e
@property
def undefaction(self) :
try :
return self._undefaction
except Exception as e:
raise e
@undefaction.setter
def undefaction(self, undefaction) :
try :
self._undefaction = undefaction
except Exception as e:
raise e
@property
def comment(self) :
ur"""Information about the content adaptation policy.
"""
try :
return self._comment
except Exception as e:
raise e
@comment.setter
def comment(self, comment) :
ur"""Information about the content adaptation policy.
"""
try :
self._comment = comment
except Exception as e:
raise e
@property
def logaction(self) :
ur"""Name of messagelog action to use when a request matches this policy.
"""
try :
return self._logaction
except Exception as e:
raise e
@logaction.setter
def logaction(self, logaction) :
ur"""Name of messagelog action to use when a request matches this policy.
"""
try :
self._logaction = logaction
except Exception as e:
raise e
@property
def newname(self) :
ur"""New name for the content accelerator policy.<br/>Minimum length = 1.
"""
try :
return self._newname
except Exception as e:
raise e
@newname.setter
def newname(self, newname) :
ur"""New name for the content accelerator policy.<br/>Minimum length = 1
"""
try :
self._newname = newname
except Exception as e:
raise e
@property
def hits(self) :
ur"""Number of hits.
"""
try :
return self._hits
except Exception as e:
raise e
@property
def undefhits(self) :
ur"""Number of Undef hits.
"""
try :
return self._undefhits
except Exception as e:
raise e
@property
def isdefault(self) :
ur"""A value of true is returned if it is a default ContentAdaptationpolicy.
"""
try :
return self._isdefault
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
ur""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(capolicy_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.capolicy
except Exception as e :
raise e
def _get_object_name(self) :
ur""" Returns the value of object identifier argument
"""
try :
if self.name is not None :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
ur""" Use this API to add capolicy.
"""
try :
if type(resource) is not list :
addresource = capolicy()
addresource.name = resource.name
addresource.rule = resource.rule
addresource.action = resource.action
addresource.undefaction = resource.undefaction
addresource.comment = resource.comment
addresource.logaction = resource.logaction
return addresource.add_resource(client)
else :
if (resource and len(resource) > 0) :
addresources = [ capolicy() for _ in range(len(resource))]
for i in range(len(resource)) :
addresources[i].name = resource[i].name
addresources[i].rule = resource[i].rule
addresources[i].action = resource[i].action
addresources[i].undefaction = resource[i].undefaction
addresources[i].comment = resource[i].comment
addresources[i].logaction = resource[i].logaction
result = cls.add_bulk_request(client, addresources)
return result
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
ur""" Use this API to delete capolicy.
"""
try :
if type(resource) is not list :
deleteresource = capolicy()
if type(resource) != type(deleteresource):
deleteresource.name = resource
else :
deleteresource.name = resource.name
return deleteresource.delete_resource(client)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
deleteresources = [ capolicy() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i]
else :
if (resource and len(resource) > 0) :
deleteresources = [ capolicy() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i].name
result = cls.delete_bulk_request(client, deleteresources)
return result
except Exception as e :
raise e
@classmethod
def update(cls, client, resource) :
ur""" Use this API to update capolicy.
"""
try :
if type(resource) is not list :
updateresource = capolicy()
updateresource.name = resource.name
updateresource.rule = resource.rule
updateresource.action = resource.action
updateresource.comment = resource.comment
updateresource.logaction = resource.logaction
updateresource.undefaction = resource.undefaction
return updateresource.update_resource(client)
else :
if (resource and len(resource) > 0) :
updateresources = [ capolicy() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].name = resource[i].name
updateresources[i].rule = resource[i].rule
updateresources[i].action = resource[i].action
updateresources[i].comment = resource[i].comment
updateresources[i].logaction = resource[i].logaction
updateresources[i].undefaction = resource[i].undefaction
result = cls.update_bulk_request(client, updateresources)
return result
except Exception as e :
raise e
@classmethod
def unset(cls, client, resource, args) :
ur""" Use this API to unset the properties of capolicy resource.
Properties that need to be unset are specified in args array.
"""
try :
if type(resource) is not list :
unsetresource = capolicy()
if type(resource) != type(unsetresource):
unsetresource.name = resource
else :
unsetresource.name = resource.name
return unsetresource.unset_resource(client, args)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
unsetresources = [ capolicy() for _ in range(len(resource))]
for i in range(len(resource)) :
unsetresources[i].name = resource[i]
else :
if (resource and len(resource) > 0) :
unsetresources = [ capolicy() for _ in range(len(resource))]
for i in range(len(resource)) :
unsetresources[i].name = resource[i].name
result = cls.unset_bulk_request(client, unsetresources, args)
return result
except Exception as e :
raise e
@classmethod
def rename(cls, client, resource, new_name) :
ur""" Use this API to rename a capolicy resource.
"""
try :
renameresource = capolicy()
if type(resource) == cls :
renameresource.name = resource.name
else :
renameresource.name = resource
return renameresource.rename_resource(client,new_name)
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
ur""" Use this API to fetch all the capolicy resources that are configured on netscaler.
"""
try :
if not name :
obj = capolicy()
response = obj.get_resources(client, option_)
else :
if type(name) != cls :
if type(name) is not list :
obj = capolicy()
obj.name = name
response = obj.get_resource(client, option_)
else :
if name and len(name) > 0 :
response = [capolicy() for _ in range(len(name))]
obj = [capolicy() for _ in range(len(name))]
for i in range(len(name)) :
obj[i] = capolicy()
obj[i].name = name[i]
response[i] = obj[i].get_resource(client, option_)
return response
except Exception as e :
raise e
@classmethod
def get_filtered(cls, client, filter_) :
ur""" Use this API to fetch filtered set of capolicy resources.
        Filter string should be in JSON format, e.g.: "port:80,servicetype:HTTP".
"""
try :
obj = capolicy()
option_ = options()
option_.filter = filter_
response = obj.getfiltered(client, option_)
return response
except Exception as e :
raise e
@classmethod
def count(cls, client) :
ur""" Use this API to count the capolicy resources configured on NetScaler.
"""
try :
obj = capolicy()
option_ = options()
option_.count = True
response = obj.get_resources(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
@classmethod
def count_filtered(cls, client, filter_) :
ur""" Use this API to count filtered the set of capolicy resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = capolicy()
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
class capolicy_response(base_response) :
def __init__(self, length=1) :
self.capolicy = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.capolicy = [capolicy() for _ in range(length)]
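# Rough usage sketch (not part of the SDK). It assumes the usual NITRO client
# entry point (nitro_service) plus placeholder address, credentials and policy
# names; exact session handling may differ between SDK versions.
def _example_add_and_list_capolicy():
    from nssrc.com.citrix.netscaler.nitro.service.nitro_service import nitro_service
    client = nitro_service("10.0.0.10", "http")   # placeholder NSIP
    client.login("nsroot", "nsroot")              # placeholder credentials
    try:
        new_policy = capolicy()
        new_policy.name = "ca_pol_example"        # placeholder policy name
        new_policy.rule = "true"
        new_policy.action = "ca_act_example"      # placeholder, must already exist
        capolicy.add(client, new_policy)
        return [p.name for p in (capolicy.get(client) or [])]
    finally:
        client.logout()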
|
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import re
import subprocess
import sys
import textwrap
from copy import deepcopy
from dataclasses import dataclass
from pathlib import Path
from typing import *
from typing import Match, Pattern
GIT_TEMPLATE = ""
@dataclass(eq=True, frozen=True)
class Option:
section: str
name: str
raw_contents: str
def __str__(self) -> str:
return f"{self.section}.{self.name}"
def clean_name(self) -> str:
return "{}.{}".format(self.section, re.sub("\\W", "", self.name).strip("_"))
@dataclass
class Section:
name: str
header: str
options: List[Option]
@dataclass
class Config:
prefix: str
sections: List[Section]
suffix: str
# Coincidentally, ^{/call} is the last entry in the file. There's a {/call} later, but
# it is indented.
main_re = re.compile(
"(?P<prefix>.*?)(?P<body>{call buckconfig.section}.*^{/call})\n+(?P<suffix>.*)",
re.MULTILINE | re.DOTALL,
)
section_re = re.compile(
r"""(?P<header>{call buckconfig.section}
\s*{param name: '(?P<name>\S+)' /}
\s*{param description}.*?
\s*{/param}
\s*{/call})(?P<inner>.*?)({call buckconfig.section}|\Z)""",
re.MULTILINE | re.DOTALL,
)
option_re = re.compile(
r"""(?P<raw>^{call buckconfig.entry}.*?{param section: '(?P<section>.+?)' /}.*?{param name: '(?P<name>.+?)' /}.*?^{/call})""",
re.MULTILINE | re.DOTALL,
)
def overlapping_iter(
regex: Pattern[str], data: str, group: str
) -> Generator[Match[str], None, None]:
"""Regex iterator that allows us to overlap a bit with other matches"""
start_idx = 0
while start_idx < len(data):
match = regex.match(data, start_idx)
if not match:
return
yield match
start_idx = match.end(group)
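# Tiny self-contained illustration (not used by the tool itself) of why the
# custom iterator above exists: when a pattern must *consume* the token that
# starts the next record (as section_re consumes the following
# "{call buckconfig.section}"), re.finditer skips that record, while restarting
# at match.end(<group>) re-scans it. The pattern and data below are made up.
def _demo_overlapping_iter() -> List[str]:
    toy = re.compile(r"(?P<name>@\w+)(?P<body>[^@]*)(@|\Z)", re.DOTALL)
    data = "@alpha one two @beta three"
    # re.finditer would yield only "@alpha"; overlapping_iter yields both names.
    return [m.group("name") for m in overlapping_iter(toy, data, "body")]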
def get_options(path: Path) -> Config:
with open(path, "r") as fin:
data = fin.read()
global_match = main_re.match(data)
if not global_match:
raise ValueError("Could not match regex on file")
prefix = global_match.group("prefix")
suffix = global_match.group("suffix")
sections = []
for section_match in overlapping_iter(
section_re, global_match.group("body"), "inner"
):
options = []
for option_match in option_re.finditer(section_match.group("inner")):
options.append(
Option(
option_match.group("section"),
option_match.group("name"),
option_match.group("raw"),
)
)
sections.append(
Section(section_match.group("name"), section_match.group("header"), options)
)
suffix_idx = section_match.end("inner") + 1
return Config(prefix, sections, suffix)
def sort_one_option(config: Config, exclude: List[str]) -> Optional[Option]:
"""Here it is, an in the wild usecase for a bubble sort"""
changed_option = None
for section in config.sections:
for i in range(len(section.options) - 1):
curr_opt = section.options[i]
next_opt = section.options[i + 1]
full_name = str(curr_opt)
if curr_opt.name > next_opt.name and full_name not in exclude:
for j in range(i, len(section.options) - 1):
if section.options[j].name > section.options[j + 1].name:
section.options[j + 1], section.options[j] = (
section.options[j],
section.options[j + 1],
)
changed_option = section.options[j]
else:
break
return changed_option
return changed_option
def write_config(config: Config, dest: Path) -> None:
with open(dest, "w") as fout:
fout.write(config.prefix)
for section in config.sections:
fout.write(section.header)
for option in section.options:
fout.write("\n\n")
fout.write(option.raw_contents)
fout.write("\n\n")
fout.write(config.suffix)
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser()
parser.add_argument(
"exclude",
nargs="*",
help="List of <section.option> to ignore when alphabetizing",
)
parser.add_argument(
"--list", action="store_true", help="Just list available options"
)
parser.add_argument("--no-commit", action="store_false", dest="commit")
parser.add_argument("--branch-template", default="reorder_docs_{option_name}")
parser.add_argument("--commit-template-file")
parser.add_argument(
"--buckconfig-docs-path",
type=Path,
default=Path("docs/files-and-dirs/buckconfig.soy"),
)
parser.add_argument(
"--dest", type=Path, default=Path("docs/files-and-dirs/buckconfig.soy")
)
args = parser.parse_args()
if args.commit and not args.commit_template_file:
parser.error(
"\n"
+ textwrap.fill(
"commit requested, but --commit-template-file was not specified. "
"Either specify the file, or run with --no-commit",
80,
)
)
return args
def git_is_clean() -> bool:
ret = subprocess.run(["git", "diff", "--quiet"])
if ret.returncode == 0:
return True
elif ret.returncode == 1:
return False
else:
ret.check_returncode()
def accept_git_changes() -> bool:
subprocess.run(["git", "diff"])
print(
"Commit previously shown changes? A new branch will be created, and summary "
"automatically added [y/N] ",
end="",
)
if input().lower().strip() == "y":
return True
else:
print("Not creating commit. Changes are present in your working directory")
return False
def create_branch(branch_template, changed_option) -> None:
branch_name = branch_template.format(option_name=changed_option.clean_name())
subprocess.run(["git", "branch", "--track", branch_name], check=True)
subprocess.run(["git", "checkout", branch_name], check=True)
def create_commit(commit_template_file, changed_option) -> None:
with open(commit_template_file, "r") as fin:
commit_template = fin.read()
commit_message = commit_template.format(option_name=changed_option.clean_name())
subprocess.run(
["git", "commit", "-a", "-F", "-"],
input=commit_message,
encoding="utf8",
check=True,
)
def main() -> int:
args = parse_args()
config = get_options(args.buckconfig_docs_path)
if args.list:
for section in config.sections:
for option in section.options:
print(f"{option.section}.{option.name}")
return 0
if args.commit and not git_is_clean():
print(
"In order to commit, you must have a clean repository to start with. "
"(run with --no-commit to skip automatically committing)"
)
return 1
changed_option = sort_one_option(config, args.exclude)
write_config(config, args.dest)
if changed_option and args.commit:
if accept_git_changes():
create_branch(args.branch_template, changed_option)
create_commit(args.commit_template_file, changed_option)
else:
return 1
return 0
if __name__ == "__main__":
sys.exit(main())
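# Illustrative invocations (assuming the script above is saved as, say,
# sort_buckconfig.py; the "exclude" option names are examples only):
#
#   python3 sort_buckconfig.py --list
#   python3 sort_buckconfig.py --no-commit alias.foo cache.mode
#   python3 sort_buckconfig.py --commit-template-file commit_msg.txt \
#       --branch-template "reorder_docs_{option_name}"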
|
|
#!/usr/bin/env python
import json
import bson
import sys
import inspect
from datetime import datetime
from multiprocessing import Process, cpu_count
try:
try:
from pymongo import MongoClient as Connection
except ImportError:
from pymongo import Connection
from pymongo.errors import ConnectionFailure, AutoReconnect, OperationFailure, ConfigurationError
except ImportError:
raise ImportError("Can't import pymongo. See http://api.mongodb.org/python/current/ for instructions on how to install pymongo.")
import mtools.mgenerate.operators as operators
from mtools.util.cmdlinetool import BaseCmdLineTool
class DateTimeEncoder(json.JSONEncoder):
""" custom datetime encoder for json output. """
def default(self, obj):
if isinstance(obj, datetime):
return obj.isoformat()
try:
res = json.JSONEncoder.default(self, obj)
except TypeError:
res = str(obj)
return res
class InsertProcess(Process):
operator_classes = inspect.getmembers(operators, inspect.isclass)
def __init__(self, number, template, collection, args):
Process.__init__(self)
self.number = number
self.template = template
self.collection = collection
self.args = args
# add all operators classes from the operators module, pass in _decode method
self.operators = [c[1](self._decode) for c in self.operator_classes]
self.string_operators = {}
self.dict_operators = {}
# separate into key and value operators
for o in self.operators:
if o.string_format:
for name in o.names:
self.string_operators[name] = o
if o.dict_format:
for name in o.names:
self.dict_operators[name] = o
def run(self):
batch = []
batchsize = 0
for n in xrange(self.number):
# decode the template
doc = self._decode(self.template)
if not self.collection:
indent = 4 if self.args['pretty'] else None
print json.dumps(doc, cls=DateTimeEncoder, indent=indent, ensure_ascii=False)
else:
batch.append(doc)
batchsize += self.bsonsize(doc)
if n % 1000 == 0 or batchsize >= 1000000:
self.collection.insert(batch)
batch = []
batchsize = 0
if self.collection:
if batch:
self.collection.insert(batch)
def bsonsize(self, doc):
return len(bson.BSON.encode(doc))
def _decode_operator(self, data):
if isinstance(data, str):
# string-format operator
return self._decode(self.string_operators[data]())
# dict-format operators should only ever have one key
assert len(data.keys()) == 1
key = data.keys()[0]
value = data[key]
# call operator with parameters (which will recursively evaluate sub-documents) and return result
return self._decode(self.dict_operators[key](value))
def _decode_list(self, data):
rv = []
for item in data:
item = self._decode(item)
if item != "$missing":
rv.append(item)
return rv
def _decode_dict(self, data):
rv = {}
for key, value in data.iteritems():
key = self._decode(key)
value = self._decode(value)
if value != "$missing":
rv[key] = value
return rv
def _decode(self, data):
# if dict, check if it's a dict-format command
if isinstance(data, dict):
if data.keys()[0] in self.dict_operators:
return self._decode_operator(data)
else:
return self._decode_dict(data)
# decode as list
if isinstance(data, list):
return self._decode_list(data)
# if it's a unicode string, encode as utf-8
if isinstance(data, unicode):
data = data.encode('utf-8')
# decode string-format commands
if isinstance(data, str) and data != "$missing" and data in self.string_operators:
return self._decode_operator(data)
# everything else, just return the data as is
return data
class MGeneratorTool(BaseCmdLineTool):
def __init__(self):
BaseCmdLineTool.__init__(self)
self.argparser.description = 'Script to generate pseudo-random data based on template documents.'
self.argparser.add_argument('template', action='store', help='template for data generation, JSON or file')
self.argparser.add_argument('--number', '-n', action='store', type=int, metavar='NUM', default=1, help='number of documents to insert.')
self.argparser.add_argument('--host', action='store', default='localhost', help='mongod/s host to import data, default=localhost')
self.argparser.add_argument('--port', action='store', default=27017, type=int, help='mongod/s port to import data, default=27017')
self.argparser.add_argument('--database', '-d', action='store', metavar='D', default='test', help='database D to insert data, default=test')
self.argparser.add_argument('--collection', '-c', action='store', metavar='C', default='mgendata', help='collection C to import data, default=mgendata')
self.argparser.add_argument('--drop', action='store_true', default=False, help='drop collection before inserting data')
self.argparser.add_argument('--stdout', action='store_true', default=False, help='prints data to stdout instead of inserting to mongod/s instance.')
        self.argparser.add_argument('--pretty', action='store_true', default=False, help="if set, prettifies the output to stdout (indented); requires --stdout")
self.argparser.add_argument('--write-concern', '-w', action='store', metavar="W", default=1, help='write concern for inserts, default=1')
self.argparser.add_argument('--processes', '-p', action='store', type=int, default=0, help='specify number of processes (# cpus by default)')
def run(self, arguments=None):
BaseCmdLineTool.run(self, arguments)
if self.args['template'].startswith('{'):
# not a file
try:
template = json.loads(self.args['template'])
except ValueError as e:
raise SystemExit("can't parse template: %s" % e)
else:
try:
f = open(self.args['template'])
except IOError as e:
raise SystemExit("can't open file %s: %s" % (self.args['template'], e))
try:
template = json.load(f)
except ValueError as e:
raise SystemExit("can't parse template in %s: %s" % (self.args['template'], e))
if not self.args['stdout']:
mc = Connection(host=self.args['host'], port=self.args['port'], w=self.args['write_concern'])
col = mc[self.args['database']][self.args['collection']]
if self.args['drop']:
col.drop()
else:
col = None
# divide work over number of cores
if self.args['stdout']:
num_cores = 1
elif self.args['processes'] > 0:
num_cores = self.args['processes']
else:
num_cores = cpu_count()
num_list = [self.args['number'] // num_cores] * num_cores
num_list[0] += self.args['number'] % num_cores
processes = []
for n in num_list:
p = InsertProcess(n, template, col, self.args)
p.start()
processes.append(p)
for p in processes:
p.join()
def main():
tool = MGeneratorTool()
tool.run()
if __name__ == '__main__':
sys.exit(main())
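# Illustrative template (not shipped with mtools): values starting with "$" are
# resolved by the operator classes loaded from mtools.mgenerate.operators. The
# operators referenced here ($string, $number, $datetime, $missing) are assumed
# to be among them; adjust to whatever your mtools version actually provides.
EXAMPLE_TEMPLATE = {
    "user": "$string",
    "age": {"$number": [18, 65]},
    "created": "$datetime",
    "nickname": {"$missing": {"percent": 50}},
}
# Hypothetical invocation with an inline template:
#   mgenerate '{"user": "$string", "age": {"$number": [18, 65]}}' -n 100 --stdout --pretty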
|
|
"""Routines related to PyPI, indexes"""
from __future__ import absolute_import
import logging
import cgi
from collections import namedtuple
import itertools
import sys
import os
import re
import mimetypes
import posixpath
import warnings
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip._vendor.six.moves.urllib import request as urllib_request
from pip.compat import ipaddress
from pip.utils import (
Inf, cached_property, normalize_name, splitext, normalize_path,
ARCHIVE_EXTENSIONS, SUPPORTED_EXTENSIONS)
from pip.utils.deprecation import RemovedInPip8Warning
from pip.utils.logging import indent_log
from pip.exceptions import (
DistributionNotFound, BestVersionAlreadyInstalled, InvalidWheelFilename,
UnsupportedWheel,
)
from pip.download import HAS_TLS, url_to_path, path_to_url
from pip.models import PyPI
from pip.wheel import Wheel, wheel_ext
from pip.pep425tags import supported_tags, supported_tags_noarch, get_platform
from pip._vendor import html5lib, requests, pkg_resources, six
from pip._vendor.packaging.version import parse as parse_version
from pip._vendor.requests.exceptions import SSLError
__all__ = ['FormatControl', 'fmt_ctl_handle_mutual_exclude', 'PackageFinder']
# Taken from Chrome's list of secure origins (See: http://bit.ly/1qrySKC)
SECURE_ORIGINS = [
# protocol, hostname, port
("https", "*", "*"),
("*", "localhost", "*"),
("*", "127.0.0.0/8", "*"),
("*", "::1/128", "*"),
("file", "*", None),
]
logger = logging.getLogger(__name__)
class InstallationCandidate(object):
def __init__(self, project, version, location):
self.project = project
self.version = parse_version(version)
self.location = location
self._key = (self.project, self.version, self.location)
def __repr__(self):
return "<InstallationCandidate({0!r}, {1!r}, {2!r})>".format(
self.project, self.version, self.location,
)
def __hash__(self):
return hash(self._key)
def __lt__(self, other):
return self._compare(other, lambda s, o: s < o)
def __le__(self, other):
return self._compare(other, lambda s, o: s <= o)
def __eq__(self, other):
return self._compare(other, lambda s, o: s == o)
def __ge__(self, other):
return self._compare(other, lambda s, o: s >= o)
def __gt__(self, other):
return self._compare(other, lambda s, o: s > o)
def __ne__(self, other):
return self._compare(other, lambda s, o: s != o)
def _compare(self, other, method):
if not isinstance(other, InstallationCandidate):
return NotImplemented
return method(self._key, other._key)
class PackageFinder(object):
"""This finds packages.
This is meant to match easy_install's technique for looking for
packages, by reading pages and looking for appropriate links.
"""
def __init__(self, find_links, index_urls,
allow_external=(), allow_unverified=(),
allow_all_external=False, allow_all_prereleases=False,
trusted_hosts=None, process_dependency_links=False,
session=None, format_control=None):
"""Create a PackageFinder.
:param format_control: A FormatControl object or None. Used to control
the selection of source packages / binary packages when consulting
the index and links.
"""
if session is None:
raise TypeError(
"PackageFinder() missing 1 required keyword argument: "
"'session'"
)
# Build find_links. If an argument starts with ~, it may be
# a local file relative to a home directory. So try normalizing
# it and if it exists, use the normalized version.
# This is deliberately conservative - it might be fine just to
# blindly normalize anything starting with a ~...
self.find_links = []
for link in find_links:
if link.startswith('~'):
new_link = normalize_path(link)
if os.path.exists(new_link):
link = new_link
self.find_links.append(link)
self.index_urls = index_urls
self.dependency_links = []
# These are boring links that have already been logged somehow:
self.logged_links = set()
self.format_control = format_control or FormatControl(set(), set())
# Do we allow (safe and verifiable) externally hosted files?
self.allow_external = set(normalize_name(n) for n in allow_external)
# Which names are allowed to install insecure and unverifiable files?
self.allow_unverified = set(
normalize_name(n) for n in allow_unverified
)
# Anything that is allowed unverified is also allowed external
self.allow_external |= self.allow_unverified
# Do we allow all (safe and verifiable) externally hosted files?
self.allow_all_external = allow_all_external
# Domains that we won't emit warnings for when not using HTTPS
self.secure_origins = [
("*", host, "*")
for host in (trusted_hosts if trusted_hosts else [])
]
# Stores if we ignored any external links so that we can instruct
# end users how to install them if no distributions are available
self.need_warn_external = False
# Stores if we ignored any unsafe links so that we can instruct
# end users how to install them if no distributions are available
self.need_warn_unverified = False
# Do we want to allow _all_ pre-releases?
self.allow_all_prereleases = allow_all_prereleases
# Do we process dependency links?
self.process_dependency_links = process_dependency_links
# The Session we'll use to make requests
self.session = session
# If we don't have TLS enabled, then WARN if anyplace we're looking
# relies on TLS.
if not HAS_TLS:
for link in itertools.chain(self.index_urls, self.find_links):
parsed = urllib_parse.urlparse(link)
if parsed.scheme == "https":
logger.warning(
"pip is configured with locations that require "
"TLS/SSL, however the ssl module in Python is not "
"available."
)
break
def add_dependency_links(self, links):
# # FIXME: this shouldn't be global list this, it should only
# # apply to requirements of the package that specifies the
# # dependency_links value
# # FIXME: also, we should track comes_from (i.e., use Link)
if self.process_dependency_links:
warnings.warn(
"Dependency Links processing has been deprecated and will be "
"removed in a future release.",
RemovedInPip8Warning,
)
self.dependency_links.extend(links)
@staticmethod
def _sort_locations(locations, expand_dir=False):
"""
Sort locations into "files" (archives) and "urls", and return
a pair of lists (files,urls)
"""
files = []
urls = []
# puts the url for the given file path into the appropriate list
def sort_path(path):
url = path_to_url(path)
if mimetypes.guess_type(url, strict=False)[0] == 'text/html':
urls.append(url)
else:
files.append(url)
for url in locations:
is_local_path = os.path.exists(url)
is_file_url = url.startswith('file:')
if is_local_path or is_file_url:
if is_local_path:
path = url
else:
path = url_to_path(url)
if os.path.isdir(path):
if expand_dir:
path = os.path.realpath(path)
for item in os.listdir(path):
sort_path(os.path.join(path, item))
elif is_file_url:
urls.append(url)
elif os.path.isfile(path):
sort_path(path)
else:
urls.append(url)
return files, urls
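# Illustrative sketch (not part of the original module), assuming the local path exists:
# given locations = ['/tmp/wheels/foo-1.0-py2.py3-none-any.whl',
#                    'https://pypi.python.org/simple/'],
# the wheel is not guessed as text/html, so its file: URL lands in `files`, while the
# remote index URL is kept in `urls`.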
def _candidate_sort_key(self, candidate):
"""
Function used to generate link sort key for link tuples.
The greater the return value, the more preferred it is.
If not finding wheels, then sorted by version only.
If finding wheels, then the sort order is by version, then:
1. existing installs
2. wheels ordered via Wheel.support_index_min()
3. source archives
Note: it was considered to embed this logic into the Link
comparison operators, but then different sdist links
with the same version would have to be considered equal
"""
support_num = len(supported_tags)
if candidate.location == INSTALLED_VERSION:
pri = 1
elif candidate.location.is_wheel:
# can raise InvalidWheelFilename
wheel = Wheel(candidate.location.filename)
if not wheel.supported():
raise UnsupportedWheel(
"%s is not a supported wheel for this platform. It "
"can't be sorted." % wheel.filename
)
pri = -(wheel.support_index_min())
else: # sdist
pri = -(support_num)
return (candidate.version, pri)
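# Illustrative sketch (not part of the original module): for three candidates of the
# same version 1.0 - an installed copy, a supported wheel, and an sdist - the keys
# produced above look roughly like (Version('1.0'), 1), (Version('1.0'), -support_index)
# and (Version('1.0'), -len(supported_tags)), so sorting with reverse=True prefers the
# installed copy, then better-supported wheels, then source archives.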
def _sort_versions(self, applicable_versions):
"""
Bring the latest version (and wheels) to the front, but maintain the
existing ordering as secondary. See the docstring for `_candidate_sort_key`
for details. This function is isolated for easier unit testing.
"""
return sorted(
applicable_versions,
key=self._candidate_sort_key,
reverse=True
)
def _validate_secure_origin(self, logger, location):
# Determine if this url used a secure transport mechanism
parsed = urllib_parse.urlparse(str(location))
origin = (parsed.scheme, parsed.hostname, parsed.port)
# Determine if our origin is a secure origin by looking through our
# hardcoded list of secure origins, as well as any additional ones
# configured on this PackageFinder instance.
for secure_origin in (SECURE_ORIGINS + self.secure_origins):
# Check to see if the protocol matches
if origin[0] != secure_origin[0] and secure_origin[0] != "*":
continue
try:
# We need to do this decode dance to ensure that we have a
# unicode object, even on Python 2.x.
addr = ipaddress.ip_address(
origin[1]
if (
isinstance(origin[1], six.text_type) or
origin[1] is None
)
else origin[1].decode("utf8")
)
network = ipaddress.ip_network(
secure_origin[1]
if isinstance(secure_origin[1], six.text_type)
else secure_origin[1].decode("utf8")
)
except ValueError:
# We don't have both a valid address and a valid network, so
# we'll check this origin against hostnames.
if origin[1] != secure_origin[1] and secure_origin[1] != "*":
continue
else:
# We have a valid address and network, so see if the address
# is contained within the network.
if addr not in network:
continue
# Check to see if the port matches
if (origin[2] != secure_origin[2] and
secure_origin[2] != "*" and
secure_origin[2] is not None):
continue
# If we've gotten here, then this origin matches the current
# secure origin and we should return True
return True
# If we've gotten to this point, then the origin isn't secure and we
# will not accept it as a valid location to search. We will however
# log a warning that we are ignoring it.
logger.warning(
"The repository located at %s is not a trusted or secure host and "
"is being ignored. If this repository is available via HTTPS it "
"is recommended to use HTTPS instead, otherwise you may silence "
"this warning and allow it anyways with '--trusted-host %s'.",
parsed.hostname,
parsed.hostname,
)
return False
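# Illustrative sketch (not part of the original module): running with
# --trusted-host my.internal.index adds ("*", "my.internal.index", "*") to
# self.secure_origins, so http://my.internal.index/simple/foo/ matches on scheme,
# hostname and port in the loop above and returns True, while the same URL on an
# unlisted plain-HTTP host only triggers the warning and returns False.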
def _get_index_urls_locations(self, project_name):
"""Returns the locations found via self.index_urls
Checks the url_name on the main (first in the list) index and
uses this url_name to produce all locations
"""
def mkurl_pypi_url(url):
loc = posixpath.join(url, project_url_name)
# For maximum compatibility with easy_install, ensure the path
# ends in a trailing slash. Although this isn't in the spec
# (and PyPI can handle it without the slash) some other index
# implementations might break if they relied on easy_install's
# behavior.
if not loc.endswith('/'):
loc = loc + '/'
return loc
project_url_name = urllib_parse.quote(project_name.lower())
if self.index_urls:
# Check that we have the url_name correctly spelled:
# Only check main index if index URL is given
main_index_url = Link(
mkurl_pypi_url(self.index_urls[0]),
trusted=True,
)
page = self._get_page(main_index_url)
if page is None and PyPI.netloc not in str(main_index_url):
warnings.warn(
"Failed to find %r at %s. It is suggested to upgrade "
"your index to support normalized names as the name in "
"/simple/{name}." % (project_name, main_index_url),
RemovedInPip8Warning,
)
project_url_name = self._find_url_name(
Link(self.index_urls[0], trusted=True),
project_url_name,
) or project_url_name
if project_url_name is not None:
return [mkurl_pypi_url(url) for url in self.index_urls]
return []
def _find_all_versions(self, project_name):
"""Find all available versions for project_name
This checks index_urls, find_links and dependency_links
All versions found are returned
See _link_package_versions for details on which files are accepted
"""
index_locations = self._get_index_urls_locations(project_name)
index_file_loc, index_url_loc = self._sort_locations(index_locations)
fl_file_loc, fl_url_loc = self._sort_locations(
self.find_links, expand_dir=True)
dep_file_loc, dep_url_loc = self._sort_locations(self.dependency_links)
file_locations = (
Link(url) for url in itertools.chain(
index_file_loc, fl_file_loc, dep_file_loc)
)
# We trust every url that the user has given us whether it was given
# via --index-url or --find-links
# We explicitly do not trust links that came from dependency_links
# We want to filter out anything which does not have a secure origin.
url_locations = [
link for link in itertools.chain(
(Link(url, trusted=True) for url in index_url_loc),
(Link(url, trusted=True) for url in fl_url_loc),
(Link(url) for url in dep_url_loc),
)
if self._validate_secure_origin(logger, link)
]
logger.debug('%d location(s) to search for versions of %s:',
len(url_locations), project_name)
for location in url_locations:
logger.debug('* %s', location)
canonical_name = pkg_resources.safe_name(project_name).lower()
formats = fmt_ctl_formats(self.format_control, canonical_name)
search = Search(project_name.lower(), canonical_name, formats)
find_links_versions = self._package_versions(
# We trust every directly linked archive in find_links
(Link(url, '-f', trusted=True) for url in self.find_links),
search
)
page_versions = []
for page in self._get_pages(url_locations, project_name):
logger.debug('Analyzing links from page %s', page.url)
with indent_log():
page_versions.extend(
self._package_versions(page.links, search)
)
dependency_versions = self._package_versions(
(Link(url) for url in self.dependency_links), search
)
if dependency_versions:
logger.debug(
'dependency_links found: %s',
', '.join([
version.location.url for version in dependency_versions
])
)
file_versions = self._package_versions(file_locations, search)
if file_versions:
file_versions.sort(reverse=True)
logger.debug(
'Local files found: %s',
', '.join([
url_to_path(candidate.location.url)
for candidate in file_versions
])
)
# This is an intentional priority ordering
return (
file_versions + find_links_versions + page_versions +
dependency_versions
)
def find_requirement(self, req, upgrade):
"""Try to find an InstallationCandidate for req
Expects req, an InstallRequirement and upgrade, a boolean
Returns an InstallationCandidate or None
May raise DistributionNotFound or BestVersionAlreadyInstalled
"""
all_versions = self._find_all_versions(req.name)
# Filter out anything which doesn't match our specifier
_versions = set(
req.specifier.filter(
# We turn the version object into a str here because otherwise
# when we're debundled but setuptools isn't, Python will see
# packaging.version.Version and
# pkg_resources._vendor.packaging.version.Version as different
# types. This way we'll use a str as a common data interchange
# format. If we stop using the pkg_resources provided specifier
# and start using our own, we can drop the cast to str().
[str(x.version) for x in all_versions],
prereleases=(
self.allow_all_prereleases
if self.allow_all_prereleases else None
),
)
)
applicable_versions = [
# Again, converting to str to deal with debundling.
x for x in all_versions if str(x.version) in _versions
]
if req.satisfied_by is not None:
# Finally add our existing versions to the front of our versions.
applicable_versions.insert(
0,
InstallationCandidate(
req.name,
req.satisfied_by.version,
INSTALLED_VERSION,
)
)
existing_applicable = True
else:
existing_applicable = False
applicable_versions = self._sort_versions(applicable_versions)
if not upgrade and existing_applicable:
if applicable_versions[0].location is INSTALLED_VERSION:
logger.debug(
'Existing installed version (%s) is most up-to-date and '
'satisfies requirement',
req.satisfied_by.version,
)
else:
logger.debug(
'Existing installed version (%s) satisfies requirement '
'(most up-to-date version is %s)',
req.satisfied_by.version,
applicable_versions[0][2],
)
return None
if not applicable_versions:
logger.critical(
'Could not find a version that satisfies the requirement %s '
'(from versions: %s)',
req,
', '.join(
sorted(
set(str(i.version) for i in all_versions),
key=parse_version,
)
)
)
if self.need_warn_external:
logger.warning(
"Some externally hosted files were ignored as access to "
"them may be unreliable (use --allow-external %s to "
"allow).",
req.name,
)
if self.need_warn_unverified:
logger.warning(
"Some insecure and unverifiable files were ignored"
" (use --allow-unverified %s to allow).",
req.name,
)
raise DistributionNotFound(
'No matching distribution found for %s' % req
)
if applicable_versions[0].location is INSTALLED_VERSION:
# We have an existing version, and it's the best version
logger.debug(
'Installed version (%s) is most up-to-date (past versions: '
'%s)',
req.satisfied_by.version,
', '.join(str(i.version) for i in applicable_versions[1:]) or
"none",
)
raise BestVersionAlreadyInstalled
if len(applicable_versions) > 1:
logger.debug(
'Using version %s (newest of versions: %s)',
applicable_versions[0].version,
', '.join(str(i.version) for i in applicable_versions)
)
selected_version = applicable_versions[0].location
if (selected_version.verifiable is not None and not
selected_version.verifiable):
logger.warning(
"%s is potentially insecure and unverifiable.", req.name,
)
return selected_version
def _find_url_name(self, index_url, url_name):
"""
Finds the true URL name of a package, when the given name isn't quite
correct.
This is usually used to implement case-insensitivity.
"""
if not index_url.url.endswith('/'):
# Vaguely part of the PyPI API... weird but true.
# FIXME: bad to modify this?
index_url.url += '/'
page = self._get_page(index_url)
if page is None:
logger.critical('Cannot fetch index base URL %s', index_url)
return
norm_name = normalize_name(url_name)
for link in page.links:
base = posixpath.basename(link.path.rstrip('/'))
if norm_name == normalize_name(base):
logger.debug(
'Real name of requirement %s is %s', url_name, base,
)
return base
return None
def _get_pages(self, locations, project_name):
"""
Yields HTML pages from the given locations, skipping
locations that have errors, and queueing any download/homepage links for crawling
"""
all_locations = list(locations)
seen = set()
normalized = normalize_name(project_name)
while all_locations:
location = all_locations.pop(0)
if location in seen:
continue
seen.add(location)
page = self._get_page(location)
if page is None:
continue
yield page
for link in page.rel_links():
if (normalized not in self.allow_external and not
self.allow_all_external):
self.need_warn_external = True
logger.debug(
"Not searching %s for files because external "
"urls are disallowed.",
link,
)
continue
if (link.trusted is not None and not
link.trusted and
normalized not in self.allow_unverified):
logger.debug(
"Not searching %s for urls, it is an "
"untrusted link and cannot produce safe or "
"verifiable files.",
link,
)
self.need_warn_unverified = True
continue
all_locations.append(link)
_py_version_re = re.compile(r'-py([123]\.?[0-9]?)$')
def _sort_links(self, links):
"""
Returns elements of links in order, non-egg links first, egg links
second, while eliminating duplicates
"""
eggs, no_eggs = [], []
seen = set()
for link in links:
if link not in seen:
seen.add(link)
if link.egg_fragment:
eggs.append(link)
else:
no_eggs.append(link)
return no_eggs + eggs
def _package_versions(self, links, search):
result = []
for link in self._sort_links(links):
v = self._link_package_versions(link, search)
if v is not None:
result.append(v)
return result
def _log_skipped_link(self, link, reason):
if link not in self.logged_links:
logger.debug('Skipping link %s; %s', link, reason)
self.logged_links.add(link)
def _link_package_versions(self, link, search):
"""Return an InstallationCandidate or None"""
platform = get_platform()
version = None
if link.egg_fragment:
egg_info = link.egg_fragment
ext = link.ext
else:
egg_info, ext = link.splitext()
if not ext:
self._log_skipped_link(link, 'not a file')
return
if ext not in SUPPORTED_EXTENSIONS:
self._log_skipped_link(
link, 'unsupported archive format: %s' % ext)
return
if "binary" not in search.formats and ext == wheel_ext:
self._log_skipped_link(
link, 'No binaries permitted for %s' % search.supplied)
return
if "macosx10" in link.path and ext == '.zip':
self._log_skipped_link(link, 'macosx10 one')
return
if ext == wheel_ext:
try:
wheel = Wheel(link.filename)
except InvalidWheelFilename:
self._log_skipped_link(link, 'invalid wheel filename')
return
if (pkg_resources.safe_name(wheel.name).lower() !=
search.canonical):
self._log_skipped_link(
link, 'wrong project name (not %s)' % search.supplied)
return
if not wheel.supported():
self._log_skipped_link(
link, 'it is not compatible with this Python')
return
# This is a dirty hack to prevent installing Binary Wheels from
# PyPI unless it is a Windows or Mac Binary Wheel. This is
# paired with a change to PyPI disabling uploads for the
# same. Once we have a mechanism for enabling support for
# binary wheels on linux that deals with the inherent problems
# of binary distribution this can be removed.
comes_from = getattr(link, "comes_from", None)
if (
(
not platform.startswith('win') and not
platform.startswith('macosx') and not
platform == 'cli'
) and
comes_from is not None and
urllib_parse.urlparse(
comes_from.url
).netloc.endswith(PyPI.netloc)):
if not wheel.supported(tags=supported_tags_noarch):
self._log_skipped_link(
link,
"it is a pypi-hosted binary "
"Wheel on an unsupported platform",
)
return
version = wheel.version
# This should be up by the search.ok_binary check, but see issue 2700.
if "source" not in search.formats and ext != wheel_ext:
self._log_skipped_link(
link, 'No sources permitted for %s' % search.supplied)
return
if not version:
version = egg_info_matches(egg_info, search.supplied, link)
if version is None:
self._log_skipped_link(
link, 'wrong project name (not %s)' % search.supplied)
return
if (link.internal is not None and not
link.internal and not
normalize_name(search.supplied).lower()
in self.allow_external and not
self.allow_all_external):
# We have a link that we are sure is external, so we should skip
# it unless we are allowing externals
self._log_skipped_link(link, 'it is externally hosted')
self.need_warn_external = True
return
if (link.verifiable is not None and not
link.verifiable and not
(normalize_name(search.supplied).lower()
in self.allow_unverified)):
# We have a link that we are sure we cannot verify its integrity,
# so we should skip it unless we are allowing unsafe installs
# for this requirement.
self._log_skipped_link(
link, 'it is an insecure and unverifiable file')
self.need_warn_unverified = True
return
match = self._py_version_re.search(version)
if match:
version = version[:match.start()]
py_version = match.group(1)
if py_version != sys.version[:3]:
self._log_skipped_link(
link, 'Python version is incorrect')
return
logger.debug('Found link %s, version: %s', link, version)
return InstallationCandidate(search.supplied, version, link)
def _get_page(self, link):
return HTMLPage.get_page(link, session=self.session)
def egg_info_matches(
egg_info, search_name, link,
_egg_info_re=re.compile(r'([a-z0-9_.]+)-([a-z0-9_.!+-]+)', re.I)):
"""Pull the version part out of a string.
:param egg_info: The string to parse. E.g. foo-2.1
:param search_name: The name of the package this belongs to. None to
infer the name. Note that this cannot unambiguously parse strings
like foo-2-2 which might be foo, 2-2 or foo-2, 2.
:param link: The link the string came from, for logging on failure.
"""
match = _egg_info_re.search(egg_info)
if not match:
logger.debug('Could not parse version from link: %s', link)
return None
if search_name is None:
full_match = match.group(0)
return full_match[full_match.index('-'):]
name = match.group(0).lower()
# To match the "safe" name that pkg_resources creates:
name = name.replace('_', '-')
# project name and version must be separated by a dash
look_for = search_name.lower() + "-"
if name.startswith(look_for):
return match.group(0)[len(look_for):]
else:
return None
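# Illustrative examples (not part of the original module; `link` is only used for logging):
#   egg_info_matches('pip-6.0.8', 'pip', link)        -> '6.0.8'
#   egg_info_matches('foo_bar-1.2', 'foo-bar', link)  -> '1.2'  (underscores are normalized to dashes)
#   egg_info_matches('foo-2-2', None, link)           -> '-2-2' (name inferred; ambiguity is left as-is)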
class HTMLPage(object):
"""Represents one page, along with its URL"""
def __init__(self, content, url, headers=None, trusted=None):
# Determine if we have any encoding information in our headers
encoding = None
if headers and "Content-Type" in headers:
content_type, params = cgi.parse_header(headers["Content-Type"])
if "charset" in params:
encoding = params['charset']
self.content = content
self.parsed = html5lib.parse(
self.content,
encoding=encoding,
namespaceHTMLElements=False,
)
self.url = url
self.headers = headers
self.trusted = trusted
def __str__(self):
return self.url
@classmethod
def get_page(cls, link, skip_archives=True, session=None):
if session is None:
raise TypeError(
"get_page() missing 1 required keyword argument: 'session'"
)
url = link.url
url = url.split('#', 1)[0]
# Check for VCS schemes that do not support lookup as web pages.
from pip.vcs import VcsSupport
for scheme in VcsSupport.schemes:
if url.lower().startswith(scheme) and url[len(scheme)] in '+:':
logger.debug('Cannot look at %s URL %s', scheme, link)
return None
try:
if skip_archives:
filename = link.filename
for bad_ext in ARCHIVE_EXTENSIONS:
if filename.endswith(bad_ext):
content_type = cls._get_content_type(
url, session=session,
)
if content_type.lower().startswith('text/html'):
break
else:
logger.debug(
'Skipping page %s because of Content-Type: %s',
link,
content_type,
)
return
logger.debug('Getting page %s', url)
# Tack index.html onto file:// URLs that point to directories
(scheme, netloc, path, params, query, fragment) = \
urllib_parse.urlparse(url)
if (scheme == 'file' and
os.path.isdir(urllib_request.url2pathname(path))):
# add trailing slash if not present so urljoin doesn't trim
# final segment
if not url.endswith('/'):
url += '/'
url = urllib_parse.urljoin(url, 'index.html')
logger.debug(' file: URL is directory, getting %s', url)
resp = session.get(
url,
headers={
"Accept": "text/html",
"Cache-Control": "max-age=600",
},
)
resp.raise_for_status()
# The check for archives above only works if the url ends with
# something that looks like an archive. However that is not a
# requirement of a url. Unless we issue a HEAD request on every
# url we cannot know ahead of time for sure if something is HTML
# or not. However we can check after we've downloaded it.
content_type = resp.headers.get('Content-Type', 'unknown')
if not content_type.lower().startswith("text/html"):
logger.debug(
'Skipping page %s because of Content-Type: %s',
link,
content_type,
)
return
inst = cls(
resp.content, resp.url, resp.headers,
trusted=link.trusted,
)
except requests.HTTPError as exc:
level = 2 if exc.response.status_code == 404 else 1
cls._handle_fail(link, exc, url, level=level)
except requests.ConnectionError as exc:
cls._handle_fail(link, "connection error: %s" % exc, url)
except requests.Timeout:
cls._handle_fail(link, "timed out", url)
except SSLError as exc:
reason = ("There was a problem confirming the ssl certificate: "
"%s" % exc)
cls._handle_fail(link, reason, url, level=2, meth=logger.info)
else:
return inst
@staticmethod
def _handle_fail(link, reason, url, level=1, meth=None):
if meth is None:
meth = logger.debug
meth("Could not fetch URL %s: %s - skipping", link, reason)
@staticmethod
def _get_content_type(url, session):
"""Get the Content-Type of the given url, using a HEAD request"""
scheme, netloc, path, query, fragment = urllib_parse.urlsplit(url)
if scheme not in ('http', 'https'):
# FIXME: some warning or something?
# assertion error?
return ''
resp = session.head(url, allow_redirects=True)
resp.raise_for_status()
return resp.headers.get("Content-Type", "")
@cached_property
def api_version(self):
metas = [
x for x in self.parsed.findall(".//meta")
if x.get("name", "").lower() == "api-version"
]
if metas:
try:
return int(metas[0].get("value", None))
except (TypeError, ValueError):
pass
return None
@cached_property
def base_url(self):
bases = [
x for x in self.parsed.findall(".//base")
if x.get("href") is not None
]
if bases and bases[0].get("href"):
return bases[0].get("href")
else:
return self.url
@property
def links(self):
"""Yields all links in the page"""
for anchor in self.parsed.findall(".//a"):
if anchor.get("href"):
href = anchor.get("href")
url = self.clean_link(
urllib_parse.urljoin(self.base_url, href)
)
# Determine if this link is internal. If that distinction
# doesn't make sense in this context, then we don't make
# any distinction.
internal = None
if self.api_version and self.api_version >= 2:
# Only api_versions >= 2 have a distinction between
# external and internal links
internal = bool(
anchor.get("rel") and
"internal" in anchor.get("rel").split()
)
yield Link(url, self, internal=internal)
def rel_links(self, rels=('homepage', 'download')):
"""Yields all links with the given relations"""
rels = set(rels)
for anchor in self.parsed.findall(".//a"):
if anchor.get("rel") and anchor.get("href"):
found_rels = set(anchor.get("rel").split())
# Determine the intersection between what rels were found and
# what rels were being looked for
if found_rels & rels:
href = anchor.get("href")
url = self.clean_link(
urllib_parse.urljoin(self.base_url, href)
)
yield Link(url, self, trusted=False)
_clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I)
def clean_link(self, url):
"""Makes sure a link is fully encoded. That is, if a ' ' shows up in
the link, it will be rewritten to %20 (while not over-quoting
% or other characters)."""
return self._clean_re.sub(
lambda match: '%%%2x' % ord(match.group(0)), url)
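# Illustrative example (not part of the original module):
#   clean_link('https://host/simple/my project/') -> 'https://host/simple/my%20project/'
# Characters that are already legal in a URL (e.g. '%', '?', '#') are left untouched.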
class Link(object):
def __init__(self, url, comes_from=None, internal=None, trusted=None):
# url can be a UNC windows share
if url != Inf and url.startswith('\\\\'):
url = path_to_url(url)
self.url = url
self.comes_from = comes_from
self.internal = internal
self.trusted = trusted
def __str__(self):
if self.comes_from:
return '%s (from %s)' % (self.url, self.comes_from)
else:
return str(self.url)
def __repr__(self):
return '<Link %s>' % self
def __eq__(self, other):
if not isinstance(other, Link):
return NotImplemented
return self.url == other.url
def __ne__(self, other):
if not isinstance(other, Link):
return NotImplemented
return self.url != other.url
def __lt__(self, other):
if not isinstance(other, Link):
return NotImplemented
return self.url < other.url
def __le__(self, other):
if not isinstance(other, Link):
return NotImplemented
return self.url <= other.url
def __gt__(self, other):
if not isinstance(other, Link):
return NotImplemented
return self.url > other.url
def __ge__(self, other):
if not isinstance(other, Link):
return NotImplemented
return self.url >= other.url
def __hash__(self):
return hash(self.url)
@property
def filename(self):
_, netloc, path, _, _ = urllib_parse.urlsplit(self.url)
name = posixpath.basename(path.rstrip('/')) or netloc
name = urllib_parse.unquote(name)
assert name, ('URL %r produced no filename' % self.url)
return name
@property
def scheme(self):
return urllib_parse.urlsplit(self.url)[0]
@property
def netloc(self):
return urllib_parse.urlsplit(self.url)[1]
@property
def path(self):
return urllib_parse.unquote(urllib_parse.urlsplit(self.url)[2])
def splitext(self):
return splitext(posixpath.basename(self.path.rstrip('/')))
@property
def ext(self):
return self.splitext()[1]
@property
def url_without_fragment(self):
scheme, netloc, path, query, fragment = urllib_parse.urlsplit(self.url)
return urllib_parse.urlunsplit((scheme, netloc, path, query, None))
_egg_fragment_re = re.compile(r'#egg=([^&]*)')
@property
def egg_fragment(self):
match = self._egg_fragment_re.search(self.url)
if not match:
return None
return match.group(1)
_hash_re = re.compile(
r'(sha1|sha224|sha384|sha256|sha512|md5)=([a-f0-9]+)'
)
@property
def hash(self):
match = self._hash_re.search(self.url)
if match:
return match.group(2)
return None
@property
def hash_name(self):
match = self._hash_re.search(self.url)
if match:
return match.group(1)
return None
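# Illustrative examples (not part of the original module):
#   Link('https://host/pkg-1.0.tar.gz#md5=abc123').hash       -> 'abc123'
#   Link('https://host/pkg-1.0.tar.gz#md5=abc123').hash_name  -> 'md5'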
@property
def show_url(self):
return posixpath.basename(self.url.split('#', 1)[0].split('?', 1)[0])
@property
def verifiable(self):
"""
Returns True if this link can be verified after download, False if it
cannot, and None if we cannot determine.
"""
trusted = self.trusted or getattr(self.comes_from, "trusted", None)
if trusted is not None and trusted:
# This link came from a trusted source. It *may* be verifiable but
# first we need to see if this page is operating under the new
# API version.
try:
api_version = getattr(self.comes_from, "api_version", None)
api_version = int(api_version)
except (ValueError, TypeError):
api_version = None
if api_version is None or api_version <= 1:
# This link is either trusted, or it came from a trusted source;
# however, it is not operating under API version 2, so
# we can't make any claims about whether it's safe or not
return
if self.hash:
# This link came from a trusted source and it has a hash, so we
# can consider it safe.
return True
else:
# This link came from a trusted source, using the new API
# version, and it does not have a hash. It is NOT verifiable
return False
elif trusted is not None:
# This link came from an untrusted source and we cannot trust it
return False
@property
def is_wheel(self):
return self.ext == wheel_ext
@property
def is_artifact(self):
"""
Determines if this points to an actual artifact (e.g. a tarball) or if
it points to an "abstract" thing like a path or a VCS location.
"""
from pip.vcs import vcs
if self.scheme in vcs.all_schemes:
return False
return True
# An object to represent the "link" for the installed version of a requirement.
# Using Inf as the url makes it sort higher.
INSTALLED_VERSION = Link(Inf)
FormatControl = namedtuple('FormatControl', 'no_binary only_binary')
"""This object has two fields, no_binary and only_binary.
If a field is falsy, it isn't set. If it is {':all:'}, it should match all
packages except those listed in the other field. Only one field can be set
to {':all:'} at a time. The rest of the time exact package name matches
are listed, with any given package only showing up in one field at a time.
"""
def fmt_ctl_handle_mutual_exclude(value, target, other):
new = value.split(',')
while ':all:' in new:
other.clear()
target.clear()
target.add(':all:')
del new[:new.index(':all:') + 1]
if ':none:' not in new:
# Without a none, we want to discard everything as :all: covers it
return
for name in new:
if name == ':none:':
target.clear()
continue
name = pkg_resources.safe_name(name).lower()
other.discard(name)
target.add(name)
def fmt_ctl_formats(fmt_ctl, canonical_name):
result = set(["binary", "source"])
if canonical_name in fmt_ctl.only_binary:
result.discard('source')
elif canonical_name in fmt_ctl.no_binary:
result.discard('binary')
elif ':all:' in fmt_ctl.only_binary:
result.discard('source')
elif ':all:' in fmt_ctl.no_binary:
result.discard('binary')
return frozenset(result)
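# Illustrative sketch (not part of the original module):
#   fmt_ctl_formats(FormatControl({':all:'}, set()), 'simplejson')       -> frozenset({'source'})
#   fmt_ctl_formats(FormatControl(set(), {'simplejson'}), 'simplejson')  -> frozenset({'binary'})
# i.e. --no-binary :all: forbids wheels for every project, while an exact name in
# only_binary forbids sdists for just that project.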
def fmt_ctl_no_binary(fmt_ctl):
fmt_ctl_handle_mutual_exclude(
':all:', fmt_ctl.no_binary, fmt_ctl.only_binary)
def fmt_ctl_no_use_wheel(fmt_ctl):
fmt_ctl_no_binary(fmt_ctl)
warnings.warn(
'--no-use-wheel is deprecated and will be removed in the future. '
' Please use --no-binary :all: instead.', DeprecationWarning,
stacklevel=2)
Search = namedtuple('Search', 'supplied canonical formats')
"""Capture key aspects of a search.
:attribute supplied: The user supplied package.
:attribute canonical: The canonical package name.
:attribute formats: The formats allowed for this package. Should be a set
with 'binary' or 'source' or both in it.
"""
#!/usr/bin/env python
# -- Content-Encoding: UTF-8 --
"""
iPOPO shell commands
Provides commands to the Pelix shell to get the state of iPOPO instances.
:author: Thomas Calmant
:copyright: Copyright 2014, isandlaTech
:license: Apache License 2.0
:version: 0.5.7
:status: Beta
..
Copyright 2014 isandlaTech
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Module version
__version_info__ = (0, 5, 7)
__version__ = ".".join(str(x) for x in __version_info__)
# Documentation strings format
__docformat__ = "restructuredtext en"
# ------------------------------------------------------------------------------
# Pelix
from pelix.ipopo.decorators import ComponentFactory, Requires, Provides, \
Instantiate
import pelix.ipopo.constants
import pelix.shell
# Standard library
import logging
# ------------------------------------------------------------------------------
_logger = logging.getLogger(__name__)
# ------------------------------------------------------------------------------
def ipopo_state_to_str(state):
"""
Converts the state of a component instance to its string representation
:param state: The state of an iPOPO component
:return: A string representation of the state
"""
ipopo_states = {0: "INVALID",
1: "VALID",
2: "KILLED",
3: "VALIDATING"}
return ipopo_states.get(state, "Unknown state ({0})".format(state))
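# Illustrative examples (not part of the original module):
#   ipopo_state_to_str(1)   -> "VALID"
#   ipopo_state_to_str(42)  -> "Unknown state (42)"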
# ------------------------------------------------------------------------------
@ComponentFactory("ipopo-shell-commands-factory")
@Requires("_ipopo", pelix.ipopo.constants.SERVICE_IPOPO)
@Requires("_utils", pelix.shell.SERVICE_SHELL_UTILS)
@Provides(pelix.shell.SERVICE_SHELL_COMMAND)
@Instantiate("ipopo-shell-commands")
class IPopoCommands(object):
"""
iPOPO shell commands
"""
def __init__(self):
"""
Sets up the object
"""
self._ipopo = None
self._utils = None
def get_namespace(self):
"""
Retrieves the namespace of this command handler
"""
return "ipopo"
def get_methods(self):
"""
Retrieves the list of tuples (command, method) for this command handler
"""
return [("factories", self.list_factories),
("factory", self.factory_details),
("instances", self.list_instances),
("waiting", self.list_waitings),
("instance", self.instance_details),
("instantiate", self.instantiate),
("kill", self.kill)]
def list_factories(self, io_handler, name=None):
"""
Lists the available iPOPO component factories
"""
header = ('Factory', 'Bundle')
factories = self._ipopo.get_factories()
if name is not None:
# Filter factories by name
factories = [factory for factory in factories if name in factory]
lines = sorted((name, self._ipopo.get_factory_bundle(name))
for name in factories)
io_handler.write(self._utils.make_table(header, lines))
if name is None:
io_handler.write_line("{0} factories available", len(lines))
else:
io_handler.write_line("{0} filtered factories", len(lines))
def list_instances(self, io_handler, name=None):
"""
Lists the active iPOPO component instances
"""
headers = ('Name', 'Factory', 'State')
instances = self._ipopo.get_instances()
if name is not None:
# Filter instances by name
instances = [instance for instance in instances
if name in instance[0]]
# Lines are already sorted
lines = ((name, factory, ipopo_state_to_str(state))
for name, factory, state in instances)
io_handler.write(self._utils.make_table(headers, lines))
if name is None:
io_handler.write_line("{0} components running", len(instances))
else:
io_handler.write_line("{0} filtered components", len(instances))
def list_waitings(self, io_handler, name=None):
"""
Lists the components waiting to be instantiated
"""
headers = ('Name', 'Factory', 'Missing handlers')
components = self._ipopo.get_waiting_components()
if name is not None:
# Filter components by name
components = [component for component in components
if name in component[0]]
# Lines are already sorted
lines = ((name, factory, ', '.join(missing))
for name, factory, missing in components)
io_handler.write(self._utils.make_table(headers, lines))
if name is None:
io_handler.write_line("{0} components in the waiting queue",
len(components))
else:
io_handler.write_line("{0} filtered components", len(components))
def factory_details(self, io_handler, name):
"""
Prints the details of the given component factory
"""
try:
details = self._ipopo.get_factory_details(name)
except ValueError as ex:
io_handler.write_line("Error getting details about '{0}': {1}",
name, ex)
return
lines = [
"Name : {0}".format(details["name"]),
"Bundle: {0}".format(details["bundle"])]
properties = details.get('properties', None)
if properties:
lines.append("Properties:")
prop_headers = ('Key', 'Default value')
prop_lines = [(str(key), str(value))
for key, value in properties.items()]
lines.append(self._utils.make_table(prop_headers, prop_lines))
services = details.get('services', None)
if services:
lines.append("Provided services:")
lines.extend("\t{0}".format(spec) for spec in services)
lines.append('')
requirements = details.get('requirements', None)
if requirements:
lines.append("Requirements:")
req_headers = ('ID', 'Specification', 'Filter', 'Aggregate',
'Optional')
req_lines = [(item['id'], item['specification'], item['filter'],
item['aggregate'], item['optional'])
for item in requirements]
lines.append(self._utils.make_table(req_headers, req_lines, '\t'))
handlers = details.get('handlers', None)
if handlers:
lines.append("Handlers:")
handlers_headers = ('ID', 'Configuration')
handlers_lines = [(key, handlers[key]) for key in sorted(handlers)]
lines.append(self._utils.make_table(handlers_headers,
handlers_lines, '\t'))
io_handler.write('\n'.join(lines))
def instance_details(self, io_handler, name):
"""
Prints the details of the given component instance
"""
try:
details = self._ipopo.get_instance_details(name)
except ValueError as ex:
io_handler.write_line("Error getting details about '{0}': {1}",
name, ex)
return
# Basic information
lines = [
"Name.....: {0}".format(details["name"]),
"Factory..: {0}".format(details["factory"]),
"Bundle ID: {0}".format(details["bundle_id"]),
"State....: {0}".format(ipopo_state_to_str(details["state"])),
"Services.:"]
# Provided services
lines.extend("\t{0}".format(svc_reference)
for svc_reference in details["services"].values())
# Requirements
lines.append("Dependencies:")
for field, infos in details["dependencies"].items():
lines.append("\tField: {0}".format(field))
lines.append("\t\tSpecification: {0}"
.format(infos['specification']))
if "filter" in infos:
lines.append("\t\tFilter......: {0}".format(infos["filter"]))
lines.append("\t\tOptional.....: {0}".format(infos["optional"]))
lines.append("\t\tAggregate....: {0}".format(infos["aggregate"]))
lines.append("\t\tHandler......: {0}".format(infos["handler"]))
lines.append("\t\tBindings:")
for ref in infos["bindings"]:
lines.append('\t\t\t{0}'.format(ref))
# Properties
lines.append("Properties:")
lines.append(self._utils.make_table(
("Key", "Value"), sorted(details['properties'].items()), "\t"))
lines.append("")
io_handler.write('\n'.join(lines))
def instantiate(self, io_handler, factory, name, **kwargs):
"""
Instantiates a component of the given factory with the given name and
properties
"""
try:
self._ipopo.instantiate(factory, name, kwargs)
io_handler.write_line("Component '{0}' instantiated.", name)
except ValueError as ex:
io_handler.write_line("Invalid parameter: {0}", ex)
except TypeError as ex:
io_handler.write_line("Invalid factory: {0}", ex)
except Exception as ex:
io_handler.write_line("Error instantiating the component: {0}", ex)
_logger.exception("Error instantiating the component")
def kill(self, io_handler, name):
"""
Kills the given component instance
"""
try:
self._ipopo.kill(name)
io_handler.write_line("Component '{0}' killed.", name)
except ValueError as ex:
io_handler.write_line("Invalid parameter: {0}", ex)
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
#TODO: Better documentation
"""
SciTran NIMS and SDM archive to folder Reaper conversion utility.
This code will convert a NIMS v1.0 or an SDM tar file (including the DICOMS) to a folder
tree that the SciTran folder_reaper can ingest.
Users can optionally pass in group, project, and subject arguments. If these
arguments are not passed in they are gleaned from the folder structure within
the NIMS archive.
example usage:
archive_to_folder_reaper.py /path/to/sometar.tar /path/to/place/the/output
"""
import os
import sys
import time
import glob
import gzip
import dicom
import shutil
import zipfile
import tarfile
import logging
import argparse
import subprocess
from distutils.dir_util import copy_tree
logging.basicConfig(
format='%(asctime)s %(levelname)8.8s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
)
log = logging.getLogger()
def extract_subject_id(root_path, args):
'''
If no subjectID is provided as input, we will attempt to extract the ID from a dicom.
If there are no dicom files then we use the name of the session folder to create a subject ID.
If there is a dicom file, we read it and use the field that was passed in - if no field was
passed in then we use values from the following fields, in order: PatientID, PatientName,
StudyID ('ex' + StudyID).
'''
log.info('No subjectID provided - Attempting to extract subject ID from dicom...')
subject_id = None
(file_paths, dir_paths, _, _, _) = get_paths(root_path)
dicom_dirs = [d for d in dir_paths if d.endswith('dicom')]
# Read the dicom file and return an id from (PatientID - PatientName - StudyDate+StudyTime)
if dicom_dirs:
dicom_files = [d for d in file_paths if d.startswith(dicom_dirs[0])]
dcm = dicom.read_file(dicom_files[0])
# Use the field that was passed in
if args.subject_id_field and dcm.get(args.subject_id_field):
subject_id = dcm.get(args.subject_id_field)
# Use the PatientID field
else:
if dcm.PatientID and dcm.PatientID != args.group: # Some users put the group in this field
subject_id = dcm.PatientID
subject_id = subject_id.split('@')[0]
if '/' in subject_id:  # If the 'group/' prefix is still in the name, then no subjectID was entered
subject_id = None
# Use the PatientName field
if not subject_id and dcm.PatientName:
subject_id = dcm.PatientName.replace('^',' ')
if subject_id[0] == ' ': # If the first char is a space, remove it
subject_id = subject_id[1:]
if subject_id.find(' ') > 0:
subject_id = None
# FIXME: This could be a proper name (remove it)
# Use StudyID
if not subject_id and dcm.StudyID:
subject_id = 'ex' + dcm.StudyID
# No dicoms - use the session folder name
if not subject_id or subject_id.isspace(): # This is empty b/c there are no dicoms, or the id field set failed
log.info('... subjectID could not be extracted from DICOM header - setting subjectID from session label')
subject_id = 'sub_' + os.path.basename(root_path).replace(' ', '_').replace(':','')
# Sanitize subject_id
subject_id = subject_id.replace(os.sep, '_')
log.info('... subjectID set to %s' % subject_id)
return subject_id
def screen_save_montage(dirs):
screen_saves = [f for f in dirs if f.endswith('Screen_Save')]
if screen_saves:
log.info('... %s screen saves to process' % str(len(screen_saves)))
for d in screen_saves:
pngs = glob.glob(d + '/*.png')
montage_name = pngs[0][:-5] + 'montage.png'
pngs = [shellquote(p) for p in pngs]
# Build the montage (requires imagemagick)
os.system('montage -geometry +4+4 ' + " ".join(pngs) + ' ' + shellquote(montage_name))
# Move the contents of this folder to the correct acquisition directory
ss_num = os.path.basename(d).split('_')[0][-2:] # This is the acquisition number we need
if ss_num[0] == '0': # Drop the leading zero if it's the first char
ss_num = ss_num[1:]
for target in dirs:
if os.path.basename(target).startswith(ss_num + '_'):
target_dir = target
break
shutil.move(montage_name, target_dir)
shutil.rmtree(d) # Remove the screen save folder
log.info('... done')
else:
log.info('... 0 screen saves found')
def extract_dicoms(files):
dicom_arcs = [f for f in files if f.endswith('_dicoms.tgz') or f.endswith('_dicom.tgz')]
if dicom_arcs:
log.info('... %s dicom archives to extract' % str(len(dicom_arcs)))
for f in dicom_arcs:
utd = untar(f, os.path.dirname(f))
del_files = ['._*', 'DIGEST.txt', 'METADATA.json', 'metadata.json', 'digest.txt']
for df in del_files:
[os.remove(d) for d in glob.glob(utd + '/' + df)]
log.debug('renaming %s' % utd)
# BUG:TODO: This can be an issue if there is more than one dicom archive per acquisition (see ex9407 on SNI-SDM)
os.rename(utd, os.path.join(os.path.dirname(utd), 'dicom'))
os.remove(f)
log.debug('Removing %s' % f)
log.info('... done')
else:
log.info('... 0 dicom archives found')
def extract_pfiles(files):
import zipfile
pfile_arcs = [f for f in files if f.endswith('_pfile.tgz')]
if pfile_arcs:
log.info('... %s pfile archives to extract' % str(len(pfile_arcs)))
for f in pfile_arcs:
utd = untar(f, os.path.dirname(f))
[_files, _dirs, _, _, _] = get_paths(utd)
# Remove the files that should not be in the archive
del_files = ['._*', 'DIGEST.txt', 'METADATA.json', 'metadata.json', 'digest.txt']
for df in del_files:
[os.remove(d) for d in glob.glob(utd + '/' + df)]
# Gzip the P-file prior to adding to the archive
for p in _files:
if p.endswith('.7') and not p.endswith('_refscan.7'):
gzfile = create_gzip(p, os.path.join(utd, p + '.gz'))
os.remove(p)
# Zip the utd directory
zipdir(utd, utd + '.7.zip', os.path.basename(utd))
# Clean up the directory and files
shutil.rmtree(utd)
os.remove(f)
log.info('... done')
else:
log.info('... 0 pfile archives found')
def extract_and_zip_physio(files):
physio_arcs = [f for f in files if f.endswith('_physio.tgz')]
if physio_arcs:
log.info('... %s physio archives to extract' % str(len(physio_arcs)))
for f in physio_arcs:
utd = untar(f, os.path.dirname(f))
create_archive(utd, utd)
os.rename(utd + '.zip', utd + '.gephysio.zip')
shutil.rmtree(utd)
os.remove(f)
log.info('... done')
else:
log.info('... 0 physio archives found')
def extract_physio(files):
physio_arcs = [f for f in files if f.endswith('.csv.gz')]
if physio_arcs:
log.info('... %s physio regressor file(s) to extract' % str(len(physio_arcs)))
for f in physio_arcs:
with gzip.open(f, 'rb') as in_file:
s = in_file.read()
with open(f[:-3], 'w') as a:
a.write(s)
os.remove(f)
else:
log.info('... 0 physio regressors found')
def prune_tree(files, args):
if args.prune:
log.debug('Pruning files that end with %s ' % args.prune)
for p in args.prune:
for f in files:
if f.endswith(p) and os.path.isfile(f):
os.remove(f)
log.debug('Pruning file %s ' % f)
###### UTILITIES ######
def shellquote(s):
return "'" + s.replace("'", "'\\''") + "'"
def get_paths(root_path):
file_paths = []
dir_paths = []
groups = []
projects = []
sessions = []
for (root, dirs, files) in os.walk(root_path):
for name in files:
file_paths.append(os.path.join(root, name))
for name in dirs:
dir_paths.append(os.path.join(root, name))
if len(dir_paths) > 3:
group_level = len(dir_paths[1].split(os.sep))
project_level = group_level + 1
session_level = project_level + 1
[groups.append(d) for d in dir_paths if len(d.split(os.sep)) == group_level]
[projects.append(d) for d in dir_paths if len(d.split(os.sep)) == project_level]
[sessions.append(d) for d in dir_paths if len(d.split(os.sep)) == session_level]
return (file_paths, dir_paths, groups, projects, sessions)
def untar(fname, path):
tar = tarfile.open(fname)
tar.extractall(path)
untar_dir = '.'
while untar_dir.startswith('.'):
for name in range(0, len(tar.getnames())):
untar_dir = os.path.dirname(tar.getnames()[name])
untar_dir = os.path.join(path, untar_dir)
tar.close()
return untar_dir
def create_archive(content_dir, arcname):
zipfilepath = content_dir + '.zip'
with zipfile.ZipFile(zipfilepath, 'w', zipfile.ZIP_DEFLATED, allowZip64=True) as zf:
zf.write(content_dir, arcname)
for fn in os.listdir(content_dir):
zf.write(os.path.join(content_dir, fn), os.path.join(os.path.basename(arcname), fn))
return zipfilepath
def zipdir(dirpath, zipname=None, arcbase=None):
if not arcbase:
arcbase = os.path.basename(dirpath)
if not zipname:
zipname = dirpath + '.zip'
zipf = zipfile.ZipFile(zipname, 'w', zipfile.ZIP_DEFLATED, allowZip64=True)
for root, dirs, files in os.walk(dirpath):
for _file in files:
zipf.write(os.path.join(root, _file), os.path.join(arcbase, _file))
zipf.close()
return zipname
def create_gzip(in_file, gz_file):
if not gz_file:
gz_file = in_file + '.gz'
with open(in_file, 'rb') as f_in, gzip.open(gz_file, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
return gz_file
######################################################################################
def main():
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('tar_file', help='NIMS Tar File', type=str)
arg_parser.add_argument('output_path', help='path for untar data', type=str)
arg_parser.add_argument('-g', '--group', help='Group', type=str, default='')
arg_parser.add_argument('-p', '--project', help='project', type=str, default='')
arg_parser.add_argument('-s', '--subject', help='Subject Code', type=str, default='')
arg_parser.add_argument('-i', '--subject_id_field', help='Look here for the subject id', type=str, default='')
arg_parser.add_argument('-l', '--loglevel', default='info', help='log level [default=info]')
arg_parser.add_argument('--prune', action='append', help='Files that end with this string will be pruned from final tree.')
args = arg_parser.parse_args()
log.setLevel(getattr(logging, args.loglevel.upper()))
log.debug(args)
# Output directory will be named with the current date and time
output_path = os.path.join(os.path.realpath(args.output_path), time.strftime('%Y-%m-%d_%H_%M_%S'))
## 1. Make the output directory where the tar file will be extracted
os.mkdir(output_path)
## 2. Extract the nims tar file
log.info('Extracting %s to %s' % (args.tar_file, output_path))
untar(args.tar_file, output_path)
## 3. Generate file paths and directory paths
log.info('Extracting path and file info in %s' % output_path)
(file_paths, dir_paths, group_paths, project_paths, session_paths) = get_paths(output_path)
db_root_path = dir_paths[0] # sdm or nims path (removed later)
## 4. Handle missing arguments
if not args.group:
get_group = True
else:
get_group = False
if not args.project:
get_project = True
else:
get_project = False
if not args.subject:
get_subject_id = True
else:
get_subject_id = False
# Go through groups/projects/sessions
for group in group_paths:
if get_group == True:
args.group = os.path.basename(group)
log.debug(group)
log.debug(args)
projects = []
[projects.append(p) for p in project_paths if p.startswith(group)]
for project in projects:
if get_project == True:
args.project = os.path.basename(project)
log.debug(project)
log.debug(args)
sessions = []
[sessions.append(s) for s in session_paths if s.startswith(project)]
for session in sessions:
(file_paths, dir_paths, _, _, _) = get_paths(session)
log.debug(session)
log.debug(project)
log.debug(args)
## 5. Remove the 'qa.json' files (UI can't read them)
for f in file_paths:
if f.endswith('qa.json'):
os.remove(f)
## 6. Rename: qa file to [...].qa.png and montage to .montage.zip
for f in file_paths:
if f.endswith('_qa.png'):
new_name = f.replace('_qa.png', '.qa.png')
os.rename(f, new_name)
if f.endswith('_montage.zip'):
new_name = f.replace('_montage.zip', '.montage.zip')
os.rename(f, new_name)
## 7. Extract physio regressors (_physio_regressors.csv.gz)
log.info('Extracting physio regressors...')
extract_physio(file_paths)
## 8. Move _physio.tgz files to gephysio and zip (removing digest .txt)
log.info('Extracting and repackaging physio data...')
extract_and_zip_physio(file_paths)
## 9. Extract pfiles and remove the digest and metadata files and gzip the file
log.info('Extracting and repackaging pfiles...')
extract_pfiles(file_paths)
## 10. Extract all the dicom archives and rename to 'dicom'
log.info('Extracting dicom archives...')
extract_dicoms(file_paths)
## 11. Create a montage of the screen saves and move them to the correct acquisition
log.info('Processing screen saves...')
screen_save_montage(dir_paths)
## 12. Get the subjectID (if not passed in)
if get_subject_id == True:
args.subject = extract_subject_id(session, args)
## 13. Prune tree to remove unwanted files
prune_tree(file_paths, args)
## 14. Make the folder hierarchy and move the session to its right place
log.info('Organizing final file structure...')
target_path = os.path.join(output_path, args.group, args.project, args.subject)
log.debug('Target Path: %s' % target_path)
log.debug(session)
if not os.path.isdir(target_path):
os.makedirs(target_path)
shutil.move(session, target_path) # Move the session to the target
## 15. Remove the db root folder
shutil.rmtree(db_root_path)
log.info("Done.")
print output_path
if __name__ == '__main__':
main()
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe, sys
from frappe import _
from frappe.utils import cint, flt, now, cstr, strip_html, getdate, get_datetime, to_timedelta
from frappe.model import default_fields
from frappe.model.naming import set_new_name
from frappe.modules import load_doctype_module
from frappe.model import display_fieldtypes
_classes = {}
def get_controller(doctype):
"""Returns the **class** object of the given DocType.
For `custom` type, returns `frappe.model.document.Document`.
:param doctype: DocType name as string."""
from frappe.model.document import Document
if doctype not in _classes:
module_name, custom = frappe.db.get_value("DocType", doctype, ["module", "custom"]) \
or ["Core", False]
if custom:
_class = Document
else:
module = load_doctype_module(doctype, module_name)
classname = doctype.replace(" ", "").replace("-", "")
if hasattr(module, classname):
_class = getattr(module, classname)
if issubclass(_class, BaseDocument):
_class = getattr(module, classname)
else:
raise ImportError, doctype
else:
raise ImportError, doctype
_classes[doctype] = _class
return _classes[doctype]
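# Illustrative usage (not part of the original module), assuming a "ToDo" DocType exists:
#   controller = get_controller("ToDo")   # the ToDo class from its doctype module
#   doc = controller({"doctype": "ToDo", "description": "write docs"})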
class BaseDocument(object):
ignore_in_getter = ("doctype", "_meta", "meta", "_table_fields", "_valid_columns")
def __init__(self, d):
self.update(d)
self.dont_update_if_missing = []
if hasattr(self, "__setup__"):
self.__setup__()
@property
def meta(self):
if not hasattr(self, "_meta"):
self._meta = frappe.get_meta(self.doctype)
return self._meta
def update(self, d):
if "doctype" in d:
self.set("doctype", d.get("doctype"))
# first set default field values of base document
for key in default_fields:
if key in d:
self.set(key, d.get(key))
for key, value in d.iteritems():
self.set(key, value)
return self
def update_if_missing(self, d):
if isinstance(d, BaseDocument):
d = d.get_valid_dict()
if "doctype" in d:
self.set("doctype", d.get("doctype"))
for key, value in d.iteritems():
# dont_update_if_missing is a list of fieldnames for which you don't want to set a default value
if (self.get(key) is None) and (value is not None) and (key not in self.dont_update_if_missing):
self.set(key, value)
def get_db_value(self, key):
return frappe.db.get_value(self.doctype, self.name, key)
def get(self, key=None, filters=None, limit=None, default=None):
if key:
if isinstance(key, dict):
return _filter(self.get_all_children(), key, limit=limit)
if filters:
if isinstance(filters, dict):
value = _filter(self.__dict__.get(key, []), filters, limit=limit)
else:
default = filters
filters = None
value = self.__dict__.get(key, default)
else:
value = self.__dict__.get(key, default)
if value is None and key not in self.ignore_in_getter \
and key in (d.fieldname for d in self.meta.get_table_fields()):
self.set(key, [])
value = self.__dict__.get(key)
return value
else:
return self.__dict__
def getone(self, key, filters=None):
return self.get(key, filters=filters, limit=1)[0]
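# Illustrative usage (not part of the original module), assuming an "items" child table
# field and the module-level _filter helper matching on field equality:
#   doc.get("items")                          -> all child rows in the items table
#   doc.get("items", {"qty": 1})              -> child rows whose qty equals 1
#   doc.getone("items", {"item_code": "X"})   -> the first matching child row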
def set(self, key, value, as_value=False):
if isinstance(value, list) and not as_value:
self.__dict__[key] = []
self.extend(key, value)
else:
self.__dict__[key] = value
def delete_key(self, key):
if key in self.__dict__:
del self.__dict__[key]
def append(self, key, value=None):
if value is None:
value={}
if isinstance(value, (dict, BaseDocument)):
if not self.__dict__.get(key):
self.__dict__[key] = []
value = self._init_child(value, key)
self.__dict__[key].append(value)
# reference parent document
value.parent_doc = self
return value
else:
raise ValueError, "Document attached to child table must be a dict or BaseDocument, not " + str(type(value))[1:-1]
def extend(self, key, value):
if isinstance(value, list):
for v in value:
self.append(key, v)
else:
raise ValueError
def remove(self, doc):
self.get(doc.parentfield).remove(doc)
def _init_child(self, value, key):
if not self.doctype:
return value
if not isinstance(value, BaseDocument):
if "doctype" not in value:
value["doctype"] = self.get_table_field_doctype(key)
if not value["doctype"]:
raise AttributeError, key
value = get_controller(value["doctype"])(value)
value.init_valid_columns()
value.parent = self.name
value.parenttype = self.doctype
value.parentfield = key
if value.docstatus is None:
value.docstatus = 0
if not getattr(value, "idx", None):
value.idx = len(self.get(key) or []) + 1
if not getattr(value, "name", None):
value.__dict__['__islocal'] = 1
return value
def get_valid_dict(self):
d = {}
for fieldname in self.meta.get_valid_columns():
d[fieldname] = self.get(fieldname)
df = self.meta.get_field(fieldname)
if df:
if df.fieldtype=="Check" and not isinstance(d[fieldname], int):
d[fieldname] = cint(d[fieldname])
elif df.fieldtype in ("Datetime", "Date") and d[fieldname]=="":
d[fieldname] = None
elif df.get("unique") and cstr(d[fieldname]).strip()=="":
# unique empty field should be set to None
d[fieldname] = None
return d
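# Illustrative sketch (not part of the original module): a Check field holding "1" is
# returned as the integer 1, an empty Date/Datetime string becomes None, and an empty
# value in a `unique` column is also stored as None so the database never receives "".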
def init_valid_columns(self):
for key in default_fields:
if key not in self.__dict__:
self.__dict__[key] = None
for key in self.get_valid_columns():
if key not in self.__dict__:
self.__dict__[key] = None
def get_valid_columns(self):
if self.doctype not in frappe.local.valid_columns:
if self.doctype in ("DocField", "DocPerm") and self.parent in ("DocType", "DocField", "DocPerm"):
from frappe.model.meta import get_table_columns
valid = get_table_columns(self.doctype)
else:
valid = self.meta.get_valid_columns()
frappe.local.valid_columns[self.doctype] = valid
return frappe.local.valid_columns[self.doctype]
def is_new(self):
return self.get("__islocal")
def as_dict(self, no_nulls=False, no_default_fields=False):
doc = self.get_valid_dict()
doc["doctype"] = self.doctype
for df in self.meta.get_table_fields():
children = self.get(df.fieldname) or []
doc[df.fieldname] = [d.as_dict(no_nulls=no_nulls) for d in children]
if no_nulls:
for k in doc.keys():
if doc[k] is None:
del doc[k]
if no_default_fields:
for k in doc.keys():
if k in default_fields:
del doc[k]
for key in ("_user_tags", "__islocal", "__onload", "_starred_by"):
if self.get(key):
doc[key] = self.get(key)
return frappe._dict(doc)
def as_json(self):
return frappe.as_json(self.as_dict())
def get_table_field_doctype(self, fieldname):
return self.meta.get_field(fieldname).options
def get_parentfield_of_doctype(self, doctype):
fieldname = [df.fieldname for df in self.meta.get_table_fields() if df.options==doctype]
return fieldname[0] if fieldname else None
def db_insert(self):
"""INSERT the document (with valid columns) in the database."""
if not self.name:
# name will be set by document class in most cases
set_new_name(self)
d = self.get_valid_dict()
columns = d.keys()
try:
frappe.db.sql("""insert into `tab{doctype}`
({columns}) values ({values})""".format(
doctype = self.doctype,
columns = ", ".join(["`"+c+"`" for c in columns]),
values = ", ".join(["%s"] * len(columns))
), d.values())
except Exception, e:
if e.args[0]==1062:
if "PRIMARY" in cstr(e.args[1]):
if self.meta.autoname=="hash":
# hash collision? try again
self.name = None
self.db_insert()
return
type, value, traceback = sys.exc_info()
frappe.msgprint(_("Duplicate name {0} {1}").format(self.doctype, self.name))
raise frappe.DuplicateEntryError, (self.doctype, self.name, e), traceback
elif "Duplicate" in cstr(e.args[1]):
# unique constraint
self.show_unique_validation_message(e)
else:
raise
else:
raise
self.set("__islocal", False)
def db_update(self):
if self.get("__islocal") or not self.name:
self.db_insert()
return
d = self.get_valid_dict()
columns = d.keys()
try:
frappe.db.sql("""update `tab{doctype}`
set {values} where name=%s""".format(
doctype = self.doctype,
values = ", ".join(["`"+c+"`=%s" for c in columns])
), d.values() + [d.get("name")])
except Exception, e:
if e.args[0]==1062 and "Duplicate" in cstr(e.args[1]):
self.show_unique_validation_message(e)
else:
raise
def show_unique_validation_message(self, e):
type, value, traceback = sys.exc_info()
fieldname = str(e).split("'")[-2]
label = fieldname if fieldname.startswith("unique_") else self.meta.get_label(fieldname)
frappe.msgprint(_("{0} must be unique".format(label)))
raise frappe.UniqueValidationError, (self.doctype, self.name, e), traceback
def db_set(self, fieldname, value, update_modified=True):
self.set(fieldname, value)
self.set("modified", now())
self.set("modified_by", frappe.session.user)
frappe.db.set_value(self.doctype, self.name, fieldname, value,
self.modified, self.modified_by, update_modified=update_modified)
def _fix_numeric_types(self):
for df in self.meta.get("fields"):
if df.fieldtype == "Check":
self.set(df.fieldname, cint(self.get(df.fieldname)))
elif self.get(df.fieldname) is not None:
if df.fieldtype == "Int":
self.set(df.fieldname, cint(self.get(df.fieldname)))
elif df.fieldtype in ("Float", "Currency", "Percent"):
self.set(df.fieldname, flt(self.get(df.fieldname)))
if self.docstatus is not None:
self.docstatus = cint(self.docstatus)
def _get_missing_mandatory_fields(self):
"""Get mandatory fields that do not have any values"""
def get_msg(df):
if df.fieldtype == "Table":
return "{}: {}: {}".format(_("Error"), _("Data missing in table"), _(df.label))
elif self.parentfield:
return "{}: {} #{}: {}: {}".format(_("Error"), _("Row"), self.idx,
_("Value missing for"), _(df.label))
else:
return "{}: {}: {}".format(_("Error"), _("Value missing for"), _(df.label))
missing = []
for df in self.meta.get("fields", {"reqd": 1}):
if self.get(df.fieldname) in (None, []) or not strip_html(cstr(self.get(df.fieldname))).strip():
missing.append((df.fieldname, get_msg(df)))
return missing
def get_invalid_links(self, is_submittable=False):
def get_msg(df, docname):
if self.parentfield:
return "{} #{}: {}: {}".format(_("Row"), self.idx, _(df.label), docname)
else:
return "{}: {}".format(_(df.label), docname)
invalid_links = []
cancelled_links = []
for df in self.meta.get_link_fields() + self.meta.get("fields",
{"fieldtype":"Dynamic Link"}):
docname = self.get(df.fieldname)
if docname:
if df.fieldtype=="Link":
doctype = df.options
if not doctype:
frappe.throw(_("Options not set for link field {0}").format(df.fieldname))
else:
doctype = self.get(df.options)
if not doctype:
frappe.throw(_("{0} must be set first").format(self.meta.get_label(df.options)))
# MySQL is case insensitive. Preserve case of the original docname in the Link Field.
value = frappe.db.get_value(doctype, docname, "name", cache=True)
setattr(self, df.fieldname, value)
if not value:
invalid_links.append((df.fieldname, docname, get_msg(df, docname)))
elif (df.fieldname != "amended_from"
and (is_submittable or self.meta.is_submittable) and frappe.get_meta(doctype).is_submittable
and cint(frappe.db.get_value(doctype, docname, "docstatus"))==2):
cancelled_links.append((df.fieldname, docname, get_msg(df, docname)))
return invalid_links, cancelled_links
def _validate_selects(self):
if frappe.flags.in_import:
return
for df in self.meta.get_select_fields():
if df.fieldname=="naming_series" or not (self.get(df.fieldname) and df.options):
continue
options = (df.options or "").split("\n")
# if only empty options
if not filter(None, options):
continue
# strip and set
self.set(df.fieldname, cstr(self.get(df.fieldname)).strip())
value = self.get(df.fieldname)
if value not in options and not (frappe.flags.in_test and value.startswith("_T-")):
# show an elaborate message
prefix = _("Row #{0}:").format(self.idx) if self.get("parentfield") else ""
label = _(self.meta.get_label(df.fieldname))
comma_options = '", "'.join(_(each) for each in options)
frappe.throw(_('{0} {1} cannot be "{2}". It should be one of "{3}"').format(prefix, label,
value, comma_options))
def _validate_constants(self):
if frappe.flags.in_import or self.is_new():
return
constants = [d.fieldname for d in self.meta.get("fields", {"set_only_once": 1})]
if constants:
values = frappe.db.get_value(self.doctype, self.name, constants, as_dict=True)
for fieldname in constants:
if self.get(fieldname) != values.get(fieldname):
frappe.throw(_("Value cannot be changed for {0}").format(self.meta.get_label(fieldname)),
frappe.CannotChangeConstantError)
def _validate_update_after_submit(self):
db_values = frappe.db.get_value(self.doctype, self.name, "*", as_dict=True)
for key, db_value in db_values.iteritems():
df = self.meta.get_field(key)
if df and not df.allow_on_submit and (self.get(key) or db_value):
self_value = self.get_value(key)
if self_value != db_value:
frappe.throw(_("Not allowed to change {0} after submission").format(df.label),
frappe.UpdateAfterSubmitError)
def precision(self, fieldname, parentfield=None):
"""Returns float precision for a particular field (or get global default).
:param fieldname: Fieldname for which precision is required.
:param parentfield: If fieldname is in child table."""
from frappe.model.meta import get_field_precision
if parentfield and not isinstance(parentfield, basestring):
parentfield = parentfield.parentfield
cache_key = parentfield or "main"
if not hasattr(self, "_precision"):
self._precision = frappe._dict()
if cache_key not in self._precision:
self._precision[cache_key] = frappe._dict()
if fieldname not in self._precision[cache_key]:
self._precision[cache_key][fieldname] = None
doctype = self.meta.get_field(parentfield).options if parentfield else self.doctype
df = frappe.get_meta(doctype).get_field(fieldname)
if df.fieldtype in ("Currency", "Float", "Percent"):
self._precision[cache_key][fieldname] = get_field_precision(df, self)
return self._precision[cache_key][fieldname]
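# Illustrative usage of precision() (the fieldnames and child-table name here
# are hypothetical, not part of this class):
#   doc.precision("rate")             # precision of a Float/Currency field on this document
#   doc.precision("amount", "items")  # precision of a field inside the "items" child table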
def get_formatted(self, fieldname, doc=None, currency=None, absolute_value=False):
from frappe.utils.formatters import format_value
df = self.meta.get_field(fieldname)
if not df and fieldname in default_fields:
from frappe.model.meta import get_default_df
df = get_default_df(fieldname)
val = self.get(fieldname)
if absolute_value and isinstance(val, (int, float)):
val = abs(self.get(fieldname))
return format_value(val, df=df, doc=doc or self, currency=currency)
def is_print_hide(self, fieldname, df=None, for_print=True):
"""Returns true if fieldname is to be hidden for print.
Print Hide can be set via the Print Format Builder or in the controller as a list
of hidden fields. Example
class MyDoc(Document):
def __setup__(self):
self.print_hide = ["field1", "field2"]
:param fieldname: Fieldname to be checked if hidden.
"""
meta_df = self.meta.get_field(fieldname)
if meta_df and meta_df.get("__print_hide"):
return True
if df:
return df.print_hide
if meta_df:
return meta_df.print_hide
def in_format_data(self, fieldname):
"""Returns True if shown via Print Format::`format_data` property.
Called from within standard print format."""
doc = getattr(self, "parent_doc", self)
if hasattr(doc, "format_data_map"):
return fieldname in doc.format_data_map
else:
return True
def reset_values_if_no_permlevel_access(self, has_access_to, high_permlevel_fields):
"""If the user does not have permissions at permlevel > 0, then reset the values to original / default"""
to_reset = []
for df in high_permlevel_fields:
if df.permlevel not in has_access_to and df.fieldtype not in display_fieldtypes:
to_reset.append(df)
if to_reset:
if self.is_new():
# if new, set default value
ref_doc = frappe.new_doc(self.doctype)
else:
# get values from old doc
if self.parent:
self.parent_doc.get_latest()
ref_doc = [d for d in self.parent_doc.get(self.parentfield) if d.name == self.name][0]
else:
ref_doc = self.get_latest()
for df in to_reset:
self.set(df.fieldname, ref_doc.get(df.fieldname))
def get_value(self, fieldname):
df = self.meta.get_field(fieldname)
val = self.get(fieldname)
return self.cast(val, df)
def cast(self, val, df):
if df.fieldtype in ("Currency", "Float", "Percent"):
val = flt(val)
elif df.fieldtype in ("Int", "Check"):
val = cint(val)
elif df.fieldtype in ("Data", "Text", "Small Text", "Long Text",
"Text Editor", "Select", "Link", "Dynamic Link"):
val = cstr(val)
elif df.fieldtype == "Date":
val = getdate(val)
elif df.fieldtype == "Datetime":
val = get_datetime(val)
elif df.fieldtype == "Time":
val = to_timedelta(val)
return val
def _extract_images_from_text_editor(self):
from frappe.utils.file_manager import extract_images_from_doc
if self.doctype != "DocType":
for df in self.meta.get("fields", {"fieldtype":"Text Editor"}):
extract_images_from_doc(self, df.fieldname)
def _filter(data, filters, limit=None):
"""pass filters as:
{"key": "val", "key": ["!=", "val"],
"key": ["in", "val"], "key": ["not in", "val"], "key": "^val",
"key" : True (exists), "key": False (does not exist) }"""
out = []
for d in data:
add = True
for f in filters:
fval = filters[f]
if fval is True:
fval = ("not None", fval)
elif fval is False:
fval = ("None", fval)
elif not isinstance(fval, (tuple, list)):
if isinstance(fval, basestring) and fval.startswith("^"):
fval = ("^", fval[1:])
else:
fval = ("=", fval)
if not frappe.compare(getattr(d, f, None), fval[0], fval[1]):
add = False
break
if add:
out.append(d)
if limit and (len(out)-1)==limit:
break
return out
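# Illustrative sketch of the filter syntax documented in the docstring above
# (the child rows and fieldnames are hypothetical):
#   open_rows  = _filter(doc.get("items"), {"status": ["!=", "Closed"]})
#   stock_rows = _filter(doc.get("items"), {"item_code": "^STO"})
#   serialised = _filter(doc.get("items"), {"serial_no": True}, limit=10)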
|
|
# Copyright (c) 2012 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Ron Dreslinski
from m5.objects import *
class Port0_FU(FUDesc):
opList = [ OpDesc(opClass="IntAlu", opLat=1),
OpDesc(opClass="IntDiv", opLat=20, issueLat=20),
OpDesc(opClass="FloatMult", opLat=5),
OpDesc(opClass="FloatCvt", opLat=3),
OpDesc(opClass="FloatDiv", opLat=10),
OpDesc(opClass="FloatSqrt", opLat=10),
OpDesc(opClass="SimdFloatMult", opLat=5),
OpDesc(opClass="SimdFloatMultAcc", opLat=6),
OpDesc(opClass="SimdFloatCvt", opLat=3),
OpDesc(opClass="SimdFloatDiv", opLat=10),
OpDesc(opClass="SimdFloatSqrt", opLat=10),
OpDesc(opClass="SimdAddAcc", opLat=1),
OpDesc(opClass="SimdAdd", opLat=1),
OpDesc(opClass="SimdAlu", opLat=1),
OpDesc(opClass="SimdShiftAcc", opLat=1),
OpDesc(opClass="SimdShift", opLat=1) ]
count = 1
class Port1_FU(FUDesc):
opList = [ OpDesc(opClass="IntAlu", opLat=1),
OpDesc(opClass="IntMult", opLat=3),
OpDesc(opClass="IprAccess", opLat=3),
OpDesc(opClass="FloatAdd", opLat=3),
OpDesc(opClass="SimdFloatAlu", opLat=3),
OpDesc(opClass="SimdFloatAdd", opLat=3),
OpDesc(opClass="SimdMult", opLat=3),
OpDesc(opClass="SimdMultAcc", opLat=4),
OpDesc(opClass="SimdSqrt", opLat=4),
OpDesc(opClass="SimdCvt", opLat=3) ]
count = 1
class Port5_FU(FUDesc):
opList = [ OpDesc(opClass="IntAlu", opLat=1),
OpDesc(opClass="FloatCmp", opLat=1),
OpDesc(opClass="SimdFloatCmp", opLat=3),
OpDesc(opClass="SimdFloatMisc", opLat=3),
OpDesc(opClass="SimdCmp", opLat=1),
OpDesc(opClass="SimdMisc", opLat=3),
OpDesc(opClass="SimdAdd", opLat=1),
OpDesc(opClass="SimdAddAcc", opLat=1),
OpDesc(opClass="SimdShiftAcc", opLat=1),
OpDesc(opClass="SimdShift", opLat=1),
OpDesc(opClass="SimdAlu", opLat=1) ]
count = 1
# Load/Store Units
class O3_ARM_v7a_Load(FUDesc):
opList = [ OpDesc(opClass='MemRead',opLat=2) ]
count = 4
class O3_ARM_v7a_Store(FUDesc):
opList = [OpDesc(opClass='MemWrite',opLat=2) ]
count = 1
# Functional Units for this CPU
class O3_ARM_v7a_FUP(FUPool):
FUList = [Port0_FU(), Port1_FU(),
O3_ARM_v7a_Load(), O3_ARM_v7a_Store(), Port5_FU()]
# Branch Predictor (configured as "tournament" below; the original bi-mode settings are kept as comments)
class O3_ARM_v7a_BP(BranchPredictor):
predType = "tournament"
localCtrBits = 2
localHistoryTableSize = 64
globalPredictorSize = 8192
globalCtrBits = 2
choicePredictorSize = 8192
choiceCtrBits = 2
BTBEntries = 2048
BTBTagSize = 18
RASSize = 16
instShiftAmt = 2
# predType = "bi-mode"
# globalPredictorSize = 8192
# globalCtrBits = 2
# choicePredictorSize = 8192
# choiceCtrBits = 2
# BTBEntries = 2048
# BTBTagSize = 18
# RASSize = 16
# instShiftAmt = 2
class O3_ARM_v7a_3(DerivO3CPU):
LQEntries = 72
# LQEntries = 72 based on nehalem.cfg
SQEntries = 42
# SQEntries = 42 based on nehalem.cfg
LSQDepCheckShift = 0
LFSTSize = 1024
SSITSize = 1024
decodeToFetchDelay = 1
renameToFetchDelay = 1
iewToFetchDelay = 1
commitToFetchDelay = 1
renameToDecodeDelay = 1
iewToDecodeDelay = 1
commitToDecodeDelay = 1
iewToRenameDelay = 1
commitToRenameDelay = 1
commitToIEWDelay = 1
fetchWidth = 4
# fetchBufferSize = 16
fetchToDecodeDelay = 2
decodeWidth = 4
decodeToRenameDelay = 2
renameWidth = 4
renameToIEWDelay = 2
issueToExecuteDelay = 1
dispatchWidth = 4
issueWidth = 5
wbWidth = 5
# wbDepth = 1 Not supported
fuPool = O3_ARM_v7a_FUP()
iewToCommitDelay = 1
renameToROBDelay = 1
commitWidth = 4
squashWidth = 16
trapLatency = 13
backComSize = 10
forwardComSize = 5
numPhysIntRegs = 256
numPhysFloatRegs = 256
numIQEntries = 36
numROBEntries = 128
# numIQEntries = 64 based on nehalem.cfg
# numROBEntries = 192 based on nehalem.cfg
switched_out = False
branchPred = O3_ARM_v7a_BP()
# LQEntries = 16
# SQEntries = 16
# LSQDepCheckShift = 0
# LFSTSize = 1024
# SSITSize = 1024
# decodeToFetchDelay = 1
# renameToFetchDelay = 1
# iewToFetchDelay = 1
# commitToFetchDelay = 1
# renameToDecodeDelay = 1
# iewToDecodeDelay = 1
# commitToDecodeDelay = 1
# iewToRenameDelay = 1
# commitToRenameDelay = 1
# commitToIEWDelay = 1
# fetchWidth = 3
# fetchBufferSize = 16
# fetchToDecodeDelay = 3
# decodeWidth = 3
# decodeToRenameDelay = 2
# renameWidth = 3
# renameToIEWDelay = 1
# issueToExecuteDelay = 1
# dispatchWidth = 6
# issueWidth = 8
# wbWidth = 8
# fuPool = O3_ARM_v7a_FUP()
# iewToCommitDelay = 1
# renameToROBDelay = 1
# commitWidth = 8
# squashWidth = 8
# trapLatency = 13
# backComSize = 5
# forwardComSize = 5
# numPhysIntRegs = 128
# numPhysFloatRegs = 192
# numIQEntries = 32
# numROBEntries = 40
# switched_out = False
# branchPred = O3_ARM_v7a_BP()
# Instruction Cache
class O3_ARM_v7a_ICache(BaseCache):
hit_latency = 1
response_latency = 1
mshrs = 4
tgts_per_mshr = 16
size = '32kB'
assoc = 2
is_top_level = True
#prefetch_on_access = False # turning off the prefetcher as it throws a segfault
#prefetcher = TaggedPrefetcher(degree = 2, latency = 1)
# hit_latency = 1
# response_latency = 1
# mshrs = 2
# tgts_per_mshr = 8
# size = '32kB'
# assoc = 2
# is_top_level = True
# Data Cache
class O3_ARM_v7a_DCache(BaseCache):
hit_latency = 3
response_latency = 2
mshrs = 16
tgts_per_mshr = 16
size = '32kB'
assoc = 4
write_buffers = 16
is_top_level = True
#prefetch_on_access = False # turning off the prefetcher as it throws a segfault
#prefetcher = StridePrefetcher(degree = 2, latency = 1)
# hit_latency = 2
# response_latency = 2
# mshrs = 6
# tgts_per_mshr = 8
# size = '32kB'
# assoc = 2
# write_buffers = 16
# is_top_level = True
# TLB Cache
# Use a cache as an L2 TLB
class O3_ARM_v7aWalkCache(BaseCache):
hit_latency = 4
response_latency = 4
mshrs = 6
tgts_per_mshr = 8
size = '512B'
assoc = 4
write_buffers = 16
is_top_level = True
# hit_latency = 4
# response_latency = 4
# mshrs = 6
# tgts_per_mshr = 8
# size = '1kB'
# assoc = 8
# write_buffers = 16
# is_top_level = True
# L2 Cache
class O3_ARM_v7aL2(BaseCache):
hit_latency = 6
response_latency = 2
mshrs = 16
tgts_per_mshr = 16
size = '256kB'
assoc = 8
write_buffers = 8
#prefetch_on_access = False # turning off the prefetcher as it throws a segfault
# Simple stride prefetcher
#prefetcher = StridePrefetcher(degree=2, latency = 1)
# tags = RandomRepl()
# hit_latency = 12
# response_latency = 12
# mshrs = 16
# tgts_per_mshr = 8
# size = '1MB'
# assoc = 16
# write_buffers = 8
# prefetch_on_access = True
# # Simple stride prefetcher
# prefetcher = StridePrefetcher(degree=8, latency = 1)
# tags = RandomRepl()
# L3 Cache
class O3_ARM_v7aL3(BaseCache):
hit_latency = 14
response_latency = 10
mshrs = 16
tgts_per_mshr = 16
size = '4MB'
assoc = 16
write_buffers = 8
#prefetch_on_access = False # turning off the prefetcher as it throws a segfault
# Simple stride prefetcher
#prefetcher = StridePrefetcher(degree=2, latency = 1)
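# Illustrative wiring sketch for a gem5 configuration script (kept as comments;
# the cache-hierarchy helper below exists in the gem5 releases this file
# targets, but helper names vary between versions, so treat it as an assumption):
#   cpu = O3_ARM_v7a_3(cpu_id=0)
#   cpu.addTwoLevelCacheHierarchy(O3_ARM_v7a_ICache(), O3_ARM_v7a_DCache(),
#                                 O3_ARM_v7aL2())
#   # The L3 class above would sit behind a shared bus, as in the stock
#   # se.py/fs.py example scripts.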
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class RouteFilterRulesOperations:
"""RouteFilterRulesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2017_03_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
route_filter_name: str,
rule_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-03-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
route_filter_name: str,
rule_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified rule from a route filter.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_filter_name: The name of the route filter.
:type route_filter_name: str
:param rule_name: The name of the rule.
:type rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
route_filter_name=route_filter_name,
rule_name=rule_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'} # type: ignore
async def get(
self,
resource_group_name: str,
route_filter_name: str,
rule_name: str,
**kwargs: Any
) -> "_models.RouteFilterRule":
"""Gets the specified rule from a route filter.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_filter_name: The name of the route filter.
:type route_filter_name: str
:param rule_name: The name of the rule.
:type rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RouteFilterRule, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2017_03_01.models.RouteFilterRule
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilterRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-03-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RouteFilterRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
route_filter_name: str,
rule_name: str,
route_filter_rule_parameters: "_models.RouteFilterRule",
**kwargs: Any
) -> "_models.RouteFilterRule":
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilterRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(route_filter_rule_parameters, 'RouteFilterRule')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('RouteFilterRule', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('RouteFilterRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
route_filter_name: str,
rule_name: str,
route_filter_rule_parameters: "_models.RouteFilterRule",
**kwargs: Any
) -> AsyncLROPoller["_models.RouteFilterRule"]:
"""Creates or updates a route in the specified route filter.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_filter_name: The name of the route filter.
:type route_filter_name: str
:param rule_name: The name of the route filter rule.
:type rule_name: str
:param route_filter_rule_parameters: Parameters supplied to the create or update route filter
rule operation.
:type route_filter_rule_parameters: ~azure.mgmt.network.v2017_03_01.models.RouteFilterRule
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either RouteFilterRule or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2017_03_01.models.RouteFilterRule]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilterRule"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
route_filter_name=route_filter_name,
rule_name=rule_name,
route_filter_rule_parameters=route_filter_rule_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('RouteFilterRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'} # type: ignore
async def _update_initial(
self,
resource_group_name: str,
route_filter_name: str,
rule_name: str,
route_filter_rule_parameters: "_models.PatchRouteFilterRule",
**kwargs: Any
) -> "_models.RouteFilterRule":
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilterRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(route_filter_rule_parameters, 'PatchRouteFilterRule')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RouteFilterRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'} # type: ignore
async def begin_update(
self,
resource_group_name: str,
route_filter_name: str,
rule_name: str,
route_filter_rule_parameters: "_models.PatchRouteFilterRule",
**kwargs: Any
) -> AsyncLROPoller["_models.RouteFilterRule"]:
"""Updates a route in the specified route filter.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_filter_name: The name of the route filter.
:type route_filter_name: str
:param rule_name: The name of the route filter rule.
:type rule_name: str
:param route_filter_rule_parameters: Parameters supplied to the update route filter rule
operation.
:type route_filter_rule_parameters: ~azure.mgmt.network.v2017_03_01.models.PatchRouteFilterRule
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either RouteFilterRule or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2017_03_01.models.RouteFilterRule]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilterRule"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_initial(
resource_group_name=resource_group_name,
route_filter_name=route_filter_name,
rule_name=rule_name,
route_filter_rule_parameters=route_filter_rule_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('RouteFilterRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'} # type: ignore
def list_by_route_filter(
self,
resource_group_name: str,
route_filter_name: str,
**kwargs: Any
) -> AsyncIterable["_models.RouteFilterRuleListResult"]:
"""Gets all RouteFilterRules in a route filter.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_filter_name: The name of the route filter.
:type route_filter_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RouteFilterRuleListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2017_03_01.models.RouteFilterRuleListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilterRuleListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-03-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_route_filter.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('RouteFilterRuleListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_route_filter.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules'} # type: ignore
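# Typical async usage sketch (assumes the versioned package exposes an async
# NetworkManagementClient that attaches this operations group as
# `route_filter_rules`; the resource names below are hypothetical):
#
#   poller = await client.route_filter_rules.begin_create_or_update(
#       "my-rg", "my-route-filter", "my-rule", rule_parameters)
#   rule = await poller.result()
#
#   async for rule in client.route_filter_rules.list_by_route_filter(
#           "my-rg", "my-route-filter"):
#       print(rule.name)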
|
|
##########################################################################
#
# Copyright 2010 Dr D Studios Pty Limited (ACN 127 184 954) (Dr. D Studios),
# its affiliates and/or its licensors.
#
# Copyright (c) 2010-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import warnings
import hou
import toolutils
import IECore
import IECoreHoudini
class FnParameterisedHolder():
_nodeType = None
# create our function set and stash which node we're looking at
def __init__(self, node=None):
self.__node = node
# check this node is still valid
def nodeValid(self):
if not self.__node:
raise "FnParameterisedHolder does not have a node to operate on."
try:
p = self.__node.path()
return True
except hou.ObjectWasDeleted:
return False
# return the node we're currently wrapping
def node(self):
return self.__node if self.nodeValid() else None
@staticmethod
# nodeType: type of node to create (str)
# name: desired node name (str)
# className: class path to op stub (str)
# version: op version, or None for latest (int)
# envVarName: environment variable to use as a search path for ops (str)
# parent: parent node, or None to create a new /obj geo. Ignored if contextArgs is used in UI mode (hou.Node)
# contextArgs: args related to the creation context, as would come from UI menu interactions (dict)
# If empty or not in UI mode, will create a top level OBJ to house the new holder
def _doCreate( nodeType, name, className, version=None, envVarName=None, parent=None, contextArgs={} ) :
if hou.isUIAvailable() and contextArgs.get( "toolname", "" ) :
holder = toolutils.genericTool( contextArgs, nodeType, nodename = name )
else :
parent = parent if parent else hou.node( "/obj" ).createNode( "geo", node_name=name, run_init_scripts=False )
holder = parent.createNode( nodeType, node_name=name )
IECoreHoudini.FnParameterisedHolder( holder ).setParameterised( className, version, envVarName )
if contextArgs.get( "shiftclick", False ) :
converter = holder.parent().createNode( "ieCortexConverter", node_name = holder.name()+"Converter" )
outputNode = hou.node( contextArgs.get( "outputnodename", "" ) )
toolutils.connectInputsAndOutputs( converter, False, holder, outputNode, 0, 0 )
x, y = holder.position()
converter.setPosition( [x,y-1] )
return holder
# do we have a valid parameterised instance?
def hasParameterised( self ) :
return IECoreHoudini._IECoreHoudini._FnParameterisedHolder( self.node() ).hasParameterised() if self.nodeValid() else False
# this sets a parameterised object on our node and then updates the parameters
def setParameterised( self, classNameOrParameterised, classVersion=None, envVarName=None, updateGui=True ) :
if not self.nodeValid() :
return
if isinstance( classNameOrParameterised, str ) :
if classVersion is None or classVersion < 0 :
classVersions = IECore.ClassLoader.defaultLoader( envVarName ).versions( classNameOrParameterised )
classVersion = classVersions[-1] if classVersions else 0
IECoreHoudini._IECoreHoudini._FnParameterisedHolder( self.node() ).setParameterised( classNameOrParameterised, classVersion, envVarName )
else :
IECoreHoudini._IECoreHoudini._FnParameterisedHolder( self.node() ).setParameterised( classNameOrParameterised )
parameterised = self.getParameterised()
if updateGui and parameterised :
self.updateParameters( parameterised )
# this returns the parameterised object our node is working with
def getParameterised( self ) :
return IECoreHoudini._IECoreHoudini._FnParameterisedHolder( self.node() ).getParameterised() if self.hasParameterised() else None
def setParameterisedValues( self, time = None ) :
time = hou.time() if time is None else time
IECoreHoudini._IECoreHoudini._FnParameterisedHolder( self.node() ).setParameterisedValues( time )
# get our list of class names based on matchString
def classNames( self ) :
if not self.nodeValid() :
return []
matchString = self.__node.parm( "__classMatchString" ).eval()
searchPathEnvVar = self.__node.parm( "__classSearchPathEnvVar" ).eval()
return IECore.ClassLoader.defaultLoader( searchPathEnvVar ).classNames( matchString )
# takes a snapshot of the parameter values & expressions on our node so
# that if we change the procedural/op we can restore the parameters afterwards.
def cacheParameters(self):
cached_parameters = {}
for p in self.__node.parmTuplesInFolder(['Parameters']):
if p.isSpare():
data = {}
data['value'] = p.eval()
expressions = []
for i in range(len(p)):
try:
expr = p[i].expression()
lang = p[i].expressionLanguage()
expressions.append( ( expr, lang ) )
except:
expressions.append( ( None, None ) )
data['expressions'] = expressions
cached_parameters[p.name()] = data
return cached_parameters
# restores parameter values/expressions from those cached by cacheParameters
def restoreCachedParameters(self, cached):
for p in self.__node.parmTuplesInFolder(['Parameters']):
if p.name() in cached:
cached_data = cached[p.name()]
p.set( cached_data['value'] )
for i in range(len(p)):
if cached_data['expressions'][i][0]:
expr = cached_data['expressions'][i][0]
lang = cached_data['expressions'][i][1]
p[i].setExpression( expr, lang )
# return the spare parameters under the "Parameters" tab
def spareParameters( self, tuples=True ) :
result = []
for p in self.__node.spareParms() :
if "Parameters" in p.containingFolders() :
result.append( p.tuple() if tuples else p )
return result
# this method removes all spare parameters from the "Parameters" folder
def removeParameters( self ) :
if not self.nodeValid() :
return
spareParms = self.spareParameters()
while spareParms :
self.__node.removeSpareParmTuple( spareParms[0] )
# this is needed to account for parms removed by a containing folder
spareParms = self.spareParameters()
# add/remove parameters on our node so we correctly reflect our Procedural
def updateParameters( self, parameterised ) :
if not self.nodeValid():
return
# cache parameters & then remove them
cached_parameters = self.cacheParameters()
self.removeParameters()
if not parameterised:
return
# get a list of our parm templates by calling createParm on our top-level CompoundParameter
# and add them as spare parameter
parms = IECoreHoudini.ParmTemplates.createParm( parameterised.parameters(), top_level=True )
parm_names = []
for p in parms:
parm_names.append( p['name'] )
parm = self.__node.addSpareParmTuple( p['tuple'], in_folder=p['folder'], create_missing_folders=True )
parm.set( p['initialValue'] )
# restore our cached parameters
self.restoreCachedParameters( cached_parameters )
# update the nodes parameter evaluation expression
# this creates cook dependencies on the parameters
expr = ""
for p in parm_names:
expr += "if parmTuple('%s'):\n\t%s = evalParmTuple('%s')\n" % ( p, p, p )
expr += "return 1"
if len(parm_names)==0:
expr = "1"
eval_parm = self.__node.parm( "__evaluateParameters" )
eval_parm.lock(False)
eval_parm.setExpression( expr, language=hou.exprLanguage.Python, replace_expression=True )
eval_parm.lock(True)
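# Illustrative usage sketch (the holder node type, op class path and search
# path variable below are hypothetical and depend on the local op installation):
#   holder = FnParameterisedHolder._doCreate( "ieOpHolder", "myOp",
#       "maths/multiply", envVarName="IECORE_OP_PATHS" )
#   fn = FnParameterisedHolder( holder )
#   op = fn.getParameterised()
#   fn.setParameterisedValues()   # push the current parm values onto the op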
|
|
import collections
from django import http
from django.shortcuts import render, redirect, get_object_or_404
from django.views.decorators.http import require_POST
from django.db import transaction
from django.views.decorators.cache import cache_page
from django.db.models import Q
from funfactory.urlresolvers import reverse
from jsonview.decorators import json_view
from airmozilla.manage.utils import filename_to_notes
from airmozilla.base.utils import dot_dict
from airmozilla.main.helpers import thumbnail
from airmozilla.main.models import Event, Picture
from airmozilla.manage import forms
from .decorators import staff_required, permission_required
from .utils import can_edit_event
@staff_required
def picturegallery(request):
context = {}
if request.GET.get('event'):
event = get_object_or_404(Event, id=request.GET.get('event'))
result = can_edit_event(
event,
request.user,
default='manage:picturegallery'
)
if isinstance(result, http.HttpResponse):
return result
context['event'] = event
return render(request, 'manage/picturegallery.html', context)
@staff_required
@json_view
def picturegallery_data(request):
context = {}
if request.GET.get('event'):
event = get_object_or_404(Event, id=request.GET['event'])
else:
event = None
items = _get_all_pictures(event=event)
context['pictures'] = items
context['urls'] = {
'manage:picture_edit': reverse('manage:picture_edit', args=('0',)),
'manage:picture_delete': reverse('manage:picture_delete', args=('0',)),
'manage:picture_delete_all': reverse(
'manage:picture_delete_all', args=('0',)
),
'manage:redirect_picture_thumbnail': reverse(
'manage:redirect_picture_thumbnail', args=('0',)
),
'manage:picture_event_associate': reverse(
'manage:picture_event_associate', args=('0',)
),
'manage:event_edit': reverse('manage:event_edit', args=('0',)),
}
context['stats'] = {
'total_pictures': Picture.objects.all().count(),
'event_pictures': Picture.objects.filter(event__isnull=False).count(),
}
return context
def _get_all_pictures(event=None):
values = (
'id',
'title',
'placeholder_img',
'picture_id',
# 'default_placeholder',
)
event_map = collections.defaultdict(list)
cant_delete = collections.defaultdict(bool)
for each in Event.objects.filter(picture__isnull=False).values(*values):
event_map[each['picture_id']].append({
'id': each['id'],
'title': each['title']
})
if not each['placeholder_img']:
# then you can definitely not delete this picture
cant_delete[each['picture_id']] = True
pictures = []
values = (
'id',
'size',
'width',
'height',
'notes',
'created',
'modified',
'modified_user',
'event_id',
'default_placeholder',
'is_active',
)
qs = Picture.objects.all()
if event:
qs = qs.filter(
Q(event__isnull=True) |
Q(event=event)
)
qs = qs.exclude(is_active=False)
else:
qs = qs.filter(event__isnull=True)
for picture_dict in qs.order_by('event', '-created').values(*values):
picture = dot_dict(picture_dict)
item = {
'id': picture.id,
'width': picture.width,
'height': picture.height,
'size': picture.size,
'created': picture.created.isoformat(),
'events': event_map[picture.id],
'event': picture.event_id,
'default_placeholder': picture.default_placeholder,
'is_active': picture.is_active,
}
if cant_delete.get(picture.id):
item['cant_delete'] = True
if picture.notes:
item['notes'] = picture.notes
# if picture.id in event_map:
# item['events'] = event_map[picture.id]
pictures.append(item)
return pictures
@staff_required
@permission_required('main.change_picture')
@transaction.commit_on_success
def picture_edit(request, id):
picture = get_object_or_404(Picture, id=id)
context = {'picture': picture}
if request.method == 'POST':
form = forms.PictureForm(request.POST, request.FILES, instance=picture)
if form.is_valid():
picture = form.save()
if picture.default_placeholder:
# make all others NOT-default
qs = (
Picture.objects
.exclude(id=picture.id)
.filter(default_placeholder=True)
)
for other in qs:
other.default_placeholder = False
other.save()
return redirect('manage:picturegallery')
else:
form = forms.PictureForm(instance=picture)
context['form'] = form
return render(request, 'manage/picture_edit.html', context)
@staff_required
@permission_required('main.delete_picture')
@transaction.commit_on_success
@json_view
def picture_delete(request, id):
picture = get_object_or_404(Picture, id=id)
for event in Event.objects.filter(picture=picture):
if not event.placeholder_img:
return http.HttpResponseBadRequest("Can't delete this")
picture.delete()
return True
@require_POST
@staff_required
@permission_required('main.delete_picture')
@transaction.commit_on_success
@json_view
def picture_delete_all(request, id):
event = get_object_or_404(Event, id=id)
pictures = Picture.objects.filter(event=event)
if event.picture and event.picture in pictures:
assert event.placeholder_img
event.picture = None
event.save()
pictures.delete()
return True
@staff_required
@permission_required('main.add_picture')
@transaction.commit_on_success
@json_view
def picture_add(request):
context = {}
if request.GET.get('event'):
event = get_object_or_404(Event, id=request.GET.get('event'))
result = can_edit_event(
event,
request.user,
default='manage:picturegallery'
)
if isinstance(result, http.HttpResponse):
return result
context['event'] = event
if request.method == 'POST':
if request.POST.get('remove'):
# this is for when you change your mind
size = request.POST['size']
filename = request.POST['name']
notes = filename_to_notes(filename)
matches = Picture.objects.filter(
notes=notes,
size=int(size),
modified_user=request.user
)
for picture in matches.order_by('-created')[:1]:
picture.delete()
return True
return False
form = forms.PictureForm(request.POST, request.FILES)
if form.is_valid():
picture = form.save(commit=False)
picture.modified_user = request.user
picture.save()
return redirect('manage:picturegallery')
else:
form = forms.PictureForm()
context['form'] = form
return render(request, 'manage/picture_add.html', context)
@cache_page(60)
def redirect_picture_thumbnail(request, id):
picture = get_object_or_404(Picture, id=id)
geometry = request.GET.get('geometry', '100x100')
crop = request.GET.get('crop', 'center')
thumb = thumbnail(picture.file, geometry, crop=crop)
return redirect(thumb.url)
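# Example request handled by redirect_picture_thumbnail (only the query string
# is defined by this view; the URL prefix comes from the project's urlconf):
#   GET .../picture/42/thumbnail/?geometry=160x90&crop=center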
@staff_required
@require_POST
@transaction.commit_on_success
@permission_required('main.change_event')
@json_view
def picture_event_associate(request, id):
picture = get_object_or_404(Picture, id=id)
if not request.POST.get('event'):
return http.HttpResponseBadRequest("Missing 'event'")
event = get_object_or_404(Event, id=request.POST['event'])
event.picture = picture
event.save()
return True
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for morphological filtering operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class DilationTest(test.TestCase):
def _VerifyValues(self, image, kernel, strides, rates, padding, out, use_gpu):
"""Verifies the output values of the dilation function.
Args:
image: Input tensor with shape: [batch, in_height, in_width, channels].
kernel: Filter tensor with shape: [filter_height, filter_width, channels].
strides: Output strides, specified as [stride_height, stride_width].
rates: Atrous rates, specified as [rate_height, rate_width].
padding: Padding type.
out: Expected output.
use_gpu: Whether we are running on GPU.
"""
strides = [1] + strides + [1]
rates = [1] + rates + [1]
with self.cached_session(use_gpu=use_gpu):
out_tensor = nn_ops.dilation2d(
constant_op.constant(image),
constant_op.constant(kernel),
strides=strides,
rates=rates,
padding=padding,
name="dilation2d")
self.assertAllClose(out, self.evaluate(out_tensor))
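  # For reference (a sketch of the semantics, per the public dilation2d docs):
  # grayscale dilation is a max-plus correlation, i.e. for each output
  # position the op takes
  #   output[b, y, x, c] =
  #       max_{dy, dx} image[b, strides[1]*y + rates[1]*dy,
  #                          strides[2]*x + rates[2]*dx, c] + kernel[dy, dx, c]
  # over the kernel positions that fall inside the (possibly padded) image.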
def _testDilationValidPadding(self, use_gpu):
# [1, 2, 2, 1]
image = [[[[.1], [.2]], [[.3], [.4]]]]
# [2, 2, 1]
kernel = [[[.4], [.3]], [[.1], [.0]]]
# [1, 1, 1, 1]
out = [[[[.5]]]]
self._VerifyValues(
image,
kernel,
strides=[1, 1],
rates=[1, 1],
padding="VALID",
out=out,
use_gpu=use_gpu)
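  # Worked example for the VALID case above (illustrative arithmetic only):
  # the single output value is the max over the four image+kernel sums,
  #   max(.1 + .4, .2 + .3, .3 + .1, .4 + .0) = max(.5, .5, .4, .4) = .5,
  # which matches out = [[[[.5]]]].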
def _testDilationSamePadding(self, use_gpu):
# [1, 2, 2, 1]
image = [[[[.1], [.2]], [[.3], [.4]]]]
# [2, 2, 1]
kernel = [[[.4], [.3]], [[.1], [.0]]]
# [1, 2, 2, 1]
out = [[[[.5], [.6]], [[.7], [.8]]]]
self._VerifyValues(
image,
kernel,
strides=[1, 1],
rates=[1, 1],
padding="SAME",
out=out,
use_gpu=use_gpu)
def _testDilationSamePaddingDepth(self, use_gpu):
# [1, 2, 2, 3]
image = [[[[.1, .2, .0], [.2, .3, .1]], [[.3, .4, .2], [.4, .5, .3]]]]
# [2, 2, 3]
kernel = [[[.4, .5, .3], [.3, .4, .2]], [[.1, .2, .0], [.0, .1, -.1]]]
# [1, 2, 2, 3]
out = [[[[.5, .7, .3], [.6, .8, .4]], [[.7, .9, .5], [.8, 1., .6]]]]
self._VerifyValues(
image,
kernel,
strides=[1, 1],
rates=[1, 1],
padding="SAME",
out=out,
use_gpu=use_gpu)
def _testDilationSamePaddingBatch(self, use_gpu):
# [2, 2, 2, 1]
image = [[[[.1], [.2]], [[.3], [.4]]], [[[.2], [.3]], [[.4], [.5]]]]
# [2, 2, 1]
kernel = [[[.4], [.3]], [[.1], [.0]]]
# [2, 2, 2, 1]
out = [[[[.5], [.6]], [[.7], [.8]]], [[[.6], [.7]], [[.8], [.9]]]]
self._VerifyValues(
image,
kernel,
strides=[1, 1],
rates=[1, 1],
padding="SAME",
out=out,
use_gpu=use_gpu)
def _testDilationValidPaddingNonSquareWindow(self, use_gpu):
# [1, 2, 2, 1]
image = [[[[.1], [.2]], [[.3], [.4]]]]
# [1, 2, 1]
kernel = [[[.4], [.3]]]
# [1, 2, 1, 1]
out = [[[[.5]], [[.7]]]]
self._VerifyValues(
image,
kernel,
strides=[1, 1],
rates=[1, 1],
padding="VALID",
out=out,
use_gpu=use_gpu)
def _testDilationSamePaddingRate(self, use_gpu):
# [1, 3, 3, 1]
image = [[[[.1], [.2], [.3]], [[.4], [.5], [.6]], [[.7], [.8], [.9]]]]
# [2, 2, 1]
kernel = [[[.4], [.3]], [[.1], [.2]]]
# Because rate = 2, the effective kernel is [3, 3, 1]:
# kernel_eff = [[[.4], [.0], [.3]],
# [[.0], [.0], [.0]],
# [[.1], [.0], [.2]]]
# [1, 3, 3, 1]
out = [[[[.7], [.8], [.6]], [[1.0], [1.1], [.9]], [[.8], [.9], [.9]]]]
self._VerifyValues(
image,
kernel,
strides=[1, 1],
rates=[2, 2],
padding="SAME",
out=out,
use_gpu=use_gpu)
def _testDilationValidPaddingUnevenStride(self, use_gpu):
    # [1, 3, 4, 1]
image = [[[[.1], [.2], [.3], [.4]], [[.5], [.6], [.7], [.8]],
[[.9], [1.0], [1.1], [1.2]]]]
# [2, 2, 1]
kernel = [[[.4], [.3]], [[.1], [.2]]]
# [1, 2, 2, 1]
out = [[[[.8], [1.0]], [[1.2], [1.4]]]]
self._VerifyValues(
image,
kernel,
strides=[1, 2],
rates=[1, 1],
padding="VALID",
out=out,
use_gpu=use_gpu)
def testDilation(self):
for use_gpu in True, False:
self._testDilationValidPadding(use_gpu)
self._testDilationSamePadding(use_gpu)
self._testDilationSamePaddingDepth(use_gpu)
self._testDilationSamePaddingBatch(use_gpu)
self._testDilationValidPaddingNonSquareWindow(use_gpu)
self._testDilationSamePaddingRate(use_gpu)
self._testDilationValidPaddingUnevenStride(use_gpu)
def _ConstructAndTestGradient(self, image_shape, kernel_shape, strides, rates,
padding, use_gpu):
"""Verifies the gradients of the dilation function.
Args:
image_shape: Input shape, [batch, in_height, in_width, channels].
kernel_shape: Filter shape, [filter_height, filter_width, channels].
strides: Output strides, specified as [stride_height, stride_width].
rates: Atrous rates, specified as [rate_height, rate_width].
padding: Padding type.
use_gpu: Whether we are running on GPU.
"""
assert image_shape[3] == kernel_shape[2]
np.random.seed(1) # Make it reproducible.
image = np.random.random_sample(image_shape).astype(np.float32)
kernel = np.random.random_sample(kernel_shape).astype(np.float32)
image_init = np.random.random_sample(image_shape).astype(np.float32)
kernel_init = np.random.random_sample(kernel_shape).astype(np.float32)
strides = [1] + strides + [1]
rates = [1] + rates + [1]
with self.cached_session(use_gpu=use_gpu):
image_tensor = constant_op.constant(
image, shape=image_shape, name="input")
kernel_tensor = constant_op.constant(
kernel, shape=kernel_shape, name="filter")
out_tensor = nn_ops.dilation2d(
image_tensor,
kernel_tensor,
strides=strides,
rates=rates,
padding=padding,
name="dilation2d")
out_shape = self.evaluate(out_tensor).shape
      # A small delta is necessary so that the argmax (the location of the
      # max) does not change between the finite-difference evaluations used
      # by the gradient checker.
err = gradient_checker.compute_gradient_error(
[image_tensor, kernel_tensor], [image_shape, kernel_shape],
out_tensor,
out_shape, [image_init, kernel_init],
delta=1e-3)
print("Dilation gradient error = %f" % err)
self.assertLess(err, 1e-4)
def _testDilationGradValidPadding_1x1x1(self, use_gpu):
self._ConstructAndTestGradient(
image_shape=[1, 3, 3, 1],
kernel_shape=[1, 1, 1],
strides=[1, 1],
rates=[1, 1],
padding="VALID",
use_gpu=use_gpu)
def _testDilationGradSamePadding_1x1x1(self, use_gpu):
self._ConstructAndTestGradient(
image_shape=[1, 3, 3, 1],
kernel_shape=[1, 1, 1],
strides=[1, 1],
rates=[1, 1],
padding="SAME",
use_gpu=use_gpu)
def _testDilationGradSamePadding_1x1x2(self, use_gpu):
self._ConstructAndTestGradient(
image_shape=[1, 3, 3, 2],
kernel_shape=[1, 1, 2],
strides=[1, 1],
rates=[1, 1],
padding="SAME",
use_gpu=use_gpu)
def _testDilationGradValidPadding_2x2x1(self, use_gpu):
self._ConstructAndTestGradient(
image_shape=[1, 3, 3, 1],
kernel_shape=[2, 2, 1],
strides=[1, 1],
rates=[1, 1],
padding="VALID",
use_gpu=use_gpu)
def _testDilationGradSamePadding_2x2x1(self, use_gpu):
self._ConstructAndTestGradient(
image_shape=[1, 3, 3, 1],
kernel_shape=[2, 2, 1],
strides=[1, 1],
rates=[1, 1],
padding="SAME",
use_gpu=use_gpu)
def _testDilationGradSamePaddingBatch_2x2x1(self, use_gpu):
self._ConstructAndTestGradient(
image_shape=[4, 3, 3, 1],
kernel_shape=[2, 2, 1],
strides=[1, 1],
rates=[1, 1],
padding="SAME",
use_gpu=use_gpu)
def _testDilationGradSamePadding_2x2x4(self, use_gpu):
self._ConstructAndTestGradient(
image_shape=[1, 3, 3, 4],
kernel_shape=[2, 2, 4],
strides=[1, 1],
rates=[1, 1],
padding="SAME",
use_gpu=use_gpu)
def testDilationGrad(self):
for use_gpu in True, False:
self._testDilationGradValidPadding_1x1x1(use_gpu)
self._testDilationGradSamePadding_1x1x1(use_gpu)
self._testDilationGradSamePadding_1x1x2(use_gpu)
self._testDilationGradValidPadding_2x2x1(use_gpu)
self._testDilationGradSamePadding_2x2x1(use_gpu)
self._testDilationGradSamePaddingBatch_2x2x1(use_gpu)
self._testDilationGradSamePadding_2x2x4(use_gpu)
class ErosionTest(test.TestCase):
def _VerifyValues(self, image, kernel, strides, rates, padding, out, use_gpu):
"""Verifies the output values of the erosion function.
Args:
image: Input tensor with shape: [batch, in_height, in_width, channels].
kernel: Filter tensor with shape: [filter_height, filter_width, channels].
strides: Output strides, specified as [stride_height, stride_width].
rates: Atrous rates, specified as [rate_height, rate_width].
padding: Padding type.
out: Expected output.
use_gpu: Whether we are running on GPU.
"""
strides = [1] + strides + [1]
rates = [1] + rates + [1]
with self.cached_session(use_gpu=use_gpu):
out_tensor = nn_ops.erosion2d(
constant_op.constant(image),
constant_op.constant(kernel),
strides=strides,
rates=rates,
padding=padding,
name="erosion2d")
self.assertAllClose(out, self.evaluate(out_tensor))
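  # For reference (a sketch of the semantics, per the public erosion2d docs):
  # grayscale erosion is the dual of dilation with a spatially reflected
  # kernel,
  #   output[b, y, x, c] =
  #       min_{dy, dx} image[b, strides[1]*y - rates[1]*dy,
  #                          strides[2]*x - rates[2]*dx, c] - kernel[dy, dx, c]
  # i.e. erosion of `image` by `kernel` equals the negated dilation of
  # `-image` by the reflected kernel.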
def _testErosionValidPadding(self, use_gpu):
# [1, 2, 2, 1]
image = [[[[.1], [.2]], [[.3], [.4]]]]
# [2, 2, 1]
kernel = [[[.4], [.3]], [[.1], [.0]]]
# [1, 1, 1, 1]
out = [[[[.0]]]]
self._VerifyValues(
image,
kernel,
strides=[1, 1],
rates=[1, 1],
padding="VALID",
out=out,
use_gpu=use_gpu)
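  # Worked example for the VALID case above (illustrative arithmetic only):
  # with the kernel reflected, the candidates are image minus the reflected
  # kernel values,
  #   min(.1 - .0, .2 - .1, .3 - .3, .4 - .4) = min(.1, .1, .0, .0) = .0,
  # which matches out = [[[[.0]]]].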
def _testErosionSamePadding(self, use_gpu):
# [1, 2, 2, 1]
image = [[[[.1], [.2]], [[.3], [.4]]]]
# [2, 2, 1]
kernel = [[[.4], [.3]], [[.1], [.0]]]
# [1, 2, 2, 1]
out = [[[[.0], [.1]], [[.3], [.4]]]]
self._VerifyValues(
image,
kernel,
strides=[1, 1],
rates=[1, 1],
padding="SAME",
out=out,
use_gpu=use_gpu)
def _testErosionSamePaddingDepth(self, use_gpu):
# [1, 2, 2, 3]
image = [[[[.1, .2, .0], [.2, .3, .1]], [[.3, .4, .2], [.4, .5, .3]]]]
# [2, 2, 3]
kernel = [[[.4, .5, .3], [.3, .4, .2]], [[.1, .2, .0], [.0, .1, -.1]]]
# [1, 2, 2, 3]
out = [[[[.0, .0, .0], [.1, .1, .1]], [[.3, .3, .3], [.4, .4, .4]]]]
self._VerifyValues(
image,
kernel,
strides=[1, 1],
rates=[1, 1],
padding="SAME",
out=out,
use_gpu=use_gpu)
def _testErosionSamePaddingBatch(self, use_gpu):
# [2, 2, 2, 1]
image = [[[[.1], [.2]], [[.3], [.4]]], [[[.2], [.3]], [[.4], [.5]]]]
# [2, 2, 1]
kernel = [[[.4], [.3]], [[.1], [.0]]]
# [2, 2, 2, 1]
out = [[[[.0], [.1]], [[.3], [.4]]], [[[.1], [.2]], [[.4], [.5]]]]
self._VerifyValues(
image,
kernel,
strides=[1, 1],
rates=[1, 1],
padding="SAME",
out=out,
use_gpu=use_gpu)
def _testErosionValidPaddingNonSquareWindow(self, use_gpu):
# [1, 2, 2, 1]
image = [[[[.1], [.2]], [[.3], [.4]]]]
# [1, 2, 1]
kernel = [[[.4], [.3]]]
# [1, 2, 1, 1]
out = [[[[-.2]], [[.0]]]]
self._VerifyValues(
image,
kernel,
strides=[1, 1],
rates=[1, 1],
padding="VALID",
out=out,
use_gpu=use_gpu)
def _testErosionSamePaddingRate(self, use_gpu):
# [1, 3, 3, 1]
image = [[[[.1], [.2], [.3]], [[.4], [.5], [.6]], [[.7], [.8], [.9]]]]
# [2, 2, 1]
kernel = [[[.4], [.3]], [[.1], [.2]]]
# Because rate = 2, the effective kernel is [3, 3, 1]:
# kernel_eff = [[[.4], [.0], [.3]],
# [[.0], [.0], [.0]],
# [[.1], [.0], [.2]]]
# [1, 3, 3, 1]
out = [[[[.1], [.1], [.2]], [[0.1], [-.1], [.0]], [[.4], [.2], [.3]]]]
self._VerifyValues(
image,
kernel,
strides=[1, 1],
rates=[2, 2],
padding="SAME",
out=out,
use_gpu=use_gpu)
def _testErosionValidPaddingUnevenStride(self, use_gpu):
    # [1, 3, 4, 1]
image = [[[[.1], [.2], [.3], [.4]], [[.5], [.6], [.7], [.8]],
[[.9], [1.0], [1.1], [1.2]]]]
# [2, 2, 1]
kernel = [[[.4], [.3]], [[.1], [.2]]]
# [1, 2, 2, 1]
out = [[[[-.1], [.1]], [[.3], [.5]]]]
self._VerifyValues(
image,
kernel,
strides=[1, 2],
rates=[1, 1],
padding="VALID",
out=out,
use_gpu=use_gpu)
def testErosion(self):
for use_gpu in True, False:
self._testErosionValidPadding(use_gpu)
self._testErosionSamePadding(use_gpu)
self._testErosionSamePaddingDepth(use_gpu)
self._testErosionSamePaddingBatch(use_gpu)
self._testErosionValidPaddingNonSquareWindow(use_gpu)
self._testErosionSamePaddingRate(use_gpu)
self._testErosionValidPaddingUnevenStride(use_gpu)
def _ConstructAndTestGradient(self, image_shape, kernel_shape, strides, rates,
padding, use_gpu):
"""Verifies the gradients of the erosion function.
Args:
image_shape: Input shape, [batch, in_height, in_width, channels].
kernel_shape: Filter shape, [filter_height, filter_width, channels].
strides: Output strides, specified as [stride_height, stride_width].
rates: Atrous rates, specified as [rate_height, rate_width].
padding: Padding type.
use_gpu: Whether we are running on GPU.
"""
assert image_shape[3] == kernel_shape[2]
np.random.seed(1) # Make it reproducible.
image = np.random.random_sample(image_shape).astype(np.float32)
kernel = np.random.random_sample(kernel_shape).astype(np.float32)
image_init = np.random.random_sample(image_shape).astype(np.float32)
kernel_init = np.random.random_sample(kernel_shape).astype(np.float32)
strides = [1] + strides + [1]
rates = [1] + rates + [1]
with self.cached_session(use_gpu=use_gpu):
image_tensor = constant_op.constant(
image, shape=image_shape, name="input")
kernel_tensor = constant_op.constant(
kernel, shape=kernel_shape, name="filter")
out_tensor = nn_ops.erosion2d(
image_tensor,
kernel_tensor,
strides=strides,
rates=rates,
padding=padding,
name="erosion2d")
out_shape = self.evaluate(out_tensor).shape
      # A small delta is necessary so that the argmax (the location of the
      # min here) does not change between the finite-difference evaluations
      # used by the gradient checker.
err = gradient_checker.compute_gradient_error(
[image_tensor, kernel_tensor], [image_shape, kernel_shape],
out_tensor,
out_shape, [image_init, kernel_init],
delta=1e-3)
print("Erosion gradient error = %f" % err)
self.assertLess(err, 1e-4)
def _testErosionGradValidPadding_1x1x1(self, use_gpu):
self._ConstructAndTestGradient(
image_shape=[1, 3, 3, 1],
kernel_shape=[1, 1, 1],
strides=[1, 1],
rates=[1, 1],
padding="VALID",
use_gpu=use_gpu)
def _testErosionGradSamePadding_1x1x1(self, use_gpu):
self._ConstructAndTestGradient(
image_shape=[1, 3, 3, 1],
kernel_shape=[1, 1, 1],
strides=[1, 1],
rates=[1, 1],
padding="SAME",
use_gpu=use_gpu)
def _testErosionGradSamePadding_1x1x2(self, use_gpu):
self._ConstructAndTestGradient(
image_shape=[1, 3, 3, 2],
kernel_shape=[1, 1, 2],
strides=[1, 1],
rates=[1, 1],
padding="SAME",
use_gpu=use_gpu)
def _testErosionGradValidPadding_2x2x1(self, use_gpu):
self._ConstructAndTestGradient(
image_shape=[1, 3, 3, 1],
kernel_shape=[2, 2, 1],
strides=[1, 1],
rates=[1, 1],
padding="VALID",
use_gpu=use_gpu)
def _testErosionGradSamePadding_2x2x1(self, use_gpu):
self._ConstructAndTestGradient(
image_shape=[1, 3, 3, 1],
kernel_shape=[2, 2, 1],
strides=[1, 1],
rates=[1, 1],
padding="SAME",
use_gpu=use_gpu)
def _testErosionGradSamePaddingBatch_2x2x1(self, use_gpu):
self._ConstructAndTestGradient(
image_shape=[4, 3, 3, 1],
kernel_shape=[2, 2, 1],
strides=[1, 1],
rates=[1, 1],
padding="SAME",
use_gpu=use_gpu)
def _testErosionGradSamePadding_2x2x4(self, use_gpu):
self._ConstructAndTestGradient(
image_shape=[1, 3, 3, 4],
kernel_shape=[2, 2, 4],
strides=[1, 1],
rates=[1, 1],
padding="SAME",
use_gpu=use_gpu)
def testErosionGrad(self):
for use_gpu in True, False:
self._testErosionGradValidPadding_1x1x1(use_gpu)
self._testErosionGradSamePadding_1x1x1(use_gpu)
self._testErosionGradSamePadding_1x1x2(use_gpu)
self._testErosionGradValidPadding_2x2x1(use_gpu)
self._testErosionGradSamePadding_2x2x1(use_gpu)
self._testErosionGradSamePaddingBatch_2x2x1(use_gpu)
self._testErosionGradSamePadding_2x2x4(use_gpu)
if __name__ == "__main__":
test.main()
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Support for Dataflow triggers.
Triggers control when, in processing time, the contents of a window get
emitted.
"""
# pytype: skip-file
import collections
import copy
import logging
import numbers
from abc import ABCMeta
from abc import abstractmethod
from enum import Flag
from enum import auto
from functools import reduce
from itertools import zip_longest
from operator import or_
from apache_beam.coders import coder_impl
from apache_beam.coders import observable
from apache_beam.portability.api import beam_runner_api_pb2
from apache_beam.transforms import combiners
from apache_beam.transforms import core
from apache_beam.transforms.timeutil import TimeDomain
from apache_beam.transforms.window import GlobalWindow
from apache_beam.transforms.window import GlobalWindows
from apache_beam.transforms.window import TimestampCombiner
from apache_beam.transforms.window import WindowedValue
from apache_beam.transforms.window import WindowFn
from apache_beam.utils import windowed_value
from apache_beam.utils.timestamp import MAX_TIMESTAMP
from apache_beam.utils.timestamp import MIN_TIMESTAMP
from apache_beam.utils.timestamp import TIME_GRANULARITY
# AfterCount is experimental. No backwards compatibility guarantees.
__all__ = [
'AccumulationMode',
'TriggerFn',
'DefaultTrigger',
'AfterWatermark',
'AfterProcessingTime',
'AfterCount',
'Repeatedly',
'AfterAny',
'AfterAll',
'AfterEach',
'OrFinally',
]
_LOGGER = logging.getLogger(__name__)
class AccumulationMode(object):
"""Controls what to do with data when a trigger fires multiple times."""
DISCARDING = beam_runner_api_pb2.AccumulationMode.DISCARDING
ACCUMULATING = beam_runner_api_pb2.AccumulationMode.ACCUMULATING
# TODO(robertwb): Provide retractions of previous outputs.
# RETRACTING = 3
class _StateTag(metaclass=ABCMeta):
"""An identifier used to store and retrieve typed, combinable state.
The given tag must be unique for this step."""
def __init__(self, tag):
self.tag = tag
class _ReadModifyWriteStateTag(_StateTag):
"""StateTag pointing to an element."""
def __repr__(self):
return 'ValueStateTag(%s)' % (self.tag)
def with_prefix(self, prefix):
return _ReadModifyWriteStateTag(prefix + self.tag)
class _SetStateTag(_StateTag):
"""StateTag pointing to an element."""
def __repr__(self):
return 'SetStateTag({tag})'.format(tag=self.tag)
def with_prefix(self, prefix):
return _SetStateTag(prefix + self.tag)
class _CombiningValueStateTag(_StateTag):
"""StateTag pointing to an element, accumulated with a combiner.
The given tag must be unique for this step. The given CombineFn will be
applied (possibly incrementally and eagerly) when adding elements."""
# TODO(robertwb): Also store the coder (perhaps extracted from the combine_fn)
def __init__(self, tag, combine_fn):
super(_CombiningValueStateTag, self).__init__(tag)
if not combine_fn:
raise ValueError('combine_fn must be specified.')
if not isinstance(combine_fn, core.CombineFn):
combine_fn = core.CombineFn.from_callable(combine_fn)
self.combine_fn = combine_fn
def __repr__(self):
return 'CombiningValueStateTag(%s, %s)' % (self.tag, self.combine_fn)
def with_prefix(self, prefix):
return _CombiningValueStateTag(prefix + self.tag, self.combine_fn)
def without_extraction(self):
class NoExtractionCombineFn(core.CombineFn):
setup = self.combine_fn.setup
create_accumulator = self.combine_fn.create_accumulator
add_input = self.combine_fn.add_input
merge_accumulators = self.combine_fn.merge_accumulators
compact = self.combine_fn.compact
extract_output = staticmethod(lambda x: x)
teardown = self.combine_fn.teardown
return _CombiningValueStateTag(self.tag, NoExtractionCombineFn())
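# Illustrative only: a counting tag like the ones used further down in this
# file (e.g. AfterCount.COUNT_TAG) is built as
#   _CombiningValueStateTag('count', combiners.CountCombineFn())
# and every add_state() call on that tag then adds one input to the CombineFn.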
class _ListStateTag(_StateTag):
"""StateTag pointing to a list of elements."""
def __repr__(self):
return 'ListStateTag(%s)' % self.tag
def with_prefix(self, prefix):
return _ListStateTag(prefix + self.tag)
class _WatermarkHoldStateTag(_StateTag):
def __init__(self, tag, timestamp_combiner_impl):
super(_WatermarkHoldStateTag, self).__init__(tag)
self.timestamp_combiner_impl = timestamp_combiner_impl
def __repr__(self):
return 'WatermarkHoldStateTag(%s, %s)' % (
self.tag, self.timestamp_combiner_impl)
def with_prefix(self, prefix):
return _WatermarkHoldStateTag(
prefix + self.tag, self.timestamp_combiner_impl)
class DataLossReason(Flag):
"""Enum defining potential reasons that a trigger may cause data loss."""
NO_POTENTIAL_LOSS = 0
MAY_FINISH = auto()
CONDITION_NOT_GUARANTEED = auto()
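# Since DataLossReason is a Flag, reasons compose and are queried with bitwise
# operators (illustrative only):
#   combined = DataLossReason.MAY_FINISH | DataLossReason.CONDITION_NOT_GUARANTEED
#   bool(combined & DataLossReason.MAY_FINISH)  # -> True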
# pylint: disable=unused-argument
# TODO(robertwb): Provisional API, Java likely to change as well.
class TriggerFn(metaclass=ABCMeta):
"""A TriggerFn determines when window (panes) are emitted.
See https://beam.apache.org/documentation/programming-guide/#triggers
"""
@abstractmethod
def on_element(self, element, window, context):
"""Called when a new element arrives in a window.
Args:
element: the element being added
window: the window to which the element is being added
context: a context (e.g. a TriggerContext instance) for managing state
and setting timers
"""
pass
@abstractmethod
def on_merge(self, to_be_merged, merge_result, context):
"""Called when multiple windows are merged.
Args:
to_be_merged: the set of windows to be merged
merge_result: the window into which the windows are being merged
context: a context (e.g. a TriggerContext instance) for managing state
and setting timers
"""
pass
@abstractmethod
def should_fire(self, time_domain, timestamp, window, context):
"""Whether this trigger should cause the window to fire.
Args:
time_domain: WATERMARK for event-time timers and REAL_TIME for
processing-time timers.
      timestamp: for time_domain WATERMARK, (a lower bound on) the
          watermark of the system; for time_domain REAL_TIME, the
          timestamp of the processing-time timer that fired.
window: the window whose trigger is being considered
context: a context (e.g. a TriggerContext instance) for managing state
and setting timers
Returns:
whether this trigger should cause a firing
"""
pass
@abstractmethod
def has_ontime_pane(self):
"""Whether this trigger creates an empty pane even if there are no elements.
Returns:
True if this trigger guarantees that there will always be an ON_TIME pane
even if there are no elements in that pane.
"""
pass
@abstractmethod
def on_fire(self, watermark, window, context):
"""Called when a trigger actually fires.
Args:
watermark: (a lower bound on) the watermark of the system
window: the window whose trigger is being fired
context: a context (e.g. a TriggerContext instance) for managing state
and setting timers
Returns:
whether this trigger is finished
"""
pass
@abstractmethod
def reset(self, window, context):
"""Clear any state and timers used by this TriggerFn."""
pass
def may_lose_data(self, unused_windowing):
# type: (core.Windowing) -> DataLossReason
"""Returns whether or not this trigger could cause data loss.
A trigger can cause data loss in the following scenarios:
* The trigger has a chance to finish. For instance, AfterWatermark()
without a late trigger would cause all late data to be lost. This
scenario is only accounted for if the windowing strategy allows
late data. Otherwise, the trigger is not responsible for the data
loss.
* The trigger condition may not be met. For instance,
Repeatedly(AfterCount(N)) may not fire due to N not being met. This
is only accounted for if the condition itself led to data loss.
Repeatedly(AfterCount(1)) is safe, since it would only not fire if
there is no data to lose, but Repeatedly(AfterCount(2)) can cause
data loss if there is only one record.
Note that this only returns the potential for loss. It does not mean that
there will be data loss. It also only accounts for loss related to the
trigger, not other potential causes.
Args:
      windowing: The Windowing that this trigger belongs to. The trigger
          itself does not need to be the top-level trigger of that windowing.
Returns:
The DataLossReason. If there is no potential loss,
DataLossReason.NO_POTENTIAL_LOSS is returned. Otherwise, all the
potential reasons are returned as a single value. For instance, if
data loss can result from finishing or not having the condition met,
the result will be DataLossReason.MAY_FINISH|CONDITION_NOT_GUARANTEED.
"""
# For backwards compatibility's sake, we're assuming the trigger is safe.
return DataLossReason.NO_POTENTIAL_LOSS
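  # Illustrative examples of the contract above, using triggers defined later
  # in this module (the values follow from their may_lose_data overrides):
  #   Repeatedly(AfterCount(1)).may_lose_data(w) -> NO_POTENTIAL_LOSS
  #   Repeatedly(AfterCount(2)).may_lose_data(w) -> CONDITION_NOT_GUARANTEED
  #   AfterWatermark().may_lose_data(w)          -> MAY_FINISH
  #                                                 (when w allows late data)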
# pylint: enable=unused-argument
@staticmethod
def from_runner_api(proto, context):
return {
'after_all': AfterAll,
'after_any': AfterAny,
'after_each': AfterEach,
'after_end_of_window': AfterWatermark,
'after_processing_time': AfterProcessingTime,
# after_processing_time, after_synchronized_processing_time
'always': Always,
'default': DefaultTrigger,
'element_count': AfterCount,
'never': _Never,
'or_finally': OrFinally,
'repeat': Repeatedly,
}[proto.WhichOneof('trigger')].from_runner_api(proto, context)
@abstractmethod
def to_runner_api(self, unused_context):
pass
class DefaultTrigger(TriggerFn):
"""Semantically Repeatedly(AfterWatermark()), but more optimized."""
def __init__(self):
pass
def __repr__(self):
return 'DefaultTrigger()'
def on_element(self, element, window, context):
context.set_timer(str(window), TimeDomain.WATERMARK, window.end)
def on_merge(self, to_be_merged, merge_result, context):
for window in to_be_merged:
context.clear_timer(str(window), TimeDomain.WATERMARK)
def should_fire(self, time_domain, watermark, window, context):
if watermark >= window.end:
# Explicitly clear the timer so that late elements are not emitted again
# when the timer is fired.
context.clear_timer(str(window), TimeDomain.WATERMARK)
return watermark >= window.end
def on_fire(self, watermark, window, context):
return False
def reset(self, window, context):
context.clear_timer(str(window), TimeDomain.WATERMARK)
def may_lose_data(self, unused_windowing):
return DataLossReason.NO_POTENTIAL_LOSS
def __eq__(self, other):
return type(self) == type(other)
def __hash__(self):
return hash(type(self))
@staticmethod
def from_runner_api(proto, context):
return DefaultTrigger()
def to_runner_api(self, unused_context):
return beam_runner_api_pb2.Trigger(
default=beam_runner_api_pb2.Trigger.Default())
def has_ontime_pane(self):
return True
class AfterProcessingTime(TriggerFn):
"""Fire exactly once after a specified delay from processing time.
AfterProcessingTime is experimental. No backwards compatibility guarantees.
"""
def __init__(self, delay=0):
"""Initialize a processing time trigger with a delay in seconds."""
self.delay = delay
def __repr__(self):
return 'AfterProcessingTime(delay=%d)' % self.delay
def on_element(self, element, window, context):
context.set_timer(
'', TimeDomain.REAL_TIME, context.get_current_time() + self.delay)
def on_merge(self, to_be_merged, merge_result, context):
# timers will be kept through merging
pass
  def should_fire(self, time_domain, timestamp, window, context):
    if time_domain == TimeDomain.REAL_TIME:
      return True
    return False
def on_fire(self, timestamp, window, context):
return True
def reset(self, window, context):
pass
def may_lose_data(self, unused_windowing):
return DataLossReason.MAY_FINISH
@staticmethod
def from_runner_api(proto, context):
return AfterProcessingTime(
delay=(
proto.after_processing_time.timestamp_transforms[0].delay.
delay_millis) // 1000)
def to_runner_api(self, context):
delay_proto = beam_runner_api_pb2.TimestampTransform(
delay=beam_runner_api_pb2.TimestampTransform.Delay(
delay_millis=self.delay * 1000))
return beam_runner_api_pb2.Trigger(
after_processing_time=beam_runner_api_pb2.Trigger.AfterProcessingTime(
timestamp_transforms=[delay_proto]))
def has_ontime_pane(self):
return False
class Always(TriggerFn):
"""Repeatedly invoke the given trigger, never finishing."""
def __init__(self):
pass
def __repr__(self):
return 'Always'
def __eq__(self, other):
return type(self) == type(other)
def __hash__(self):
return 1
def on_element(self, element, window, context):
pass
def on_merge(self, to_be_merged, merge_result, context):
pass
def has_ontime_pane(self):
return False
def reset(self, window, context):
pass
def should_fire(self, time_domain, watermark, window, context):
return True
def on_fire(self, watermark, window, context):
return False
def may_lose_data(self, unused_windowing):
return DataLossReason.NO_POTENTIAL_LOSS
@staticmethod
def from_runner_api(proto, context):
return Always()
def to_runner_api(self, context):
return beam_runner_api_pb2.Trigger(
always=beam_runner_api_pb2.Trigger.Always())
class _Never(TriggerFn):
"""A trigger that never fires.
Data may still be released at window closing.
"""
def __init__(self):
pass
def __repr__(self):
return 'Never'
def __eq__(self, other):
return type(self) == type(other)
def __hash__(self):
return hash(type(self))
def on_element(self, element, window, context):
pass
def on_merge(self, to_be_merged, merge_result, context):
pass
def has_ontime_pane(self):
    return False
def reset(self, window, context):
pass
def should_fire(self, time_domain, watermark, window, context):
return False
def on_fire(self, watermark, window, context):
return True
def may_lose_data(self, unused_windowing):
"""No potential data loss.
    Though _Never doesn't explicitly fire, data is still released when the
    window closes, so any loss at that point is due to the window closing,
    not this trigger.
"""
return DataLossReason.NO_POTENTIAL_LOSS
@staticmethod
def from_runner_api(proto, context):
return _Never()
def to_runner_api(self, context):
return beam_runner_api_pb2.Trigger(
never=beam_runner_api_pb2.Trigger.Never())
class AfterWatermark(TriggerFn):
"""Fire exactly once when the watermark passes the end of the window.
Args:
early: if not None, a speculative trigger to repeatedly evaluate before
the watermark passes the end of the window
late: if not None, a speculative trigger to repeatedly evaluate after
the watermark passes the end of the window
"""
LATE_TAG = _CombiningValueStateTag('is_late', any)
def __init__(self, early=None, late=None):
# TODO(zhoufek): Maybe don't wrap early/late if they are already Repeatedly
self.early = Repeatedly(early) if early else None
self.late = Repeatedly(late) if late else None
def __repr__(self):
qualifiers = []
if self.early:
qualifiers.append('early=%s' % self.early.underlying)
if self.late:
qualifiers.append('late=%s' % self.late.underlying)
return 'AfterWatermark(%s)' % ', '.join(qualifiers)
def is_late(self, context):
return self.late and context.get_state(self.LATE_TAG)
def on_element(self, element, window, context):
if self.is_late(context):
self.late.on_element(element, window, NestedContext(context, 'late'))
else:
context.set_timer('', TimeDomain.WATERMARK, window.end)
if self.early:
self.early.on_element(element, window, NestedContext(context, 'early'))
def on_merge(self, to_be_merged, merge_result, context):
# TODO(robertwb): Figure out whether the 'rewind' semantics could be used
# here.
if self.is_late(context):
self.late.on_merge(
to_be_merged, merge_result, NestedContext(context, 'late'))
else:
      # Note: timer clearing is solely an optimization.
for window in to_be_merged:
if window.end != merge_result.end:
context.clear_timer('', TimeDomain.WATERMARK)
if self.early:
self.early.on_merge(
to_be_merged, merge_result, NestedContext(context, 'early'))
def should_fire(self, time_domain, watermark, window, context):
if self.is_late(context):
return self.late.should_fire(
time_domain, watermark, window, NestedContext(context, 'late'))
elif watermark >= window.end:
# Explicitly clear the timer so that late elements are not emitted again
# when the timer is fired.
context.clear_timer('', TimeDomain.WATERMARK)
return True
elif self.early:
return self.early.should_fire(
time_domain, watermark, window, NestedContext(context, 'early'))
return False
def on_fire(self, watermark, window, context):
if self.is_late(context):
return self.late.on_fire(
watermark, window, NestedContext(context, 'late'))
elif watermark >= window.end:
context.add_state(self.LATE_TAG, True)
return not self.late
elif self.early:
self.early.on_fire(watermark, window, NestedContext(context, 'early'))
return False
def reset(self, window, context):
if self.late:
context.clear_state(self.LATE_TAG)
if self.early:
self.early.reset(window, NestedContext(context, 'early'))
if self.late:
self.late.reset(window, NestedContext(context, 'late'))
def may_lose_data(self, windowing):
"""May cause data loss if the windowing allows lateness and either:
* The late trigger is not set
* The late trigger may cause data loss.
The second case is equivalent to Repeatedly(late).may_lose_data(windowing)
"""
if windowing.allowed_lateness == 0:
return DataLossReason.NO_POTENTIAL_LOSS
if self.late is None:
return DataLossReason.MAY_FINISH
return self.late.may_lose_data(windowing)
def __eq__(self, other):
return (
type(self) == type(other) and self.early == other.early and
self.late == other.late)
def __hash__(self):
return hash((type(self), self.early, self.late))
@staticmethod
def from_runner_api(proto, context):
return AfterWatermark(
early=TriggerFn.from_runner_api(
proto.after_end_of_window.early_firings, context)
if proto.after_end_of_window.HasField('early_firings') else None,
late=TriggerFn.from_runner_api(
proto.after_end_of_window.late_firings, context)
if proto.after_end_of_window.HasField('late_firings') else None)
def to_runner_api(self, context):
early_proto = self.early.underlying.to_runner_api(
context) if self.early else None
late_proto = self.late.underlying.to_runner_api(
context) if self.late else None
return beam_runner_api_pb2.Trigger(
after_end_of_window=beam_runner_api_pb2.Trigger.AfterEndOfWindow(
early_firings=early_proto, late_firings=late_proto))
def has_ontime_pane(self):
return True
class AfterCount(TriggerFn):
"""Fire when there are at least count elements in this window pane.
AfterCount is experimental. No backwards compatibility guarantees.
"""
COUNT_TAG = _CombiningValueStateTag('count', combiners.CountCombineFn())
def __init__(self, count):
if not isinstance(count, numbers.Integral) or count < 1:
raise ValueError("count (%d) must be a positive integer." % count)
self.count = count
def __repr__(self):
return 'AfterCount(%s)' % self.count
def __eq__(self, other):
return type(self) == type(other) and self.count == other.count
def __hash__(self):
return hash(self.count)
def on_element(self, element, window, context):
context.add_state(self.COUNT_TAG, 1)
def on_merge(self, to_be_merged, merge_result, context):
# states automatically merged
pass
def should_fire(self, time_domain, watermark, window, context):
return context.get_state(self.COUNT_TAG) >= self.count
def on_fire(self, watermark, window, context):
return True
def reset(self, window, context):
context.clear_state(self.COUNT_TAG)
def may_lose_data(self, unused_windowing):
reason = DataLossReason.MAY_FINISH
if self.count > 1:
reason |= DataLossReason.CONDITION_NOT_GUARANTEED
return reason
@staticmethod
def from_runner_api(proto, unused_context):
return AfterCount(proto.element_count.element_count)
def to_runner_api(self, unused_context):
return beam_runner_api_pb2.Trigger(
element_count=beam_runner_api_pb2.Trigger.ElementCount(
element_count=self.count))
def has_ontime_pane(self):
return False
class Repeatedly(TriggerFn):
"""Repeatedly invoke the given trigger, never finishing."""
def __init__(self, underlying):
self.underlying = underlying
def __repr__(self):
return 'Repeatedly(%s)' % self.underlying
def __eq__(self, other):
return type(self) == type(other) and self.underlying == other.underlying
def __hash__(self):
return hash(self.underlying)
def on_element(self, element, window, context):
self.underlying.on_element(element, window, context)
def on_merge(self, to_be_merged, merge_result, context):
self.underlying.on_merge(to_be_merged, merge_result, context)
def should_fire(self, time_domain, watermark, window, context):
return self.underlying.should_fire(time_domain, watermark, window, context)
def on_fire(self, watermark, window, context):
if self.underlying.on_fire(watermark, window, context):
self.underlying.reset(window, context)
return False
def reset(self, window, context):
self.underlying.reset(window, context)
def may_lose_data(self, windowing):
"""Repeatedly may only lose data if the underlying trigger may not have
its condition met.
For underlying triggers that may finish, Repeatedly overrides that
behavior.
"""
return (
self.underlying.may_lose_data(windowing)
& DataLossReason.CONDITION_NOT_GUARANTEED)
@staticmethod
def from_runner_api(proto, context):
return Repeatedly(
TriggerFn.from_runner_api(proto.repeat.subtrigger, context))
def to_runner_api(self, context):
return beam_runner_api_pb2.Trigger(
repeat=beam_runner_api_pb2.Trigger.Repeat(
subtrigger=self.underlying.to_runner_api(context)))
def has_ontime_pane(self):
return self.underlying.has_ontime_pane()
class _ParallelTriggerFn(TriggerFn, metaclass=ABCMeta):
def __init__(self, *triggers):
self.triggers = triggers
def __repr__(self):
return '%s(%s)' % (
self.__class__.__name__, ', '.join(str(t) for t in self.triggers))
def __eq__(self, other):
return type(self) == type(other) and self.triggers == other.triggers
def __hash__(self):
return hash(self.triggers)
@abstractmethod
def combine_op(self, trigger_results):
pass
def on_element(self, element, window, context):
for ix, trigger in enumerate(self.triggers):
trigger.on_element(element, window, self._sub_context(context, ix))
def on_merge(self, to_be_merged, merge_result, context):
for ix, trigger in enumerate(self.triggers):
trigger.on_merge(
to_be_merged, merge_result, self._sub_context(context, ix))
def should_fire(self, time_domain, watermark, window, context):
self._time_domain = time_domain
return self.combine_op(
trigger.should_fire(
time_domain, watermark, window, self._sub_context(context, ix))
for ix,
trigger in enumerate(self.triggers))
def on_fire(self, watermark, window, context):
finished = []
for ix, trigger in enumerate(self.triggers):
nested_context = self._sub_context(context, ix)
if trigger.should_fire(TimeDomain.WATERMARK,
watermark,
window,
nested_context):
finished.append(trigger.on_fire(watermark, window, nested_context))
return self.combine_op(finished)
def reset(self, window, context):
for ix, trigger in enumerate(self.triggers):
trigger.reset(window, self._sub_context(context, ix))
@staticmethod
def _sub_context(context, index):
return NestedContext(context, '%d/' % index)
@staticmethod
def from_runner_api(proto, context):
subtriggers = [
TriggerFn.from_runner_api(subtrigger, context) for subtrigger in
proto.after_all.subtriggers or proto.after_any.subtriggers
]
if proto.after_all.subtriggers:
return AfterAll(*subtriggers)
else:
return AfterAny(*subtriggers)
def to_runner_api(self, context):
subtriggers = [
subtrigger.to_runner_api(context) for subtrigger in self.triggers
]
if self.combine_op == all:
return beam_runner_api_pb2.Trigger(
after_all=beam_runner_api_pb2.Trigger.AfterAll(
subtriggers=subtriggers))
elif self.combine_op == any:
return beam_runner_api_pb2.Trigger(
after_any=beam_runner_api_pb2.Trigger.AfterAny(
subtriggers=subtriggers))
else:
raise NotImplementedError(self)
def has_ontime_pane(self):
return any(t.has_ontime_pane() for t in self.triggers)
class AfterAny(_ParallelTriggerFn):
"""Fires when any subtrigger fires.
Also finishes when any subtrigger finishes.
"""
combine_op = any
def may_lose_data(self, windowing):
reason = DataLossReason.NO_POTENTIAL_LOSS
for trigger in self.triggers:
t_reason = trigger.may_lose_data(windowing)
if t_reason == DataLossReason.NO_POTENTIAL_LOSS:
return t_reason
reason |= t_reason
return reason
class AfterAll(_ParallelTriggerFn):
"""Fires when all subtriggers have fired.
Also finishes when all subtriggers have finished.
"""
combine_op = all
def may_lose_data(self, windowing):
return reduce(or_, (t.may_lose_data(windowing) for t in self.triggers))
class AfterEach(TriggerFn):
INDEX_TAG = _CombiningValueStateTag(
'index', (lambda indices: 0 if not indices else max(indices)))
def __init__(self, *triggers):
self.triggers = triggers
def __repr__(self):
return '%s(%s)' % (
self.__class__.__name__, ', '.join(str(t) for t in self.triggers))
def __eq__(self, other):
return type(self) == type(other) and self.triggers == other.triggers
def __hash__(self):
return hash(self.triggers)
def on_element(self, element, window, context):
ix = context.get_state(self.INDEX_TAG)
if ix < len(self.triggers):
self.triggers[ix].on_element(
element, window, self._sub_context(context, ix))
def on_merge(self, to_be_merged, merge_result, context):
# This takes the furthest window on merging.
# TODO(robertwb): Revisit this when merging windows logic is settled for
# all possible merging situations.
ix = context.get_state(self.INDEX_TAG)
if ix < len(self.triggers):
self.triggers[ix].on_merge(
to_be_merged, merge_result, self._sub_context(context, ix))
  def should_fire(self, time_domain, watermark, window, context):
    ix = context.get_state(self.INDEX_TAG)
    if ix < len(self.triggers):
      return self.triggers[ix].should_fire(
          time_domain, watermark, window, self._sub_context(context, ix))
    return False
def on_fire(self, watermark, window, context):
ix = context.get_state(self.INDEX_TAG)
if ix < len(self.triggers):
if self.triggers[ix].on_fire(watermark,
window,
self._sub_context(context, ix)):
ix += 1
context.add_state(self.INDEX_TAG, ix)
return ix == len(self.triggers)
def reset(self, window, context):
context.clear_state(self.INDEX_TAG)
for ix, trigger in enumerate(self.triggers):
trigger.reset(window, self._sub_context(context, ix))
def may_lose_data(self, windowing):
return reduce(or_, (t.may_lose_data(windowing) for t in self.triggers))
@staticmethod
def _sub_context(context, index):
return NestedContext(context, '%d/' % index)
@staticmethod
def from_runner_api(proto, context):
return AfterEach(
*[
TriggerFn.from_runner_api(subtrigger, context)
for subtrigger in proto.after_each.subtriggers
])
def to_runner_api(self, context):
return beam_runner_api_pb2.Trigger(
after_each=beam_runner_api_pb2.Trigger.AfterEach(
subtriggers=[
subtrigger.to_runner_api(context)
for subtrigger in self.triggers
]))
def has_ontime_pane(self):
return any(t.has_ontime_pane() for t in self.triggers)
class OrFinally(AfterAny):
@staticmethod
def from_runner_api(proto, context):
return OrFinally(
TriggerFn.from_runner_api(proto.or_finally.main, context),
# getattr is used as finally is a keyword in Python
TriggerFn.from_runner_api(
getattr(proto.or_finally, 'finally'), context))
def to_runner_api(self, context):
return beam_runner_api_pb2.Trigger(
or_finally=beam_runner_api_pb2.Trigger.OrFinally(
main=self.triggers[0].to_runner_api(context),
# dict keyword argument is used as finally is a keyword in Python
**{'finally': self.triggers[1].to_runner_api(context)}))
class TriggerContext(object):
def __init__(self, outer, window, clock):
self._outer = outer
self._window = window
self._clock = clock
def get_current_time(self):
return self._clock.time()
def set_timer(self, name, time_domain, timestamp):
self._outer.set_timer(self._window, name, time_domain, timestamp)
def clear_timer(self, name, time_domain):
self._outer.clear_timer(self._window, name, time_domain)
def add_state(self, tag, value):
self._outer.add_state(self._window, tag, value)
def get_state(self, tag):
return self._outer.get_state(self._window, tag)
def clear_state(self, tag):
return self._outer.clear_state(self._window, tag)
class NestedContext(object):
"""Namespaced context useful for defining composite triggers."""
def __init__(self, outer, prefix):
self._outer = outer
self._prefix = prefix
def get_current_time(self):
return self._outer.get_current_time()
def set_timer(self, name, time_domain, timestamp):
self._outer.set_timer(self._prefix + name, time_domain, timestamp)
def clear_timer(self, name, time_domain):
self._outer.clear_timer(self._prefix + name, time_domain)
def add_state(self, tag, value):
self._outer.add_state(tag.with_prefix(self._prefix), value)
def get_state(self, tag):
return self._outer.get_state(tag.with_prefix(self._prefix))
def clear_state(self, tag):
self._outer.clear_state(tag.with_prefix(self._prefix))
# pylint: disable=unused-argument
class SimpleState(metaclass=ABCMeta):
"""Basic state storage interface used for triggering.
Only timers must hold the watermark (by their timestamp).
"""
@abstractmethod
def set_timer(
self, window, name, time_domain, timestamp, dynamic_timer_tag=''):
pass
@abstractmethod
def get_window(self, window_id):
pass
@abstractmethod
def clear_timer(self, window, name, time_domain, dynamic_timer_tag=''):
pass
@abstractmethod
def add_state(self, window, tag, value):
pass
@abstractmethod
def get_state(self, window, tag):
pass
@abstractmethod
def clear_state(self, window, tag):
pass
def at(self, window, clock):
return NestedContext(TriggerContext(self, window, clock), 'trigger')
class UnmergedState(SimpleState):
"""State suitable for use in TriggerDriver.
This class must be implemented by each backend.
"""
@abstractmethod
def set_global_state(self, tag, value):
pass
@abstractmethod
def get_global_state(self, tag, default=None):
pass
# pylint: enable=unused-argument
class MergeableStateAdapter(SimpleState):
"""Wraps an UnmergedState, tracking merged windows."""
# TODO(robertwb): A similar indirection could be used for sliding windows
# or other window_fns when a single element typically belongs to many windows.
WINDOW_IDS = _ReadModifyWriteStateTag('window_ids')
def __init__(self, raw_state):
self.raw_state = raw_state
self.window_ids = self.raw_state.get_global_state(self.WINDOW_IDS, {})
self.counter = None
def set_timer(
self, window, name, time_domain, timestamp, dynamic_timer_tag=''):
self.raw_state.set_timer(
self._get_id(window),
name,
time_domain,
timestamp,
dynamic_timer_tag=dynamic_timer_tag)
def clear_timer(self, window, name, time_domain, dynamic_timer_tag=''):
for window_id in self._get_ids(window):
self.raw_state.clear_timer(
window_id, name, time_domain, dynamic_timer_tag=dynamic_timer_tag)
def add_state(self, window, tag, value):
if isinstance(tag, _ReadModifyWriteStateTag):
raise ValueError(
'Merging requested for non-mergeable state tag: %r.' % tag)
elif isinstance(tag, _CombiningValueStateTag):
tag = tag.without_extraction()
self.raw_state.add_state(self._get_id(window), tag, value)
def get_state(self, window, tag):
if isinstance(tag, _CombiningValueStateTag):
original_tag, tag = tag, tag.without_extraction()
values = [
self.raw_state.get_state(window_id, tag)
for window_id in self._get_ids(window)
]
if isinstance(tag, _ReadModifyWriteStateTag):
raise ValueError(
'Merging requested for non-mergeable state tag: %r.' % tag)
elif isinstance(tag, _CombiningValueStateTag):
return original_tag.combine_fn.extract_output(
original_tag.combine_fn.merge_accumulators(values))
elif isinstance(tag, _ListStateTag):
return [v for vs in values for v in vs]
elif isinstance(tag, _SetStateTag):
return {v for vs in values for v in vs}
elif isinstance(tag, _WatermarkHoldStateTag):
return tag.timestamp_combiner_impl.combine_all(values)
else:
raise ValueError('Invalid tag.', tag)
def clear_state(self, window, tag):
for window_id in self._get_ids(window):
self.raw_state.clear_state(window_id, tag)
if tag is None:
del self.window_ids[window]
self._persist_window_ids()
def merge(self, to_be_merged, merge_result):
for window in to_be_merged:
if window != merge_result:
if window in self.window_ids:
if merge_result in self.window_ids:
merge_window_ids = self.window_ids[merge_result]
else:
merge_window_ids = self.window_ids[merge_result] = []
merge_window_ids.extend(self.window_ids.pop(window))
self._persist_window_ids()
def known_windows(self):
return list(self.window_ids)
def get_window(self, window_id):
for window, ids in self.window_ids.items():
if window_id in ids:
return window
raise ValueError('No window for %s' % window_id)
def _get_id(self, window):
if window in self.window_ids:
return self.window_ids[window][0]
window_id = self._get_next_counter()
self.window_ids[window] = [window_id]
self._persist_window_ids()
return window_id
def _get_ids(self, window):
return self.window_ids.get(window, [])
def _get_next_counter(self):
if not self.window_ids:
self.counter = 0
elif self.counter is None:
self.counter = max(k for ids in self.window_ids.values() for k in ids)
self.counter += 1
return self.counter
def _persist_window_ids(self):
self.raw_state.set_global_state(self.WINDOW_IDS, self.window_ids)
def __repr__(self):
return '\n\t'.join([repr(self.window_ids)] +
repr(self.raw_state).split('\n'))
def create_trigger_driver(
windowing, is_batch=False, phased_combine_fn=None, clock=None):
"""Create the TriggerDriver for the given windowing and options."""
# TODO(BEAM-10149): Respect closing and on-time behaviors.
# For batch, we should always fire once, no matter what.
if is_batch and windowing.triggerfn == _Never():
windowing = copy.copy(windowing)
windowing.triggerfn = Always()
# TODO(robertwb): We can do more if we know elements are in timestamp
# sorted order.
if windowing.is_default() and is_batch:
driver = BatchGlobalTriggerDriver()
elif (windowing.windowfn == GlobalWindows() and
(windowing.triggerfn in [AfterCount(1), Always()]) and is_batch):
# Here we also just pass through all the values exactly once.
driver = BatchGlobalTriggerDriver()
else:
driver = GeneralTriggerDriver(windowing, clock)
if phased_combine_fn:
# TODO(ccy): Refactor GeneralTriggerDriver to combine values eagerly using
# the known phased_combine_fn here.
driver = CombiningTriggerDriver(phased_combine_fn, driver)
return driver
class TriggerDriver(metaclass=ABCMeta):
"""Breaks a series of bundle and timer firings into window (pane)s."""
@abstractmethod
def process_elements(
self,
state,
windowed_values,
output_watermark,
input_watermark=MIN_TIMESTAMP):
pass
@abstractmethod
def process_timer(
self,
window_id,
name,
time_domain,
timestamp,
state,
input_watermark=None):
pass
def process_entire_key(self, key, windowed_values):
# This state holds per-key, multi-window state.
state = InMemoryUnmergedState()
for wvalue in self.process_elements(state,
windowed_values,
MIN_TIMESTAMP,
MIN_TIMESTAMP):
yield wvalue.with_value((key, wvalue.value))
while state.timers:
fired = state.get_and_clear_timers()
for timer_window, (name, time_domain, fire_time, _) in fired:
for wvalue in self.process_timer(timer_window,
name,
time_domain,
fire_time,
state):
yield wvalue.with_value((key, wvalue.value))
class _UnwindowedValues(observable.ObservableMixin):
"""Exposes iterable of windowed values as iterable of unwindowed values."""
def __init__(self, windowed_values):
super(_UnwindowedValues, self).__init__()
self._windowed_values = windowed_values
def __iter__(self):
for wv in self._windowed_values:
unwindowed_value = wv.value
self.notify_observers(unwindowed_value)
yield unwindowed_value
def __repr__(self):
return '<_UnwindowedValues of %s>' % self._windowed_values
def __reduce__(self):
return list, (list(self), )
def __eq__(self, other):
    if isinstance(other, collections.abc.Iterable):
return all(
a == b for a, b in zip_longest(self, other, fillvalue=object()))
else:
return NotImplemented
def __hash__(self):
return hash(tuple(self))
coder_impl.FastPrimitivesCoderImpl.register_iterable_like_type(
_UnwindowedValues)
class BatchGlobalTriggerDriver(TriggerDriver):
"""Groups all received values together.
"""
GLOBAL_WINDOW_TUPLE = (GlobalWindow(), )
ONLY_FIRING = windowed_value.PaneInfo(
is_first=True,
is_last=True,
timing=windowed_value.PaneInfoTiming.ON_TIME,
index=0,
nonspeculative_index=0)
def process_elements(
self,
state,
windowed_values,
unused_output_watermark,
unused_input_watermark=MIN_TIMESTAMP):
yield WindowedValue(
_UnwindowedValues(windowed_values),
MIN_TIMESTAMP,
self.GLOBAL_WINDOW_TUPLE,
self.ONLY_FIRING)
def process_timer(
self,
window_id,
name,
time_domain,
timestamp,
state,
input_watermark=None):
    raise TypeError(
        'Triggers are never set or called for batch default windowing.')
class CombiningTriggerDriver(TriggerDriver):
"""Uses a phased_combine_fn to process output of wrapped TriggerDriver."""
def __init__(self, phased_combine_fn, underlying):
self.phased_combine_fn = phased_combine_fn
self.underlying = underlying
def process_elements(
self,
state,
windowed_values,
output_watermark,
input_watermark=MIN_TIMESTAMP):
uncombined = self.underlying.process_elements(
state, windowed_values, output_watermark, input_watermark)
for output in uncombined:
yield output.with_value(self.phased_combine_fn.apply(output.value))
def process_timer(
self,
window_id,
name,
time_domain,
timestamp,
state,
input_watermark=None):
uncombined = self.underlying.process_timer(
window_id, name, time_domain, timestamp, state, input_watermark)
for output in uncombined:
yield output.with_value(self.phased_combine_fn.apply(output.value))
class GeneralTriggerDriver(TriggerDriver):
"""Breaks a series of bundle and timer firings into window (pane)s.
Suitable for all variants of Windowing.
"""
ELEMENTS = _ListStateTag('elements')
TOMBSTONE = _CombiningValueStateTag('tombstone', combiners.CountCombineFn())
INDEX = _CombiningValueStateTag('index', combiners.CountCombineFn())
NONSPECULATIVE_INDEX = _CombiningValueStateTag(
'nonspeculative_index', combiners.CountCombineFn())
def __init__(self, windowing, clock):
self.clock = clock
self.allowed_lateness = windowing.allowed_lateness
self.window_fn = windowing.windowfn
self.timestamp_combiner_impl = TimestampCombiner.get_impl(
windowing.timestamp_combiner, self.window_fn)
# pylint: disable=invalid-name
self.WATERMARK_HOLD = _WatermarkHoldStateTag(
'watermark', self.timestamp_combiner_impl)
# pylint: enable=invalid-name
self.trigger_fn = windowing.triggerfn
self.accumulation_mode = windowing.accumulation_mode
self.is_merging = True
def process_elements(
self,
state,
windowed_values,
output_watermark,
input_watermark=MIN_TIMESTAMP):
if self.is_merging:
state = MergeableStateAdapter(state)
windows_to_elements = collections.defaultdict(list)
for wv in windowed_values:
for window in wv.windows:
# ignore expired windows
if input_watermark > window.end + self.allowed_lateness:
continue
windows_to_elements[window].append((wv.value, wv.timestamp))
# First handle merging.
if self.is_merging:
old_windows = set(state.known_windows())
all_windows = old_windows.union(list(windows_to_elements))
if all_windows != old_windows:
merged_away = {}
class TriggerMergeContext(WindowFn.MergeContext):
def merge(_, to_be_merged, merge_result): # pylint: disable=no-self-argument
for window in to_be_merged:
if window != merge_result:
merged_away[window] = merge_result
# Clear state associated with PaneInfo since it is
# not preserved across merges.
state.clear_state(window, self.INDEX)
state.clear_state(window, self.NONSPECULATIVE_INDEX)
state.merge(to_be_merged, merge_result)
            # `self` below is the outer GeneralTriggerDriver instance, not the
            # merge context (merge deliberately names its first argument `_`).
self.trigger_fn.on_merge(
to_be_merged, merge_result, state.at(merge_result, self.clock))
self.window_fn.merge(TriggerMergeContext(all_windows))
merged_windows_to_elements = collections.defaultdict(list)
for window, values in windows_to_elements.items():
while window in merged_away:
window = merged_away[window]
merged_windows_to_elements[window].extend(values)
windows_to_elements = merged_windows_to_elements
for window in merged_away:
state.clear_state(window, self.WATERMARK_HOLD)
# Next handle element adding.
for window, elements in windows_to_elements.items():
if state.get_state(window, self.TOMBSTONE):
continue
# Add watermark hold.
# TODO(ccy): Add late data and garbage-collection hold support.
output_time = self.timestamp_combiner_impl.merge(
window,
(
element_output_time for element_output_time in (
self.timestamp_combiner_impl.assign_output_time(
window, timestamp) for unused_value,
timestamp in elements)
if element_output_time >= output_watermark))
if output_time is not None:
state.add_state(window, self.WATERMARK_HOLD, output_time)
context = state.at(window, self.clock)
for value, unused_timestamp in elements:
state.add_state(window, self.ELEMENTS, value)
self.trigger_fn.on_element(value, window, context)
# Maybe fire this window.
if self.trigger_fn.should_fire(TimeDomain.WATERMARK,
input_watermark,
window,
context):
finished = self.trigger_fn.on_fire(input_watermark, window, context)
yield self._output(window, finished, state, output_watermark, False)
def process_timer(
self,
window_id,
unused_name,
time_domain,
timestamp,
state,
input_watermark=None):
if input_watermark is None:
input_watermark = timestamp
if self.is_merging:
state = MergeableStateAdapter(state)
window = state.get_window(window_id)
if state.get_state(window, self.TOMBSTONE):
return
if time_domain in (TimeDomain.WATERMARK, TimeDomain.REAL_TIME):
if not self.is_merging or window in state.known_windows():
context = state.at(window, self.clock)
if self.trigger_fn.should_fire(time_domain, timestamp, window, context):
finished = self.trigger_fn.on_fire(timestamp, window, context)
yield self._output(
window,
finished,
state,
timestamp,
time_domain == TimeDomain.WATERMARK)
else:
raise Exception('Unexpected time domain: %s' % time_domain)
def _output(self, window, finished, state, output_watermark, maybe_ontime):
"""Output window and clean up if appropriate."""
index = state.get_state(window, self.INDEX)
state.add_state(window, self.INDEX, 1)
if output_watermark <= window.max_timestamp():
nonspeculative_index = -1
timing = windowed_value.PaneInfoTiming.EARLY
if state.get_state(window, self.NONSPECULATIVE_INDEX):
nonspeculative_index = state.get_state(
window, self.NONSPECULATIVE_INDEX)
state.add_state(window, self.NONSPECULATIVE_INDEX, 1)
_LOGGER.warning(
'Watermark moved backwards in time '
'or late data moved window end forward.')
else:
nonspeculative_index = state.get_state(window, self.NONSPECULATIVE_INDEX)
state.add_state(window, self.NONSPECULATIVE_INDEX, 1)
timing = (
windowed_value.PaneInfoTiming.ON_TIME if maybe_ontime and
nonspeculative_index == 0 else windowed_value.PaneInfoTiming.LATE)
pane_info = windowed_value.PaneInfo(
index == 0, finished, timing, index, nonspeculative_index)
values = state.get_state(window, self.ELEMENTS)
if finished:
# TODO(robertwb): allowed lateness
state.clear_state(window, self.ELEMENTS)
state.add_state(window, self.TOMBSTONE, 1)
elif self.accumulation_mode == AccumulationMode.DISCARDING:
state.clear_state(window, self.ELEMENTS)
timestamp = state.get_state(window, self.WATERMARK_HOLD)
if timestamp is None:
# If no watermark hold was set, output at end of window.
timestamp = window.max_timestamp()
elif output_watermark < window.end and self.trigger_fn.has_ontime_pane():
# Hold the watermark in case there is an empty pane that needs to be fired
# at the end of the window.
pass
else:
state.clear_state(window, self.WATERMARK_HOLD)
return WindowedValue(values, timestamp, (window, ), pane_info)
class InMemoryUnmergedState(UnmergedState):
"""In-memory implementation of UnmergedState.
Used for batch and testing.
"""
def __init__(self, defensive_copy=False):
# TODO(robertwb): Clean defensive_copy. It is too expensive in production.
self.timers = collections.defaultdict(dict)
self.state = collections.defaultdict(lambda: collections.defaultdict(list))
self.global_state = {}
self.defensive_copy = defensive_copy
def copy(self):
cloned_object = InMemoryUnmergedState(defensive_copy=self.defensive_copy)
cloned_object.timers = copy.deepcopy(self.timers)
cloned_object.global_state = copy.deepcopy(self.global_state)
for window in self.state:
for tag in self.state[window]:
cloned_object.state[window][tag] = copy.copy(self.state[window][tag])
return cloned_object
def set_global_state(self, tag, value):
assert isinstance(tag, _ReadModifyWriteStateTag)
if self.defensive_copy:
value = copy.deepcopy(value)
self.global_state[tag.tag] = value
def get_global_state(self, tag, default=None):
return self.global_state.get(tag.tag, default)
def set_timer(
self, window, name, time_domain, timestamp, dynamic_timer_tag=''):
self.timers[window][(name, time_domain, dynamic_timer_tag)] = timestamp
def clear_timer(self, window, name, time_domain, dynamic_timer_tag=''):
self.timers[window].pop((name, time_domain, dynamic_timer_tag), None)
if not self.timers[window]:
del self.timers[window]
def get_window(self, window_id):
return window_id
def add_state(self, window, tag, value):
if self.defensive_copy:
value = copy.deepcopy(value)
if isinstance(tag, _ReadModifyWriteStateTag):
self.state[window][tag.tag] = value
elif isinstance(tag, _CombiningValueStateTag):
# TODO(robertwb): Store merged accumulators.
self.state[window][tag.tag].append(value)
elif isinstance(tag, _ListStateTag):
self.state[window][tag.tag].append(value)
elif isinstance(tag, _SetStateTag):
self.state[window][tag.tag].append(value)
elif isinstance(tag, _WatermarkHoldStateTag):
self.state[window][tag.tag].append(value)
else:
raise ValueError('Invalid tag.', tag)
def get_state(self, window, tag):
values = self.state[window][tag.tag]
if isinstance(tag, _ReadModifyWriteStateTag):
return values
elif isinstance(tag, _CombiningValueStateTag):
return tag.combine_fn.apply(values)
elif isinstance(tag, _ListStateTag):
return values
elif isinstance(tag, _SetStateTag):
return values
elif isinstance(tag, _WatermarkHoldStateTag):
return tag.timestamp_combiner_impl.combine_all(values)
else:
raise ValueError('Invalid tag.', tag)
def clear_state(self, window, tag):
self.state[window].pop(tag.tag, None)
if not self.state[window]:
self.state.pop(window, None)
def get_timers(
self, clear=False, watermark=MAX_TIMESTAMP, processing_time=None):
"""Gets expired timers and reports if there
are any realtime timers set per state.
Expiration is measured against the watermark for event-time timers,
and against a wall clock for processing-time timers.
"""
expired = []
has_realtime_timer = False
for window, timers in list(self.timers.items()):
for (name, time_domain, dynamic_timer_tag), timestamp in list(
timers.items()):
if time_domain == TimeDomain.REAL_TIME:
time_marker = processing_time
has_realtime_timer = True
elif time_domain == TimeDomain.WATERMARK:
time_marker = watermark
else:
          _LOGGER.error(
              'TimeDomain error: No timers defined for time domain %s.',
              time_domain)
          # No time_marker exists for an unknown time domain, so skip this
          # timer instead of comparing against an undefined value.
          continue
if timestamp <= time_marker:
expired.append(
(window, (name, time_domain, timestamp, dynamic_timer_tag)))
if clear:
del timers[(name, time_domain, dynamic_timer_tag)]
if not timers and clear:
del self.timers[window]
return expired, has_realtime_timer
def get_and_clear_timers(self, watermark=MAX_TIMESTAMP):
return self.get_timers(clear=True, watermark=watermark)[0]
def get_earliest_hold(self):
earliest_hold = MAX_TIMESTAMP
for unused_window, tagged_states in self.state.items():
# TODO(BEAM-2519): currently, this assumes that the watermark hold tag is
# named "watermark". This is currently only true because the only place
# watermark holds are set is in the GeneralTriggerDriver, where we use
# this name. We should fix this by allowing enumeration of the tag types
# used in adding state.
if 'watermark' in tagged_states and tagged_states['watermark']:
hold = min(tagged_states['watermark']) - TIME_GRANULARITY
earliest_hold = min(earliest_hold, hold)
return earliest_hold
def __repr__(self):
state_str = '\n'.join(
'%s: %s' % (key, dict(state)) for key, state in self.state.items())
return 'timers: %s\nstate: %s' % (dict(self.timers), state_str)
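# A minimal, hedged sketch (not part of the original module) exercising the
# timer API of InMemoryUnmergedState in isolation. It assumes the module-level
# names InMemoryUnmergedState and TimeDomain defined above; the window key and
# the integer timestamps are arbitrary placeholders.
if __name__ == '__main__':
  _state = InMemoryUnmergedState()
  # Register one event-time timer for a fake window key.
  _state.set_timer('window-0', 'gc', TimeDomain.WATERMARK, 100)
  # Timers whose timestamp is at or before the supplied watermark are expired.
  expired, has_realtime = _state.get_timers(clear=True, watermark=150)
  print(expired)       # e.g. [('window-0', ('gc', TimeDomain.WATERMARK, 100, ''))]
  print(has_realtime)  # False: no processing-time timers were registered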
|
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'MuseumObject.three_d_link'
db.add_column('cat_museumobject', 'three_d_link',
self.gf('django.db.models.fields.CharField')(default='', max_length=255, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'MuseumObject.three_d_link'
db.delete_column('cat_museumobject', 'three_d_link')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cat.accessstatus': {
'Meta': {'object_name': 'AccessStatus'},
'definition': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'})
},
'cat.acquisitionmethod': {
'Meta': {'object_name': 'AcquisitionMethod'},
'definition': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'method': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'preposition': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
'cat.artefacttype': {
'Meta': {'ordering': "['name']", 'object_name': 'ArtefactType'},
'definition': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '150'}),
'see_also': ('django.db.models.fields.CharField', [], {'max_length': '150', 'blank': 'True'})
},
'cat.category': {
'Meta': {'ordering': "['parent__name', 'name']", 'unique_together': "(('slug', 'parent'), ('name', 'parent'))", 'object_name': 'Category'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'icon_path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'icon_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['cat.Category']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'suggested_artefact_types': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'categories'", 'null': 'True', 'to': "orm['cat.ArtefactType']"})
},
'cat.culturalbloc': {
'Meta': {'ordering': "['name']", 'object_name': 'CulturalBloc'},
'definition': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30', 'db_index': 'True'})
},
'cat.functionalcategory': {
'Meta': {'ordering': "['name']", 'object_name': 'FunctionalCategory'},
'definition': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cat.loanstatus': {
'Meta': {'object_name': 'LoanStatus'},
'definition': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'})
},
'cat.museumobject': {
'Meta': {'ordering': "['registration_number']", 'object_name': 'MuseumObject'},
'access_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cat.AccessStatus']", 'null': 'True'}),
'acquisition_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'acquisition_method': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cat.AcquisitionMethod']", 'null': 'True'}),
'artefact_illustrated': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'artefact_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cat.ArtefactType']"}),
'assoc_cultural_group': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cat.Category']", 'symmetrical': 'False', 'blank': 'True'}),
'category_illustrated': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'circumference': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'collector': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'collected_objects'", 'null': 'True', 'to': "orm['parties.Person']"}),
'collector_2': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'collected_objects_2'", 'null': 'True', 'to': "orm['parties.Person']"}),
'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'country': ('smart_selects.db_fields.ChainedForeignKey', [], {'to': "orm['location.Country']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'cultural_bloc': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cat.CulturalBloc']", 'null': 'True', 'blank': 'True'}),
'depth': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'donor': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'donated_objects'", 'null': 'True', 'to': "orm['parties.Person']"}),
'donor_2': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'donated_objects_2'", 'null': 'True', 'to': "orm['parties.Person']"}),
'exhibition_history': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'functional_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cat.FunctionalCategory']", 'null': 'True', 'blank': 'True'}),
'global_region': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['location.GlobalRegion']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'how_collector_obtained': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'collector_obtained'", 'null': 'True', 'to': "orm['cat.Obtained']"}),
'how_donor_obtained': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'donor_obtained'", 'null': 'True', 'to': "orm['cat.Obtained']"}),
'how_source_obtained': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'source_obtained'", 'null': 'True', 'to': "orm['cat.Obtained']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indigenous_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'is_public_comment': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'latitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'length': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'loan_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cat.LoanStatus']", 'null': 'True'}),
'locality': ('smart_selects.db_fields.ChainedForeignKey', [], {'to': "orm['location.Locality']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'longitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'maker': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'created_items'", 'null': 'True', 'to': "orm['parties.Person']"}),
'manufacture_technique': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'old_maker': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'old_maker'", 'null': 'True', 'to': "orm['parties.Maker']"}),
'old_registration_number': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'other_number': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'photographer': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'place': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['location.Place']", 'null': 'True'}),
'private_comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'raw_material': ('django.db.models.fields.CharField', [], {'max_length': '150', 'blank': 'True'}),
'record_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cat.RecordStatus']", 'null': 'True', 'blank': 'True'}),
'recorded_use': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
'reg_counter': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'reg_info': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'region_district': ('smart_selects.db_fields.ChainedForeignKey', [], {'to': "orm['location.RegionDistrict']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'registered_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['parties.MuseumStaff']", 'null': 'True', 'blank': 'True'}),
'registration_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'registration_number': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'db_index': 'True'}),
'related_documents': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['mediaman.Document']", 'null': 'True', 'blank': 'True'}),
'significance': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'site_name_number': ('django.db.models.fields.CharField', [], {'max_length': '150', 'blank': 'True'}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'state_province': ('smart_selects.db_fields.ChainedForeignKey', [], {'to': "orm['location.StateProvince']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'storage_bay': ('django.db.models.fields.CharField', [], {'max_length': '4', 'blank': 'True'}),
'storage_section': ('django.db.models.fields.CharField', [], {'max_length': '4', 'blank': 'True'}),
'storage_shelf_box_drawer': ('django.db.models.fields.CharField', [], {'max_length': '4', 'blank': 'True'}),
'storage_unit': ('django.db.models.fields.CharField', [], {'max_length': '4', 'blank': 'True'}),
'three_d_link': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'when_collector_obtained': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'when_donor_obtained': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'cat.obtained': {
'Meta': {'object_name': 'Obtained'},
'definition': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'how': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'cat.photorecord': {
'Meta': {'object_name': 'PhotoRecord'},
'comments': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'museum_object': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cat.MuseumObject']"}),
'phototype': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cat.PhotoType']"})
},
'cat.phototype': {
'Meta': {'object_name': 'PhotoType'},
'definition': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'phototype': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'})
},
'cat.recordstatus': {
'Meta': {'object_name': 'RecordStatus'},
'definition': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'})
},
'cat.reference': {
'Meta': {'object_name': 'Reference'},
'author': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'museum_object': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cat.MuseumObject']"}),
'publications_details': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'location.country': {
'Meta': {'ordering': "['name']", 'unique_together': "(('parent', 'slug'),)", 'object_name': 'Country'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'gn_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'gn_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'longitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'children'", 'on_delete': 'models.PROTECT', 'to': "orm['location.GlobalRegion']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'})
},
'location.globalregion': {
'Meta': {'ordering': "['name']", 'object_name': 'GlobalRegion'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'gn_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'gn_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'icon_path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'icon_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'longitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'})
},
'location.locality': {
'Meta': {'ordering': "['name']", 'unique_together': "(('parent', 'slug'),)", 'object_name': 'Locality'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'gn_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'gn_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'longitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'children'", 'on_delete': 'models.PROTECT', 'to': "orm['location.RegionDistrict']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'})
},
'location.place': {
'Meta': {'ordering': "['id']", 'object_name': 'Place'},
'australian_state': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gn_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'gn_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_corrected': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'latitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'longitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'region': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'})
},
'location.regiondistrict': {
'Meta': {'ordering': "['name']", 'unique_together': "(('parent', 'slug'),)", 'object_name': 'RegionDistrict'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'gn_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'gn_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'longitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'children'", 'on_delete': 'models.PROTECT', 'to': "orm['location.StateProvince']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'})
},
'location.stateprovince': {
'Meta': {'ordering': "['name']", 'unique_together': "(('parent', 'slug'),)", 'object_name': 'StateProvince'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'gn_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'gn_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'longitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'children'", 'on_delete': 'models.PROTECT', 'to': "orm['location.Country']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'})
},
'mediaman.document': {
'Meta': {'object_name': 'Document'},
'document': ('django.db.models.fields.files.FileField', [], {'max_length': '255'}),
'document_text': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'filesize': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'md5sum': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'}),
'mime_type': ('django.db.models.fields.CharField', [], {'max_length': '150', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'original_filedate': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'original_path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'upload_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'uploaded_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"})
},
'parties.maker': {
'Meta': {'ordering': "['name']", 'object_name': 'Maker'},
'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'})
},
'parties.museumstaff': {
'Meta': {'ordering': "['name']", 'object_name': 'MuseumStaff'},
'comments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'})
},
'parties.person': {
'Meta': {'ordering': "['name']", 'object_name': 'Person'},
'comments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'display_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
'related_documents': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'related_people'", 'blank': 'True', 'to': "orm['mediaman.Document']"})
}
}
complete_apps = ['cat']
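# A hedged usage note (not part of the original migration): with South
# installed, this schema migration would typically be applied with
# "python manage.py migrate cat", and rolled back with
# "python manage.py migrate cat <previous_migration_number>", which invokes
# backwards() above to drop the three_d_link column again.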
|
|
"""Test the init file for the Insteon component."""
import asyncio
from unittest.mock import patch
from pyinsteon.address import Address
from homeassistant.components import insteon
from homeassistant.components.insteon.const import (
CONF_CAT,
CONF_OVERRIDE,
CONF_SUBCAT,
CONF_X10,
DOMAIN,
PORT_HUB_V1,
PORT_HUB_V2,
)
from homeassistant.const import (
CONF_ADDRESS,
CONF_DEVICE,
CONF_HOST,
CONF_PASSWORD,
CONF_PORT,
CONF_USERNAME,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.setup import async_setup_component
from .const import (
MOCK_ADDRESS,
MOCK_CAT,
MOCK_IMPORT_CONFIG_PLM,
MOCK_IMPORT_FULL_CONFIG_HUB_V1,
MOCK_IMPORT_FULL_CONFIG_HUB_V2,
MOCK_IMPORT_FULL_CONFIG_PLM,
MOCK_IMPORT_MINIMUM_HUB_V1,
MOCK_IMPORT_MINIMUM_HUB_V2,
MOCK_SUBCAT,
MOCK_USER_INPUT_PLM,
PATCH_CONNECTION,
)
from .mock_devices import MockDevices
from tests.common import MockConfigEntry
async def mock_successful_connection(*args, **kwargs):
"""Return a successful connection."""
return True
async def mock_failed_connection(*args, **kwargs):
"""Return a failed connection."""
raise ConnectionError("Connection failed")
async def test_setup_entry(hass: HomeAssistantType):
"""Test setting up the entry."""
config_entry = MockConfigEntry(domain=DOMAIN, data=MOCK_USER_INPUT_PLM)
config_entry.add_to_hass(hass)
with patch.object(
insteon, "async_connect", new=mock_successful_connection
), patch.object(insteon, "async_close") as mock_close, patch.object(
insteon, "devices", new=MockDevices()
):
assert await async_setup_component(
hass,
insteon.DOMAIN,
{},
)
await hass.async_block_till_done()
hass.bus.async_fire(EVENT_HOMEASSISTANT_STOP)
await hass.async_block_till_done()
# pylint: disable=no-member
assert insteon.devices.async_save.call_count == 1
assert mock_close.called
async def test_import_plm(hass: HomeAssistantType):
"""Test setting up the entry from YAML to a PLM."""
config = {}
config[DOMAIN] = MOCK_IMPORT_CONFIG_PLM
with patch.object(
insteon, "async_connect", new=mock_successful_connection
), patch.object(insteon, "close_insteon_connection"), patch.object(
insteon, "devices", new=MockDevices()
), patch(
PATCH_CONNECTION, new=mock_successful_connection
):
assert await async_setup_component(
hass,
insteon.DOMAIN,
config,
)
await hass.async_block_till_done()
await asyncio.sleep(0.01)
assert hass.config_entries.async_entries(DOMAIN)
data = hass.config_entries.async_entries(DOMAIN)[0].data
assert data[CONF_DEVICE] == MOCK_IMPORT_CONFIG_PLM[CONF_PORT]
assert CONF_PORT not in data
async def test_import_hub1(hass: HomeAssistantType):
"""Test setting up the entry from YAML to a hub v1."""
config = {}
config[DOMAIN] = MOCK_IMPORT_MINIMUM_HUB_V1
with patch.object(
insteon, "async_connect", new=mock_successful_connection
), patch.object(insteon, "close_insteon_connection"), patch.object(
insteon, "devices", new=MockDevices()
), patch(
PATCH_CONNECTION, new=mock_successful_connection
):
assert await async_setup_component(
hass,
insteon.DOMAIN,
config,
)
await hass.async_block_till_done()
await asyncio.sleep(0.01)
assert hass.config_entries.async_entries(DOMAIN)
data = hass.config_entries.async_entries(DOMAIN)[0].data
assert data[CONF_HOST] == MOCK_IMPORT_FULL_CONFIG_HUB_V1[CONF_HOST]
assert data[CONF_PORT] == PORT_HUB_V1
assert CONF_USERNAME not in data
assert CONF_PASSWORD not in data
async def test_import_hub2(hass: HomeAssistantType):
"""Test setting up the entry from YAML to a hub v2."""
config = {}
config[DOMAIN] = MOCK_IMPORT_MINIMUM_HUB_V2
with patch.object(
insteon, "async_connect", new=mock_successful_connection
), patch.object(insteon, "close_insteon_connection"), patch.object(
insteon, "devices", new=MockDevices()
), patch(
PATCH_CONNECTION, new=mock_successful_connection
):
assert await async_setup_component(
hass,
insteon.DOMAIN,
config,
)
await hass.async_block_till_done()
await asyncio.sleep(0.01)
assert hass.config_entries.async_entries(DOMAIN)
data = hass.config_entries.async_entries(DOMAIN)[0].data
assert data[CONF_HOST] == MOCK_IMPORT_FULL_CONFIG_HUB_V2[CONF_HOST]
assert data[CONF_PORT] == PORT_HUB_V2
assert data[CONF_USERNAME] == MOCK_IMPORT_MINIMUM_HUB_V2[CONF_USERNAME]
assert data[CONF_PASSWORD] == MOCK_IMPORT_MINIMUM_HUB_V2[CONF_PASSWORD]
async def test_import_options(hass: HomeAssistantType):
"""Test setting up the entry from YAML including options."""
config = {}
config[DOMAIN] = MOCK_IMPORT_FULL_CONFIG_PLM
with patch.object(
insteon, "async_connect", new=mock_successful_connection
), patch.object(insteon, "close_insteon_connection"), patch.object(
insteon, "devices", new=MockDevices()
), patch(
PATCH_CONNECTION, new=mock_successful_connection
):
assert await async_setup_component(
hass,
insteon.DOMAIN,
config,
)
await hass.async_block_till_done()
await asyncio.sleep(0.01) # Need to yield to async processes
# pylint: disable=no-member
assert insteon.devices.add_x10_device.call_count == 2
assert insteon.devices.set_id.call_count == 1
options = hass.config_entries.async_entries(DOMAIN)[0].options
assert len(options[CONF_OVERRIDE]) == 1
assert options[CONF_OVERRIDE][0][CONF_ADDRESS] == str(Address(MOCK_ADDRESS))
assert options[CONF_OVERRIDE][0][CONF_CAT] == MOCK_CAT
assert options[CONF_OVERRIDE][0][CONF_SUBCAT] == MOCK_SUBCAT
assert len(options[CONF_X10]) == 2
assert options[CONF_X10][0] == MOCK_IMPORT_FULL_CONFIG_PLM[CONF_X10][0]
assert options[CONF_X10][1] == MOCK_IMPORT_FULL_CONFIG_PLM[CONF_X10][1]
async def test_import_failed_connection(hass: HomeAssistantType):
"""Test a failed connection in import does not create a config entry."""
config = {}
config[DOMAIN] = MOCK_IMPORT_CONFIG_PLM
with patch.object(
insteon, "async_connect", new=mock_failed_connection
), patch.object(insteon, "async_close"), patch.object(
insteon, "devices", new=MockDevices(connected=False)
):
assert await async_setup_component(
hass,
insteon.DOMAIN,
config,
)
await hass.async_block_till_done()
assert not hass.config_entries.async_entries(DOMAIN)
async def test_setup_entry_failed_connection(hass: HomeAssistantType, caplog):
"""Test setting up the entry with a failed connection."""
config_entry = MockConfigEntry(domain=DOMAIN, data=MOCK_USER_INPUT_PLM)
config_entry.add_to_hass(hass)
with patch.object(
insteon, "async_connect", new=mock_failed_connection
), patch.object(insteon, "devices", new=MockDevices(connected=False)):
assert await async_setup_component(
hass,
insteon.DOMAIN,
{},
)
assert "Could not connect to Insteon modem" in caplog.text
|
|
#!/usr/bin/python
# -- Content-Encoding: UTF-8 --
"""
Provides functions for reading (and, as a work in progress, writing) Java
objects serialized with ObjectOutputStream, or meant to be deserialized by it.
This form of object representation is a standard data interchange format in
the Java world.
The javaobj module exposes an API familiar to users of the standard library
marshal, pickle and json modules.
See:
http://download.oracle.com/javase/6/docs/platform/serialization/spec/protocol.html
:authors: Volodymyr Buell, Thomas Calmant
:license: Apache License 2.0
:version: 0.2.3
:status: Alpha
..
Copyright 2016 Thomas Calmant
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Standard library
import collections
import logging
import os
import struct
import sys
try:
# Python 2
from StringIO import StringIO as BytesIO
except ImportError:
# Python 3+
from io import BytesIO
# ------------------------------------------------------------------------------
# Module version
__version_info__ = (0, 2, 3)
__version__ = ".".join(str(x) for x in __version_info__)
# Documentation strings format
__docformat__ = "restructuredtext en"
# ------------------------------------------------------------------------------
# Setup the logger
_log = logging.getLogger(__name__)
def log_debug(message, ident=0):
"""
Logs a message at debug level
:param message: Message to log
:param ident: Number of indentation spaces
"""
_log.debug(" " * (ident * 2) + str(message))
def log_error(message, ident=0):
"""
Logs a message at error level
:param message: Message to log
:param ident: Number of indentation spaces
"""
_log.error(" " * (ident * 2) + str(message))
# ------------------------------------------------------------------------------
if sys.version_info[0] >= 3:
# Python 3 interpreter : bytes & str
def to_bytes(data, encoding="UTF-8"):
"""
Converts the given string to an array of bytes.
Returns the first parameter if it is already an array of bytes.
:param data: A unicode string
:param encoding: The encoding of data
:return: The corresponding array of bytes
"""
if type(data) is bytes:
# Nothing to do
return data
return data.encode(encoding)
def to_str(data, encoding="UTF-8"):
"""
Converts the given parameter to a string.
Returns the first parameter if it is already an instance of ``str``.
:param data: A string
:param encoding: The encoding of data
:return: The corresponding string
"""
if type(data) is str:
# Nothing to do
return data
return str(data, encoding)
def read_to_str(data):
"""
        Concatenates all bytes into a string
"""
return ''.join(chr(char) for char in data)
else:
# Python 2 interpreter : str & unicode
def to_str(data, encoding="UTF-8"):
"""
Converts the given parameter to a string.
Returns the first parameter if it is already an instance of ``str``.
:param data: A string
:param encoding: The encoding of data
:return: The corresponding string
"""
if type(data) is str:
# Nothing to do
return data
return data.encode(encoding)
# Same operation
to_bytes = to_str
def read_to_str(data):
"""
Nothing to do in Python 2
"""
return data
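# A small, hedged illustration (not part of the original module) of the
# compatibility helpers above, as they behave on a Python 3 interpreter:
#
#     to_bytes(u"caf\u00e9")    # -> b'caf\xc3\xa9'
#     to_str(b'caf\xc3\xa9')    # -> 'café'
#     read_to_str(b'\x68\x69')  # -> 'hi' (each byte converted with chr())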
# ------------------------------------------------------------------------------
def load(file_object, *transformers, **kwargs):
"""
Deserializes Java primitive data and objects serialized using
ObjectOutputStream from a file-like object.
:param file_object: A file-like object
:param transformers: Custom transformers to use
:param ignore_remaining_data: If True, don't log an error when unused
trailing bytes are remaining
:return: The deserialized object
"""
# Read keyword argument
ignore_remaining_data = kwargs.get('ignore_remaining_data', False)
marshaller = JavaObjectUnmarshaller(
file_object, kwargs.get('use_numpy_arrays', False))
# Add custom transformers first
for transformer in transformers:
marshaller.add_transformer(transformer)
marshaller.add_transformer(DefaultObjectTransformer())
# Read the file object
return marshaller.readObject(ignore_remaining_data=ignore_remaining_data)
def loads(string, *transformers, **kwargs):
"""
Deserializes Java objects and primitive data serialized using
ObjectOutputStream from a string.
:param string: A Java data string
:param transformers: Custom transformers to use
:param ignore_remaining_data: If True, don't log an error when unused
trailing bytes are remaining
:return: The deserialized object
"""
# Read keyword argument
ignore_remaining_data = kwargs.get('ignore_remaining_data', False)
# Reuse the load method (avoid code duplication)
return load(BytesIO(string), *transformers,
ignore_remaining_data=ignore_remaining_data)
def dumps(obj, *transformers):
"""
    Serializes Java primitive data and objects previously unmarshaled by
    load()/loads() into a string.
:param obj: A Python primitive object, or one loaded using load(s)
:param transformers: Custom transformers to use
:return: The serialized data as a string
"""
marshaller = JavaObjectMarshaller()
# Add custom transformers
for transformer in transformers:
marshaller.add_transformer(transformer)
return marshaller.dump(obj)
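# A minimal, hedged example (not part of the original module) of the load/loads
# API documented above. The byte string below is a hand-built stream for the
# single Java string "hello": magic 0xACED, stream version 0x0005, TC_STRING
# (0x74) followed by a 2-byte length and the UTF-8 bytes.
#
#     data = b"\xac\xed\x00\x05\x74\x00\x05hello"
#     obj = loads(data)
#     print(obj)  # -> hello (a JavaString, i.e. a str subclass)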
# ------------------------------------------------------------------------------
class JavaClass(object):
"""
Represents a class in the Java world
"""
def __init__(self):
"""
Sets up members
"""
self.name = None
self.serialVersionUID = None
self.flags = None
self.fields_names = []
self.fields_types = []
self.superclass = None
def __str__(self):
"""
String representation of the Java class
"""
return self.__repr__()
def __repr__(self):
"""
String representation of the Java class
"""
return "[{0:s}:0x{1:X}]".format(self.name, self.serialVersionUID)
def __eq__(self, other):
"""
Equality test between two Java classes
:param other: Other JavaClass to test
:return: True if both classes share the same fields and name
"""
if not isinstance(other, type(self)):
return False
return (self.name == other.name and
self.serialVersionUID == other.serialVersionUID and
self.flags == other.flags and
self.fields_names == other.fields_names and
self.fields_types == other.fields_types and
self.superclass == other.superclass)
class JavaObject(object):
"""
Represents a deserialized non-primitive Java object
"""
def __init__(self):
"""
Sets up members
"""
self.classdesc = None
self.annotations = []
def get_class(self):
"""
Returns the JavaClass that defines the type of this object
"""
return self.classdesc
def __str__(self):
"""
String representation
"""
return self.__repr__()
def __repr__(self):
"""
String representation
"""
name = "UNKNOWN"
if self.classdesc:
name = self.classdesc.name
return "<javaobj:{0}>".format(name)
def __eq__(self, other):
"""
        Equality test between two Java objects
        :param other: Other JavaObject to test
        :return: True if both objects share the same class description,
                 annotations and field values
"""
if not isinstance(other, type(self)):
return False
res = (self.classdesc == other.classdesc and
self.annotations == other.annotations)
if not res:
return False
for name in self.classdesc.fields_names:
if not getattr(self, name) == getattr(other, name):
return False
return True
class JavaString(str):
"""
Represents a Java String
"""
def __hash__(self):
return str.__hash__(self)
def __eq__(self, other):
if not isinstance(other, str):
return False
return str.__eq__(self, other)
class JavaEnum(JavaObject):
"""
Represents a Java enumeration
"""
def __init__(self, constant=None):
super(JavaEnum, self).__init__()
self.constant = constant
class JavaArray(list, JavaObject):
"""
Represents a Java Array
"""
def __init__(self, classdesc=None):
list.__init__(self)
JavaObject.__init__(self)
self.classdesc = classdesc
class JavaByteArray(bytearray, JavaObject):
"""
Represents the special case of Java Array which contains bytes
"""
def __init__(self, data, classdesc=None):
bytearray.__init__(self, data)
JavaObject.__init__(self)
self.classdesc = classdesc
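# A brief, hedged illustration (not part of the original module) of the wrapper
# semantics above: JavaString subclasses str, so on Python 3 instances compare
# and hash like plain strings, while non-string values never compare equal.
#
#     JavaString("abc") == "abc"      # True
#     JavaString("abc") == b"abc"     # False (bytes is not a str instance)
#     {JavaString("abc"): 1}["abc"]   # 1 (same hash as the plain str key)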
# ------------------------------------------------------------------------------
class JavaObjectConstants(object):
"""
Defines the constants of the Java serialization format
"""
STREAM_MAGIC = 0xaced
STREAM_VERSION = 0x05
TC_NULL = 0x70
TC_REFERENCE = 0x71
TC_CLASSDESC = 0x72
TC_OBJECT = 0x73
TC_STRING = 0x74
TC_ARRAY = 0x75
TC_CLASS = 0x76
TC_BLOCKDATA = 0x77
TC_ENDBLOCKDATA = 0x78
TC_RESET = 0x79
TC_BLOCKDATALONG = 0x7A
TC_EXCEPTION = 0x7B
TC_LONGSTRING = 0x7C
TC_PROXYCLASSDESC = 0x7D
TC_ENUM = 0x7E
# Ignore TC_MAX: we don't use it and it messes with TC_ENUM
# TC_MAX = 0x7E
# classDescFlags
SC_WRITE_METHOD = 0x01 # if SC_SERIALIZABLE
SC_BLOCK_DATA = 0x08 # if SC_EXTERNALIZABLE
SC_SERIALIZABLE = 0x02
SC_EXTERNALIZABLE = 0x04
SC_ENUM = 0x10
# type definition chars (typecode)
TYPE_BYTE = 'B' # 0x42
TYPE_CHAR = 'C' # 0x43
TYPE_DOUBLE = 'D' # 0x44
TYPE_FLOAT = 'F' # 0x46
TYPE_INTEGER = 'I' # 0x49
TYPE_LONG = 'J' # 0x4A
TYPE_SHORT = 'S' # 0x53
TYPE_BOOLEAN = 'Z' # 0x5A
TYPE_OBJECT = 'L' # 0x4C
TYPE_ARRAY = '[' # 0x5B
# list of supported typecodes listed above
TYPECODES_LIST = [
# primitive types
TYPE_BYTE,
TYPE_CHAR,
TYPE_DOUBLE,
TYPE_FLOAT,
TYPE_INTEGER,
TYPE_LONG,
TYPE_SHORT,
TYPE_BOOLEAN,
# object types
TYPE_OBJECT,
TYPE_ARRAY]
BASE_REFERENCE_IDX = 0x7E0000
NUMPY_TYPE_MAP = {
TYPE_BYTE: 'B',
TYPE_CHAR: 'b',
TYPE_DOUBLE: '>d',
TYPE_FLOAT: '>f',
TYPE_INTEGER: '>i',
TYPE_LONG: '>l',
TYPE_SHORT: '>h',
TYPE_BOOLEAN: '>B'
}
class OpCodeDebug(object):
"""
OP Codes definition and utility methods
"""
# Type codes
OP_CODE = dict((getattr(JavaObjectConstants, key), key)
for key in dir(JavaObjectConstants)
if key.startswith("TC_"))
TYPE = dict((getattr(JavaObjectConstants, key), key)
for key in dir(JavaObjectConstants)
if key.startswith("TYPE_"))
STREAM_CONSTANT = dict((getattr(JavaObjectConstants, key), key)
for key in dir(JavaObjectConstants)
if key.startswith("SC_"))
@staticmethod
def op_id(op_id):
"""
Returns the name of the given OP Code
:param op_id: OP Code
:return: Name of the OP Code
"""
return OpCodeDebug.OP_CODE.get(
op_id, "<unknown OP:{0}>".format(op_id))
@staticmethod
def type_code(type_id):
"""
Returns the name of the given Type Code
:param type_id: Type code
:return: Name of the type code
"""
return OpCodeDebug.TYPE.get(
type_id, "<unknown Type:{0}>".format(type_id))
@staticmethod
def flags(flags):
"""
Returns the names of the class description flags found in the given
integer
:param flags: A class description flag entry
:return: The flags names as a single string
"""
names = sorted(
descr for key, descr in OpCodeDebug.STREAM_CONSTANT.items()
if key & flags)
return ', '.join(names)
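# A short, hedged example (not part of the original module) of the debug
# helpers above, using constants defined in JavaObjectConstants:
#
#     OpCodeDebug.op_id(0x74)      # -> 'TC_STRING'
#     OpCodeDebug.type_code('I')   # -> 'TYPE_INTEGER'
#     OpCodeDebug.flags(0x03)      # -> 'SC_SERIALIZABLE, SC_WRITE_METHOD'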
# ------------------------------------------------------------------------------
class JavaObjectUnmarshaller(JavaObjectConstants):
"""
Deserializes a Java serialization stream
"""
def __init__(self, stream, use_numpy_arrays=False):
"""
Sets up members
:param stream: An input stream (opened in binary/bytes mode)
:raise IOError: Invalid input stream
"""
self.use_numpy_arrays = use_numpy_arrays
# Check stream
if stream is None:
raise IOError("No input stream given")
# Prepare the association Terminal Symbol -> Reading method
self.opmap = {
self.TC_NULL: self.do_null,
self.TC_CLASSDESC: self.do_classdesc,
self.TC_OBJECT: self.do_object,
self.TC_STRING: self.do_string,
self.TC_LONGSTRING: self.do_string_long,
self.TC_ARRAY: self.do_array,
self.TC_CLASS: self.do_class,
self.TC_BLOCKDATA: self.do_blockdata,
self.TC_BLOCKDATALONG: self.do_blockdata_long,
self.TC_REFERENCE: self.do_reference,
self.TC_ENUM: self.do_enum,
# note that we are reusing do_null:
self.TC_ENDBLOCKDATA: self.do_null,
}
# Set up members
self.current_object = None
self.reference_counter = 0
self.references = []
self.object_transformers = []
self.object_stream = stream
# Read the stream header (magic & version)
self._readStreamHeader()
def readObject(self, ignore_remaining_data=False):
"""
Reads an object from the input stream
:param ignore_remaining_data: If True, don't log an error when
unused trailing bytes are remaining
:return: The unmarshalled object
:raise Exception: Any exception that occurred during unmarshalling
"""
try:
# TODO: add expects
_, res = self._read_and_exec_opcode(ident=0)
position_bak = self.object_stream.tell()
the_rest = self.object_stream.read()
if not ignore_remaining_data and len(the_rest):
log_error("Warning!!!!: Stream still has {0} bytes left. "
"Enable debug mode of logging to see the hexdump."
.format(len(the_rest)))
log_debug(self._create_hexdump(the_rest))
else:
log_debug("Java Object unmarshalled successfully!")
self.object_stream.seek(position_bak)
return res
except Exception:
self._oops_dump_state(ignore_remaining_data)
raise
def add_transformer(self, transformer):
"""
Appends an object transformer to the deserialization process
:param transformer: An object with a transform(obj) method
"""
self.object_transformers.append(transformer)
def _readStreamHeader(self):
"""
Reads the magic header of a Java serialization stream
:raise IOError: Invalid magic header (not a Java stream)
"""
(magic, version) = self._readStruct(">HH")
if magic != self.STREAM_MAGIC or version != self.STREAM_VERSION:
raise IOError("The stream is not java serialized object. "
"Invalid stream header: {0:04X}{1:04X}"
.format(magic, version))
def _read_and_exec_opcode(self, ident=0, expect=None):
"""
Reads the next opcode, and executes its handler
        :param ident: Log indentation level
:param expect: A list of expected opcodes
:return: A tuple: (opcode, result of the handler)
:raise IOError: Read opcode is not one of the expected ones
:raise RuntimeError: Unknown opcode
"""
position = self.object_stream.tell()
(opid,) = self._readStruct(">B")
log_debug("OpCode: 0x{0:X} -- {1} (at offset 0x{2:X})"
.format(opid, OpCodeDebug.op_id(opid), position), ident)
if expect and opid not in expect:
raise IOError(
"Unexpected opcode 0x{0:X} -- {1} (at offset 0x{2:X})"
.format(opid, OpCodeDebug.op_id(opid), position))
try:
handler = self.opmap[opid]
except KeyError:
raise RuntimeError(
"Unknown OpCode in the stream: 0x{0:X} (at offset 0x{1:X})"
.format(opid, position))
else:
return opid, handler(ident=ident)
def _readStruct(self, unpack):
"""
Reads from the input stream, using struct
:param unpack: An unpack format string
:return: The result of struct.unpack (tuple)
:raise RuntimeError: End of stream reached during unpacking
"""
length = struct.calcsize(unpack)
ba = self.object_stream.read(length)
if len(ba) != length:
raise RuntimeError("Stream has been ended unexpectedly while "
"unmarshaling.")
return struct.unpack(unpack, ba)
def _readString(self, length_fmt="H"):
"""
Reads a serialized string
:param length_fmt: Structure format of the string length (H or Q)
:return: The deserialized string
:raise RuntimeError: Unexpected end of stream
"""
(length,) = self._readStruct(">{0}".format(length_fmt))
ba = self.object_stream.read(length)
return to_str(ba)
def do_classdesc(self, parent=None, ident=0):
"""
Handles a TC_CLASSDESC opcode
:param parent:
:param ident: Log indentation level
:return: A JavaClass object
"""
# TC_CLASSDESC className serialVersionUID newHandle classDescInfo
# classDescInfo:
# classDescFlags fields classAnnotation superClassDesc
# classDescFlags:
# (byte) // Defined in Terminal Symbols and Constants
# fields:
# (short)<count> fieldDesc[count]
# fieldDesc:
# primitiveDesc
# objectDesc
# primitiveDesc:
# prim_typecode fieldName
# objectDesc:
# obj_typecode fieldName className1
clazz = JavaClass()
log_debug("[classdesc]", ident)
class_name = self._readString()
clazz.name = class_name
log_debug("Class name: %s" % class_name, ident)
# serialVersionUID is a Java (signed) long => 8 bytes
serialVersionUID, classDescFlags = self._readStruct(">qB")
clazz.serialVersionUID = serialVersionUID
clazz.flags = classDescFlags
self._add_reference(clazz, ident)
log_debug("Serial: 0x{0:X} / {0:d} - classDescFlags: 0x{1:X} {2}"
.format(serialVersionUID, classDescFlags,
OpCodeDebug.flags(classDescFlags)), ident)
(length,) = self._readStruct(">H")
log_debug("Fields num: 0x{0:X}".format(length), ident)
clazz.fields_names = []
clazz.fields_types = []
for fieldId in range(length):
(typecode,) = self._readStruct(">B")
field_name = self._readString()
field_type = self._convert_char_to_type(typecode)
log_debug("> Reading field {0}".format(field_name), ident)
if field_type == self.TYPE_ARRAY:
_, field_type = self._read_and_exec_opcode(
ident=ident + 1,
expect=(self.TC_STRING, self.TC_REFERENCE))
if type(field_type) is not JavaString:
raise AssertionError("Field type must be a JavaString, "
"not {0}".format(type(field_type)))
elif field_type == self.TYPE_OBJECT:
_, field_type = self._read_and_exec_opcode(
ident=ident + 1,
expect=(self.TC_STRING, self.TC_REFERENCE))
if type(field_type) is JavaClass:
# FIXME: ugly trick
field_type = JavaString(field_type.name)
if type(field_type) is not JavaString:
raise AssertionError("Field type must be a JavaString, "
"not {0}".format(type(field_type)))
log_debug("< FieldName: 0x{0:X} Name:{1} Type:{2} ID:{3}"
.format(typecode, field_name, field_type, fieldId),
ident)
assert field_name is not None
assert field_type is not None
clazz.fields_names.append(field_name)
clazz.fields_types.append(field_type)
if parent:
parent.__fields = clazz.fields_names
parent.__types = clazz.fields_types
# classAnnotation
(opid,) = self._readStruct(">B")
log_debug("OpCode: 0x{0:X} -- {1} (classAnnotation)"
.format(opid, OpCodeDebug.op_id(opid)), ident)
if opid != self.TC_ENDBLOCKDATA:
raise NotImplementedError("classAnnotation isn't implemented yet")
# superClassDesc
log_debug("Reading Super Class of {0}".format(clazz.name), ident)
_, superclassdesc = self._read_and_exec_opcode(
ident=ident + 1,
expect=(self.TC_CLASSDESC, self.TC_NULL, self.TC_REFERENCE))
log_debug("Super Class for {0}: {1}"
.format(clazz.name, str(superclassdesc)), ident)
clazz.superclass = superclassdesc
return clazz
def do_blockdata(self, parent=None, ident=0):
"""
Handles TC_BLOCKDATA opcode
:param parent:
:param ident: Log indentation level
:return: A string containing the block data
"""
# TC_BLOCKDATA (unsigned byte)<size> (byte)[size]
log_debug("[blockdata]", ident)
(length,) = self._readStruct(">B")
ba = self.object_stream.read(length)
        # Ensure we have a str
return read_to_str(ba)
def do_blockdata_long(self, parent=None, ident=0):
"""
Handles TC_BLOCKDATALONG opcode
:param parent:
:param ident: Log indentation level
:return: A string containing the block data
"""
# TC_BLOCKDATALONG (int)<size> (byte)[size]
log_debug("[blockdatalong]", ident)
(length,) = self._readStruct(">I")
ba = self.object_stream.read(length)
        # Ensure we have a str
return read_to_str(ba)
def do_class(self, parent=None, ident=0):
"""
Handles TC_CLASS opcode
:param parent:
:param ident: Log indentation level
:return: A JavaClass object
"""
# TC_CLASS classDesc newHandle
log_debug("[class]", ident)
# TODO: what to do with "(ClassDesc)prevObject".
# (see 3rd line for classDesc:)
_, classdesc = self._read_and_exec_opcode(
ident=ident + 1,
expect=(self.TC_CLASSDESC, self.TC_PROXYCLASSDESC,
self.TC_NULL, self.TC_REFERENCE))
log_debug("Classdesc: {0}".format(classdesc), ident)
self._add_reference(classdesc, ident)
return classdesc
def do_object(self, parent=None, ident=0):
"""
Handles a TC_OBJECT opcode
:param parent:
:param ident: Log indentation level
:return: A JavaClass object
"""
# TC_OBJECT classDesc newHandle classdata[] // data for each class
java_object = JavaObject()
log_debug("[object]", ident)
log_debug("java_object.annotations just after instantiation: {0}"
.format(java_object.annotations), ident)
# TODO: what to do with "(ClassDesc)prevObject".
# (see 3rd line for classDesc:)
opcode, classdesc = self._read_and_exec_opcode(
ident=ident + 1,
expect=(self.TC_CLASSDESC, self.TC_PROXYCLASSDESC,
self.TC_NULL, self.TC_REFERENCE))
        # TC_REFERENCE is not listed in the spec at this point, but does appear in practice
# Create object
for transformer in self.object_transformers:
java_object = transformer.create(classdesc)
if java_object:
break
# Store classdesc of this object
java_object.classdesc = classdesc
# Store the reference
self._add_reference(java_object, ident)
# classdata[]
if classdesc.flags & self.SC_EXTERNALIZABLE \
and not classdesc.flags & self.SC_BLOCK_DATA:
# TODO:
raise NotImplementedError("externalContents isn't implemented yet")
if classdesc.flags & self.SC_SERIALIZABLE:
# TODO: look at ObjectInputStream.readSerialData()
# FIXME: Handle the SC_WRITE_METHOD flag
# create megalist
tempclass = classdesc
megalist = []
megatypes = []
log_debug("Constructing class...", ident)
while tempclass:
log_debug("Class: {0}".format(tempclass.name), ident + 1)
class_fields_str = ' - '.join(
' '.join((field_type, field_name))
for field_type, field_name
in zip(tempclass.fields_types, tempclass.fields_names))
if class_fields_str:
log_debug(class_fields_str, ident + 2)
fieldscopy = tempclass.fields_names[:]
fieldscopy.extend(megalist)
megalist = fieldscopy
fieldscopy = tempclass.fields_types[:]
fieldscopy.extend(megatypes)
megatypes = fieldscopy
tempclass = tempclass.superclass
log_debug("Values count: {0}".format(len(megalist)), ident)
log_debug("Prepared list of values: {0}".format(megalist), ident)
log_debug("Prepared list of types: {0}".format(megatypes), ident)
for field_name, field_type in zip(megalist, megatypes):
log_debug("Reading field: {0} - {1}"
.format(field_type, field_name))
res = self._read_value(field_type, ident, name=field_name)
java_object.__setattr__(field_name, res)
if classdesc.flags & self.SC_SERIALIZABLE \
and classdesc.flags & self.SC_WRITE_METHOD \
or classdesc.flags & self.SC_EXTERNALIZABLE \
and classdesc.flags & self.SC_BLOCK_DATA:
# objectAnnotation
log_debug("java_object.annotations before: {0}"
.format(java_object.annotations), ident)
while opcode != self.TC_ENDBLOCKDATA:
opcode, obj = self._read_and_exec_opcode(ident=ident + 1)
# , expect=[self.TC_ENDBLOCKDATA, self.TC_BLOCKDATA,
# self.TC_OBJECT, self.TC_NULL, self.TC_REFERENCE])
if opcode != self.TC_ENDBLOCKDATA:
java_object.annotations.append(obj)
log_debug("objectAnnotation value: {0}".format(obj), ident)
log_debug("java_object.annotations after: {0}"
.format(java_object.annotations), ident)
log_debug(">>> java_object: {0}".format(java_object), ident)
return java_object
def do_string(self, parent=None, ident=0):
"""
Handles a TC_STRING opcode
:param parent:
:param ident: Log indentation level
:return: A string
"""
log_debug("[string]", ident)
ba = JavaString(self._readString())
self._add_reference(ba, ident)
return ba
def do_string_long(self, parent=None, ident=0):
"""
Handles a TC_LONGSTRING opcode
:param parent:
:param ident: Log indentation level
:return: A string
"""
log_debug("[long string]", ident)
ba = JavaString(self._readString("Q"))
self._add_reference(ba, ident)
return ba
def do_array(self, parent=None, ident=0):
"""
Handles a TC_ARRAY opcode
:param parent:
:param ident: Log indentation level
:return: A list of deserialized objects
"""
# TC_ARRAY classDesc newHandle (int)<size> values[size]
log_debug("[array]", ident)
_, classdesc = self._read_and_exec_opcode(
ident=ident + 1,
expect=(self.TC_CLASSDESC, self.TC_PROXYCLASSDESC,
self.TC_NULL, self.TC_REFERENCE))
array = JavaArray(classdesc)
self._add_reference(array, ident)
(size,) = self._readStruct(">i")
log_debug("size: {0}".format(size), ident)
type_char = classdesc.name[0]
assert type_char == self.TYPE_ARRAY
type_char = classdesc.name[1]
if type_char == self.TYPE_OBJECT or type_char == self.TYPE_ARRAY:
for _ in range(size):
_, res = self._read_and_exec_opcode(ident=ident + 1)
log_debug("Object value: {0}".format(res), ident)
array.append(res)
elif type_char == self.TYPE_BYTE:
array = JavaByteArray(self.object_stream.read(size), classdesc)
elif self.use_numpy_arrays:
import numpy
array = numpy.fromfile(
self.object_stream,
dtype=JavaObjectConstants.NUMPY_TYPE_MAP[type_char],
count=size)
else:
for _ in range(size):
res = self._read_value(type_char, ident)
log_debug("Native value: {0}".format(res), ident)
array.append(res)
return array
def do_reference(self, parent=None, ident=0):
"""
Handles a TC_REFERENCE opcode
:param parent:
:param ident: Log indentation level
:return: The referenced object
"""
(handle,) = self._readStruct(">L")
log_debug("## Reference handle: 0x{0:X}".format(handle), ident)
ref = self.references[handle - self.BASE_REFERENCE_IDX]
log_debug("###-> Type: {0} - Value: {1}".format(type(ref), ref), ident)
return ref
@staticmethod
def do_null(parent=None, ident=0):
"""
Handles a TC_NULL opcode
:param parent:
:param ident: Log indentation level
:return: Always None
"""
return None
def do_enum(self, parent=None, ident=0):
"""
Handles a TC_ENUM opcode
:param parent:
:param ident: Log indentation level
:return: A JavaEnum object
"""
# TC_ENUM classDesc newHandle enumConstantName
enum = JavaEnum()
_, classdesc = self._read_and_exec_opcode(
ident=ident + 1,
expect=(self.TC_CLASSDESC, self.TC_PROXYCLASSDESC,
self.TC_NULL, self.TC_REFERENCE))
enum.classdesc = classdesc
self._add_reference(enum, ident)
_, enumConstantName = self._read_and_exec_opcode(
ident=ident + 1, expect=(self.TC_STRING, self.TC_REFERENCE))
enum.constant = enumConstantName
return enum
@staticmethod
def _create_hexdump(src, start_offset=0, length=16):
"""
        Prepares a hexadecimal dump string
:param src: A string containing binary data
:param start_offset: The start offset of the source
:param length: Length of a dump line
:return: A dump string
"""
FILTER = ''.join((len(repr(chr(x))) == 3) and chr(x) or '.'
for x in range(256))
pattern = "{{0:04X}} {{1:<{0}}} {{2}}\n".format(length * 3)
# Convert raw data to str (Python 3 compatibility)
src = to_str(src, 'latin-1')
result = []
for i in range(0, len(src), length):
s = src[i:i + length]
hexa = ' '.join("{0:02X}".format(ord(x)) for x in s)
printable = s.translate(FILTER)
result.append(pattern.format(i + start_offset, hexa, printable))
return ''.join(result)
def _read_value(self, field_type, ident, name=""):
"""
Reads the next value, of the given type
:param field_type: A serialization typecode
:param ident: Log indentation
:param name: Field name (for logs)
:return: The read value
:raise RuntimeError: Unknown field type
"""
if len(field_type) > 1:
# We don't need details for arrays and objects
field_type = field_type[0]
if field_type == self.TYPE_BOOLEAN:
(val,) = self._readStruct(">B")
res = bool(val)
elif field_type == self.TYPE_BYTE:
(res,) = self._readStruct(">b")
        elif field_type == self.TYPE_CHAR:
            # TYPE_CHAR is defined by the serialization specification
            # but not used by the reference implementation, so this is
            # hypothetical code: read two unsigned bytes, decode as UTF-16-BE
            res = bytes(self._readStruct(">BB")).decode("utf-16-be")
elif field_type == self.TYPE_SHORT:
(res,) = self._readStruct(">h")
elif field_type == self.TYPE_INTEGER:
(res,) = self._readStruct(">i")
elif field_type == self.TYPE_LONG:
(res,) = self._readStruct(">q")
elif field_type == self.TYPE_FLOAT:
(res,) = self._readStruct(">f")
elif field_type == self.TYPE_DOUBLE:
(res,) = self._readStruct(">d")
elif field_type == self.TYPE_OBJECT or field_type == self.TYPE_ARRAY:
_, res = self._read_and_exec_opcode(ident=ident + 1)
else:
raise RuntimeError("Unknown typecode: {0}".format(field_type))
log_debug("* {0} {1}: {2}".format(field_type, name, res), ident)
return res
def _convert_char_to_type(self, type_char):
"""
Ensures a read character is a typecode.
:param type_char: Read typecode
:return: The typecode as a string (using chr)
:raise RuntimeError: Unknown typecode
"""
typecode = type_char
if type(type_char) is int:
typecode = chr(type_char)
if typecode in self.TYPECODES_LIST:
return typecode
else:
raise RuntimeError("Typecode {0} ({1}) isn't supported."
.format(type_char, typecode))
def _add_reference(self, obj, ident=0):
"""
Adds a read reference to the marshaler storage
:param obj: Reference to add
:param ident: Log indentation level
"""
log_debug("## New reference handle 0x{0:X}: {1} -> {2}"
.format(len(self.references) + self.BASE_REFERENCE_IDX,
type(obj).__name__, obj), ident)
self.references.append(obj)
def _oops_dump_state(self, ignore_remaining_data=False):
"""
Log a deserialization error
:param ignore_remaining_data: If True, don't log an error when
unused trailing bytes are remaining
"""
log_error("==Oops state dump" + "=" * (30 - 17))
log_error("References: {0}".format(self.references))
log_error("Stream seeking back at -16 byte (2nd line is an actual "
"position!):")
# Do not use a keyword argument
self.object_stream.seek(-16, os.SEEK_CUR)
position = self.object_stream.tell()
the_rest = self.object_stream.read()
if not ignore_remaining_data and len(the_rest):
log_error("Warning!!!!: Stream still has {0} bytes left."
.format(len(the_rest)))
log_error(self._create_hexdump(the_rest, position))
log_error("=" * 30)
# ------------------------------------------------------------------------------
class JavaObjectMarshaller(JavaObjectConstants):
"""
Serializes objects into Java serialization format
"""
def __init__(self, stream=None):
"""
Sets up members
:param stream: An output stream
"""
self.object_stream = stream
self.object_obj = None
self.object_transformers = []
self.references = []
def add_transformer(self, transformer):
"""
Appends an object transformer to the serialization process
:param transformer: An object with a transform(obj) method
"""
self.object_transformers.append(transformer)
def dump(self, obj):
"""
Dumps the given object in the Java serialization format
"""
self.references = []
self.object_obj = obj
self.object_stream = BytesIO()
self._writeStreamHeader()
self.writeObject(obj)
return self.object_stream.getvalue()
def _writeStreamHeader(self):
"""
Writes the Java serialization magic header in the serialization stream
"""
self._writeStruct(">HH", 4, (self.STREAM_MAGIC, self.STREAM_VERSION))
def writeObject(self, obj):
"""
Appends an object to the serialization stream
:param obj: A string or a deserialized Java object
:raise RuntimeError: Unsupported type
"""
log_debug("Writing object of type {0}".format(type(obj).__name__))
if isinstance(obj, JavaArray):
# Deserialized Java array
self.write_array(obj)
elif isinstance(obj, JavaEnum):
# Deserialized Java Enum
self.write_enum(obj)
elif isinstance(obj, JavaObject):
# Deserialized Java object
self.write_object(obj)
elif isinstance(obj, JavaString):
# Deserialized String
self.write_string(obj)
elif isinstance(obj, JavaClass):
# Java class
self.write_class(obj)
elif obj is None:
# Null
self.write_null()
elif type(obj) is str:
# String value
self.write_blockdata(obj)
else:
# Unhandled type
raise RuntimeError("Object serialization of type {0} is not "
"supported.".format(type(obj)))
def _writeStruct(self, unpack, length, args):
"""
Appends data to the serialization stream
:param unpack: Struct format string
:param length: Unused
:param args: Struct arguments
"""
ba = struct.pack(unpack, *args)
self.object_stream.write(ba)
def _writeString(self, obj, use_reference=True):
"""
Appends a string to the serialization stream
:param obj: String to serialize
:param use_reference: If True, allow writing a reference
"""
# TODO: Convert to "modified UTF-8"
# http://docs.oracle.com/javase/7/docs/api/java/io/DataInput.html#modified-utf-8
string = to_bytes(obj, "utf-8")
if use_reference and isinstance(obj, JavaString):
try:
idx = self.references.index(obj)
except ValueError:
# First appearance of the string
self.references.append(obj)
logging.debug(
"*** Adding ref 0x%X for string: %s",
len(self.references) - 1 + self.BASE_REFERENCE_IDX, obj)
self._writeStruct(">H", 2, (len(string),))
self.object_stream.write(string)
else:
# Write a reference to the previous type
logging.debug("*** Reusing ref 0x%X for string: %s",
idx + self.BASE_REFERENCE_IDX, obj)
self.write_reference(idx)
else:
self._writeStruct(">H", 2, (len(string),))
self.object_stream.write(string)
def write_string(self, obj, use_reference=True):
"""
Writes a Java string with the TC_STRING type marker
        :param obj: The string to write
:param use_reference: If True, allow writing a reference
"""
if use_reference and isinstance(obj, JavaString):
try:
idx = self.references.index(obj)
except ValueError:
# String is not referenced: let _writeString store it
self._writeStruct(">B", 1, (self.TC_STRING,))
self._writeString(obj, use_reference)
else:
# Reuse the referenced string
logging.debug("*** Reusing ref 0x%X for String: %s",
idx + self.BASE_REFERENCE_IDX, obj)
self.write_reference(idx)
else:
# Don't use references
self._writeStruct(">B", 1, (self.TC_STRING,))
self._writeString(obj, use_reference)
def write_enum(self, obj):
"""
Writes an Enum value
:param obj: A JavaEnum object
"""
# FIXME: the output doesn't have the same references as the real
# serializable form
self._writeStruct(">B", 1, (self.TC_ENUM,))
try:
idx = self.references.index(obj)
except ValueError:
# New reference
self.references.append(obj)
logging.debug(
"*** Adding ref 0x%X for enum: %s",
len(self.references) - 1 + self.BASE_REFERENCE_IDX, obj)
self.write_classdesc(obj.get_class())
else:
self.write_reference(idx)
self.write_string(obj.constant)
def write_blockdata(self, obj, parent=None):
"""
Appends a block of data to the serialization stream
:param obj: String form of the data block
"""
if type(obj) is str:
# Latin-1: keep bytes as is
obj = to_bytes(obj, "latin-1")
length = len(obj)
        if length < 256:
# Small block data
# TC_BLOCKDATA (unsigned byte)<size> (byte)[size]
self._writeStruct(">B", 1, (self.TC_BLOCKDATA,))
self._writeStruct(">B", 1, (length,))
else:
# Large block data
# TC_BLOCKDATALONG (unsigned int)<size> (byte)[size]
self._writeStruct(">B", 1, (self.TC_BLOCKDATALONG,))
self._writeStruct(">I", 1, (length,))
self.object_stream.write(obj)
def write_null(self):
"""
Writes a "null" value
"""
self._writeStruct(">B", 1, (self.TC_NULL,))
def write_object(self, obj, parent=None):
"""
        Writes an object (class description, fields and annotations) to the
        serialization stream
        :param obj: The JavaObject to serialize
        :param parent: Not used
"""
# Transform object
for transformer in self.object_transformers:
tmp_object = transformer.transform(obj)
if tmp_object is not obj:
obj = tmp_object
break
self._writeStruct(">B", 1, (self.TC_OBJECT,))
cls = obj.get_class()
self.write_classdesc(cls)
# Add reference
self.references.append([])
logging.debug(
"*** Adding ref 0x%X for object %s",
len(self.references) - 1 + self.BASE_REFERENCE_IDX, obj)
all_names = collections.deque()
all_types = collections.deque()
tmpcls = cls
while tmpcls:
all_names.extendleft(reversed(tmpcls.fields_names))
all_types.extendleft(reversed(tmpcls.fields_types))
tmpcls = tmpcls.superclass
del tmpcls
logging.debug("<=> Field names: %s", all_names)
logging.debug("<=> Field types: %s", all_types)
for field_name, field_type in zip(all_names, all_types):
try:
logging.debug("Writing field %s (%s): %s",
field_name, field_type, getattr(obj, field_name))
self._write_value(field_type, getattr(obj, field_name))
except AttributeError as ex:
log_error("No attribute {0} for object {1}\nDir: {2}"
.format(ex, repr(obj), dir(obj)))
raise
del all_names, all_types
if cls.flags & self.SC_SERIALIZABLE \
and cls.flags & self.SC_WRITE_METHOD \
or cls.flags & self.SC_EXTERNALIZABLE \
and cls.flags & self.SC_BLOCK_DATA:
for annotation in obj.annotations:
log_debug("Write annotation {0} for {1}"
.format(repr(annotation), repr(obj)))
if annotation is None:
self.write_null()
else:
self.writeObject(annotation)
self._writeStruct('>B', 1, (self.TC_ENDBLOCKDATA,))
def write_class(self, obj, parent=None):
"""
Writes a class to the stream
:param obj: A JavaClass object
:param parent:
"""
self._writeStruct(">B", 1, (self.TC_CLASS,))
self.write_classdesc(obj)
def write_classdesc(self, obj, parent=None):
"""
Writes a class description
:param obj: Class description to write
:param parent:
"""
if obj not in self.references:
# Add reference
self.references.append(obj)
logging.debug(
"*** Adding ref 0x%X for classdesc %s",
len(self.references) - 1 + self.BASE_REFERENCE_IDX, obj.name)
self._writeStruct(">B", 1, (self.TC_CLASSDESC,))
self._writeString(obj.name)
self._writeStruct(">qB", 1, (obj.serialVersionUID, obj.flags))
self._writeStruct(">H", 1, (len(obj.fields_names),))
for field_name, field_type \
in zip(obj.fields_names, obj.fields_types):
self._writeStruct(
">B", 1, (self._convert_type_to_char(field_type),))
self._writeString(field_name)
if field_type[0] in (self.TYPE_OBJECT, self.TYPE_ARRAY):
try:
idx = self.references.index(field_type)
except ValueError:
# First appearance of the type
self.references.append(field_type)
logging.debug(
"*** Adding ref 0x%X for field type %s",
len(self.references) - 1 + self.BASE_REFERENCE_IDX,
field_type)
self.write_string(field_type, False)
else:
# Write a reference to the previous type
logging.debug("*** Reusing ref 0x%X for %s (%s)",
idx + self.BASE_REFERENCE_IDX,
field_type, field_name)
self.write_reference(idx)
self._writeStruct(">B", 1, (self.TC_ENDBLOCKDATA,))
if obj.superclass:
self.write_classdesc(obj.superclass)
else:
self.write_null()
else:
# Use reference
self.write_reference(self.references.index(obj))
def write_reference(self, ref_index):
"""
Writes a reference
:param ref_index: Local index (0-based) to the reference
"""
self._writeStruct(
">BL", 1, (self.TC_REFERENCE, ref_index + self.BASE_REFERENCE_IDX))
def write_array(self, obj):
"""
Writes a JavaArray
:param obj: A JavaArray object
"""
classdesc = obj.get_class()
self._writeStruct(">B", 1, (self.TC_ARRAY,))
self.write_classdesc(classdesc)
self._writeStruct(">i", 1, (len(obj),))
# Add reference
self.references.append(obj)
logging.debug(
"*** Adding ref 0x%X for array []",
len(self.references) - 1 + self.BASE_REFERENCE_IDX)
type_char = classdesc.name[0]
assert type_char == self.TYPE_ARRAY
type_char = classdesc.name[1]
if type_char == self.TYPE_OBJECT:
for o in obj:
self._write_value(classdesc.name[1:], o)
elif type_char == self.TYPE_ARRAY:
for a in obj:
self.write_array(a)
else:
log_debug("Write array of type %s" % type_char)
for v in obj:
self._write_value(type_char, v)
def _write_value(self, field_type, value):
"""
Writes an item of an array
:param field_type: Value type
:param value: The value itself
"""
if len(field_type) > 1:
# We don't need details for arrays and objects
field_type = field_type[0]
if field_type == self.TYPE_BOOLEAN:
self._writeStruct(">B", 1, (1 if value else 0,))
elif field_type == self.TYPE_BYTE:
self._writeStruct(">b", 1, (value,))
elif field_type == self.TYPE_SHORT:
self._writeStruct(">h", 1, (value,))
elif field_type == self.TYPE_INTEGER:
self._writeStruct(">i", 1, (value,))
elif field_type == self.TYPE_LONG:
self._writeStruct(">q", 1, (value,))
elif field_type == self.TYPE_FLOAT:
self._writeStruct(">f", 1, (value,))
elif field_type == self.TYPE_DOUBLE:
self._writeStruct(">d", 1, (value,))
elif field_type == self.TYPE_OBJECT or field_type == self.TYPE_ARRAY:
if value is None:
self.write_null()
elif isinstance(value, JavaEnum):
self.write_enum(value)
elif isinstance(value, (JavaArray, JavaByteArray)):
self.write_array(value)
elif isinstance(value, JavaObject):
self.write_object(value)
elif isinstance(value, JavaString):
self.write_string(value)
elif isinstance(value, str):
self.write_blockdata(value)
else:
raise RuntimeError("Unknown typecode: {0}".format(field_type))
else:
raise RuntimeError("Unknown typecode: {0}".format(field_type))
def _convert_type_to_char(self, type_char):
"""
        Converts the given type code to its ordinal (int) value
:param type_char: A type code character
"""
typecode = type_char
if type(type_char) is int:
typecode = chr(type_char)
if typecode in self.TYPECODES_LIST:
return ord(typecode)
elif len(typecode) > 1:
if typecode[0] == 'L':
return ord(self.TYPE_OBJECT)
elif typecode[0] == '[':
return ord(self.TYPE_ARRAY)
raise RuntimeError("Typecode {0} ({1}) isn't supported."
.format(type_char, typecode))
# ------------------------------------------------------------------------------
class DefaultObjectTransformer(object):
"""
Default transformer for the deserialized objects.
Converts JavaObject objects to Python types (maps, lists, ...)
"""
class JavaList(list, JavaObject):
"""
Python-Java list bridge type
"""
def __init__(self, *args, **kwargs):
list.__init__(self, *args, **kwargs)
JavaObject.__init__(self)
class JavaMap(dict, JavaObject):
"""
Python-Java dictionary/map bridge type
"""
def __init__(self, *args, **kwargs):
dict.__init__(self, *args, **kwargs)
JavaObject.__init__(self)
TYPE_MAPPER = {
"java.util.ArrayList": JavaList,
"java.util.LinkedList": JavaList,
"java.util.HashMap": JavaMap,
"java.util.LinkedHashMap": JavaMap,
"java.util.TreeMap": JavaMap,
}
def create(self, classdesc):
"""
        Creates the Python object matching a deserialized Java class description
        :param classdesc: The description of a Java class
        :return: An instance of the mapped Python bridge type, or a plain
                 JavaObject if the class is not mapped
"""
try:
mapped_type = self.TYPE_MAPPER[classdesc.name]
except KeyError:
# Return a JavaObject by default
return JavaObject()
else:
log_debug("---")
log_debug(classdesc.name)
log_debug("---")
java_object = mapped_type()
log_debug(">>> java_object: {0}".format(java_object))
return java_object
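# ------------------------------------------------------------------------------
# Hypothetical usage sketch (not part of the original module): it relies only
# on names defined above (JavaObjectMarshaller, JavaString) and on the public
# dump() method shown in this file.
def _marshaller_usage_example():
    """
    Minimal sketch: serializes a JavaString and returns the raw bytes, which
    start with the stream magic (0xACED) and version (5) written by
    _writeStreamHeader(), followed by a TC_STRING record for the text.
    """
    marshaller = JavaObjectMarshaller()
    return marshaller.dump(JavaString("hello"))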
|
|
"""Copyright (c) 2015 Francesco Mastellone
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
"""
The road system is implemented as a graph of inter-referenced Way objects.
Ways keep track of which Vehicles are travelling along them, to inform other
Vehicles of who's in front of them and how far ahead they are, so that they may
keep a safe distance.
Intersections are a considerable approximation in that only one Way at a time is
allowed through. Still figuring out ways to prevent the collisions that would
result otherwise.
Cars have their origin in the back.
Each Way has a traffic_light value, that Intersections set.
...There's surely a better way to do that.
"""
from random import random, choice
from math import hypot
MAX_POS = 9999.
class Intersection:
def __init__(self, ways=None):
self.ways = ways if ways else []
self.i = 0 # Active way
self.t = 0.
self.go = True
self.go_time = 6.
self.stop_time = 2. # Yellow traffic light
def update(self, dt):
self.t -= dt
if self.t < 0.:
if self.go:
self.ways[self.i].traffic_light = 'yellow'
self.t = self.stop_time
self.go = False
else:
self.ways[self.i].traffic_light = 'red'
self.i += 1
self.i %= len(self.ways)
self.ways[self.i].traffic_light = 'green'
self.t = self.go_time
self.go = True
class Way:
def __init__(self, speed_limit=13.89/8.):
self.traffic_light = 'green'
self.speed_limit = speed_limit # m/s
self.to = [] # Directed graph neighbors
self.cars = []
def next_car(self, car):
"""Picks the next [pos, car] from self and self.to"""
self.cars.sort(key=lambda car: car.pos)
i = self.cars.index(car)
if i + 1 < len(self.cars):
car2 = self.cars[i + 1]
return car2.pos, car2
elif self.to and self.to[0].cars: # TODO foresee where car goes
car2 = self.to[0].cars[0]
return car2.pos + self.length, car2
else:
return None, None
def next_obstacle_position(self, car):
"""Returns distance from obstacle obstacle.pos or None if no obstacles
are in sight"""
obpos = MAX_POS # Obstacle position
# Traffic lights
if self.traffic_light == 'red':
obpos = self.length
elif self.traffic_light == 'yellow':
if car.pos + car.safety_distance < self.length:
obpos = self.length
next_car_pos, _ = self.next_car(car)
        if next_car_pos is not None and next_car_pos < obpos:
obpos = next_car_pos
return obpos
def reach(self, car):
self.cars.insert(0, car)
def leave(self, car):
self.cars.remove(car)
class LinearWay(Way):
def __init__(self, x0, y0, x1, y1, *args, **kwargs):
super().__init__(*args, **kwargs)
self.x0 = x0
self.y0 = y0
self.dx = x1 - x0
self.dy = y1 - y0
self.length = hypot(x1 - x0, y1 - y0) # m
x1 = property(lambda self: self.x0 + self.dx)
y1 = property(lambda self: self.y0 + self.dy)
def position_car(self, car):
car.x = self.x0 + self.dx * (car.pos / self.length)
car.y = self.y0 + self.dy * (car.pos / self.length)
class BezierWay(Way):
def __init__(self, x0, y0, x1, y1, x2, y2, x3, y3, *args, **kwargs):
super().__init__(*args, **kwargs)
self.x0 = x0
self.y0 = y0
self.x1 = x1
self.y1 = y1
self.x2 = x2
self.y2 = y2
self.x3 = x3
self.y3 = y3
self.length = 0.
xp = self.x0
yp = self.y0
for i in range(1, 65):
t = float(i) / 64.
u = 1. - t
x = u*u*u*self.x0 + \
3.*u*u*t*self.x1 + \
3.*u*t*t*self.x2 + \
t*t*t*self.x3
y = u*u*u*self.y0 + \
3.*u*u*t*self.y1 + \
3.*u*t*t*self.y2 + \
t*t*t*self.y3
self.length += hypot(x - xp, y - yp)
xp = x
yp = y
def position_car(self, car):
t = car.pos / self.length
u = 1. - t
car.x = u*u*u*self.x0 + \
3.*u*u*t*self.x1 + \
3.*u*t*t*self.x2 + \
t*t*t*self.x3
car.y = u*u*u*self.y0 + \
3.*u*u*t*self.y1 + \
3.*u*t*t*self.y2 + \
t*t*t*self.y3
class Vehicle:
length = 4.
acceleration = 7.84 / 2.
    deceleration = 7.84  # Max braking with g=9.81 m/s^2, friction coefficient 0.8
def __init__(self, way):
self.pos = 0. # m along current way
self.speed = 0. # m/s
self.speed_mul = 1.0 - 0.3 * random() # % of speedlimit this car reaches
self.way = way
self.x = self.xp = way.x0
self.y = self.yp = way.y0
def update(self, dt):
if self.pos > self.way.length:
self.way.leave(self)
self.pos -= self.way.length
if self.way.to:
self.way = choice(self.way.to)
self.way.reach(self)
else:
self.on_dead_end()
w = self.way
# Reach top speed / Decelerate to top speed
if self.speed < w.speed_limit * self.speed_mul:
acc = self.acceleration
else:
acc = 0.
# Keep safe distance from obstacles(cars, traffic stops...)
obstacle_pos = w.next_obstacle_position(self)
obstacle_dist = obstacle_pos - self.pos
if obstacle_dist < self.safety_distance + self.length * 1.5:
acc = -self.deceleration
self.speed += acc * dt
if self.speed < 0.:
self.speed = 0.
self.pos += self.speed * dt
# update coordinates
self.xp = self.x
self.yp = self.y
self.way.position_car(self)
@property
def safety_distance(self):
t = self.speed / self.deceleration # Braking time
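        # Deliberately conservative: speed*t + deceleration*t*t = 2*v^2/a,
        # i.e. four times the ideal braking distance v^2/(2a).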
return self.speed * t + self.deceleration * t * t
def on_dead_end(self):
"""Called when leaving a dead end Way. e.g.: to destroy self."""
pass
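# Hypothetical smoke test, not part of the original module: it wires two of
# the LinearWays above into a loop, puts them behind one Intersection, drops
# a single Vehicle on the first Way and steps the simulation for a minute.
# All names used here are defined above; the numbers are arbitrary.
if __name__ == '__main__':
    a = LinearWay(0., 0., 100., 0.)
    b = LinearWay(100., 0., 0., 0.)
    a.to.append(b)
    b.to.append(a)
    crossing = Intersection([a, b])
    car = Vehicle(a)
    a.reach(car)
    dt = 0.1
    for _ in range(600):  # 60 simulated seconds
        crossing.update(dt)
        car.update(dt)
    print("car at ({:.1f}, {:.1f}), speed {:.1f} m/s, light {}".format(
        car.x, car.y, car.speed, car.way.traffic_light))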
|
|
#!/usr/bin/env python
# -*- encoding:utf-8 -*-
"""
git-authors [OPTIONS] REV1..REV2
List the authors who contributed within a given revision interval.
To change the name mapping, edit the .mailmap file at the top level of the
repository.
"""
# Author: Pauli Virtanen <pav@iki.fi>. This script is in the public domain.
import optparse
import re
import sys
import os
import io
import subprocess
stdout_b = sys.stdout.buffer
MAILMAP_FILE = os.path.join(os.path.dirname(__file__), "..", ".mailmap")
def main():
p = optparse.OptionParser(__doc__.strip())
p.add_option("-d", "--debug", action="store_true",
help="print debug output")
p.add_option("-n", "--new", action="store_true",
help="print debug output")
options, args = p.parse_args()
if len(args) != 1:
p.error("invalid number of arguments")
try:
rev1, rev2 = args[0].split('..')
except ValueError:
p.error("argument is not a revision range")
NAME_MAP = load_name_map(MAILMAP_FILE)
# Analyze log data
all_authors = set()
authors = set()
def analyze_line(line, names, disp=False):
line = line.strip().decode('utf-8')
# Check the commit author name
m = re.match(u'^@@@([^@]*)@@@', line)
if m:
name = m.group(1)
line = line[m.end():]
name = NAME_MAP.get(name, name)
if disp:
if name not in names:
stdout_b.write((" - Author: %s\n" % name).encode('utf-8'))
names.add(name)
# Look for "thanks to" messages in the commit log
m = re.search(r'([Tt]hanks to|[Cc]ourtesy of) ([A-Z][A-Za-z]*? [A-Z][A-Za-z]*? [A-Z][A-Za-z]*|[A-Z][A-Za-z]*? [A-Z]\. [A-Z][A-Za-z]*|[A-Z][A-Za-z ]*? [A-Z][A-Za-z]*|[a-z0-9]+)($|\.| )', line)
if m:
name = m.group(2)
if name not in (u'this',):
if disp:
stdout_b.write(" - Log : %s\n" % line.strip().encode('utf-8'))
name = NAME_MAP.get(name, name)
names.add(name)
line = line[m.end():].strip()
line = re.sub(r'^(and|, and|, ) ', u'Thanks to ', line)
analyze_line(line.encode('utf-8'), names)
# Find all authors before the named range
for line in git.pipe('log', '--pretty=@@@%an@@@%n@@@%cn@@@%n%b',
'%s' % (rev1,)):
analyze_line(line, all_authors)
# Find authors in the named range
for line in git.pipe('log', '--pretty=@@@%an@@@%n@@@%cn@@@%n%b',
'%s..%s' % (rev1, rev2)):
analyze_line(line, authors, disp=options.debug)
# Sort
def name_key(fullname):
m = re.search(u' [a-z ]*[A-Za-z-]+$', fullname)
if m:
forename = fullname[:m.start()].strip()
surname = fullname[m.start():].strip()
else:
forename = ""
surname = fullname.strip()
if surname.startswith(u'van der '):
surname = surname[8:]
if surname.startswith(u'de '):
surname = surname[3:]
if surname.startswith(u'von '):
surname = surname[4:]
return (surname.lower(), forename.lower())
# generate set of all new authors
if vars(options)['new']:
new_authors = authors.difference(all_authors)
n_authors = list(new_authors)
n_authors.sort(key=name_key)
# Print some empty lines to separate
stdout_b.write(("\n\n").encode('utf-8'))
for author in n_authors:
stdout_b.write(("- %s\n" % author).encode('utf-8'))
# return for early exit so we only print new authors
return
authors = list(authors)
authors.sort(key=name_key)
# Print
stdout_b.write(b"""
Authors
=======
""")
for author in authors:
if author in all_authors:
stdout_b.write(("* %s\n" % author).encode('utf-8'))
else:
stdout_b.write(("* %s +\n" % author).encode('utf-8'))
stdout_b.write(("""
A total of %(count)d people contributed to this release.
People with a "+" by their names contributed a patch for the first time.
This list of names is automatically generated, and may not be fully complete.
""" % dict(count=len(authors))).encode('utf-8'))
stdout_b.write(("\nNOTE: Check this list manually! It is automatically generated "
"and some names\n may be missing.\n").encode('utf-8'))
def load_name_map(filename):
name_map = {}
with io.open(filename, 'r', encoding='utf-8') as f:
for line in f:
line = line.strip()
if line.startswith(u"#") or not line:
continue
m = re.match(r'^(.*?)\s*<(.*?)>(.*?)\s*<(.*?)>\s*$', line)
if not m:
print("Invalid line in .mailmap: '{!r}'".format(line), file=sys.stderr)
sys.exit(1)
new_name = m.group(1).strip()
old_name = m.group(3).strip()
if old_name and new_name:
name_map[old_name] = new_name
return name_map
#------------------------------------------------------------------------------
# Communicating with Git
#------------------------------------------------------------------------------
class Cmd:
executable = None
def __init__(self, executable):
self.executable = executable
def _call(self, command, args, kw, repository=None, call=False):
cmd = [self.executable, command] + list(args)
cwd = None
if repository is not None:
cwd = os.getcwd()
os.chdir(repository)
try:
if call:
return subprocess.call(cmd, **kw)
else:
return subprocess.Popen(cmd, **kw)
finally:
if cwd is not None:
os.chdir(cwd)
def __call__(self, command, *a, **kw):
ret = self._call(command, a, {}, call=True, **kw)
if ret != 0:
raise RuntimeError("%s failed" % self.executable)
def pipe(self, command, *a, **kw):
stdin = kw.pop('stdin', None)
p = self._call(command, a, dict(stdin=stdin, stdout=subprocess.PIPE),
call=False, **kw)
return p.stdout
def read(self, command, *a, **kw):
p = self._call(command, a, dict(stdout=subprocess.PIPE),
call=False, **kw)
out, err = p.communicate()
if p.returncode != 0:
raise RuntimeError("%s failed" % self.executable)
return out
def readlines(self, command, *a, **kw):
out = self.read(command, *a, **kw)
        return out.rstrip(b"\n").split(b"\n")
def test(self, command, *a, **kw):
ret = self._call(command, a, dict(stdout=subprocess.PIPE,
stderr=subprocess.PIPE),
call=True, **kw)
return (ret == 0)
git = Cmd("git")
#------------------------------------------------------------------------------
if __name__ == "__main__":
main()
|
|
# Copyright 2015 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import mock
def _make_credentials():
import google.auth.credentials
return mock.Mock(spec=google.auth.credentials.Credentials)
class TestClient(unittest.TestCase):
PROJECT = "PROJECT"
ZONE_NAME = "zone-name"
@staticmethod
def _get_target_class():
from google.cloud.dns.client import Client
return Client
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def test_ctor_defaults(self):
from google.api_core.client_info import ClientInfo
from google.cloud.dns._http import Connection
creds = _make_credentials()
http = object()
client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
self.assertIsInstance(client._connection, Connection)
self.assertIs(client._connection.credentials, creds)
self.assertIs(client._connection.http, http)
self.assertIsInstance(client._connection._client_info, ClientInfo)
self.assertEqual(
client._connection.API_BASE_URL, client._connection.DEFAULT_API_ENDPOINT
)
def test_ctor_w_client_info(self):
from google.api_core.client_info import ClientInfo
from google.cloud.dns._http import Connection
client_info = ClientInfo()
creds = _make_credentials()
http = object()
client = self._make_one(
project=self.PROJECT, credentials=creds, _http=http, client_info=client_info
)
self.assertIsInstance(client._connection, Connection)
self.assertIs(client._connection.credentials, creds)
self.assertIs(client._connection.http, http)
self.assertIs(client._connection._client_info, client_info)
def test_ctor_w_empty_client_options_object(self):
from google.api_core.client_info import ClientInfo
from google.api_core.client_options import ClientOptions
from google.cloud.dns._http import Connection
creds = _make_credentials()
http = object()
client = self._make_one(
project=self.PROJECT,
credentials=creds,
_http=http,
client_options=ClientOptions(),
)
self.assertIsInstance(client._connection, Connection)
self.assertIs(client._connection.credentials, creds)
self.assertIs(client._connection.http, http)
self.assertIsInstance(client._connection._client_info, ClientInfo)
self.assertEqual(
client._connection.API_BASE_URL, client._connection.DEFAULT_API_ENDPOINT
)
def test_ctor_w_client_options_object(self):
from google.api_core.client_options import ClientOptions
api_endpoint = "https://foo-dns.googleapis.com"
creds = _make_credentials()
http = object()
client_options = ClientOptions(api_endpoint=api_endpoint)
client = self._make_one(
project=self.PROJECT,
credentials=creds,
_http=http,
client_options=client_options,
)
self.assertEqual(client._connection.API_BASE_URL, api_endpoint)
def test_ctor_w_client_options_dict(self):
api_endpoint = "https://foo-dns.googleapis.com"
creds = _make_credentials()
http = object()
client_options = {"api_endpoint": api_endpoint}
client = self._make_one(
project=self.PROJECT,
credentials=creds,
_http=http,
client_options=client_options,
)
self.assertEqual(client._connection.API_BASE_URL, api_endpoint)
def test_quotas_defaults(self):
PATH = "projects/%s" % (self.PROJECT,)
MANAGED_ZONES = 1234
RRS_PER_RRSET = 23
RRSETS_PER_ZONE = 345
RRSET_ADDITIONS = 456
RRSET_DELETIONS = 567
TOTAL_SIZE = 67890
DATA = {
"quota": {
"managedZones": MANAGED_ZONES,
"resourceRecordsPerRrset": RRS_PER_RRSET,
"rrsetsPerManagedZone": RRSETS_PER_ZONE,
"rrsetAdditionsPerChange": RRSET_ADDITIONS,
"rrsetDeletionsPerChange": RRSET_DELETIONS,
"totalRrdataSizePerChange": TOTAL_SIZE,
}
}
CONVERTED = {key: int(value) for key, value in DATA["quota"].items()}
creds = _make_credentials()
client = self._make_one(self.PROJECT, creds)
conn = client._connection = _Connection(DATA)
quotas = client.quotas()
self.assertEqual(quotas, CONVERTED)
self.assertEqual(len(conn._requested), 1)
req = conn._requested[0]
self.assertEqual(req["method"], "GET")
self.assertEqual(req["path"], "/%s" % PATH)
def test_quotas_w_kind_key(self):
PATH = "projects/%s" % (self.PROJECT,)
MANAGED_ZONES = 1234
RRS_PER_RRSET = 23
RRSETS_PER_ZONE = 345
RRSET_ADDITIONS = 456
RRSET_DELETIONS = 567
TOTAL_SIZE = 67890
DATA = {
"quota": {
"managedZones": MANAGED_ZONES,
"resourceRecordsPerRrset": RRS_PER_RRSET,
"rrsetsPerManagedZone": RRSETS_PER_ZONE,
"rrsetAdditionsPerChange": RRSET_ADDITIONS,
"rrsetDeletionsPerChange": RRSET_DELETIONS,
"totalRrdataSizePerChange": TOTAL_SIZE,
"whitelistedKeySpecs": [
{
"keyType": "keySigning",
"algorithm": "rsasha512",
"keyLength": 2048,
}
],
}
}
CONVERTED = DATA["quota"]
WITH_KIND = {"quota": DATA["quota"].copy()}
WITH_KIND["quota"]["kind"] = "dns#quota"
WITH_KIND["quota"]["whitelistedKeySpecs"][0]["kind"] = "dns#dnsKeySpec"
creds = _make_credentials()
client = self._make_one(self.PROJECT, creds)
conn = client._connection = _Connection(WITH_KIND)
quotas = client.quotas()
self.assertEqual(quotas, CONVERTED)
self.assertEqual(len(conn._requested), 1)
req = conn._requested[0]
self.assertEqual(req["method"], "GET")
self.assertEqual(req["path"], "/%s" % PATH)
def test_list_zones_defaults(self):
from google.cloud.dns.zone import ManagedZone
ID_1 = "123"
ZONE_1 = "zone_one"
DNS_1 = "one.example.com"
ID_2 = "234"
ZONE_2 = "zone_two"
DNS_2 = "two.example.com"
PATH = "projects/%s/managedZones" % (self.PROJECT,)
TOKEN = "TOKEN"
DATA = {
"nextPageToken": TOKEN,
"managedZones": [
{
"kind": "dns#managedZone",
"id": ID_1,
"name": ZONE_1,
"dnsName": DNS_1,
},
{
"kind": "dns#managedZone",
"id": ID_2,
"name": ZONE_2,
"dnsName": DNS_2,
},
],
}
creds = _make_credentials()
client = self._make_one(self.PROJECT, creds)
conn = client._connection = _Connection(DATA)
iterator = client.list_zones()
page = next(iterator.pages)
zones = list(page)
token = iterator.next_page_token
self.assertEqual(len(zones), len(DATA["managedZones"]))
for found, expected in zip(zones, DATA["managedZones"]):
self.assertIsInstance(found, ManagedZone)
self.assertEqual(found.zone_id, expected["id"])
self.assertEqual(found.name, expected["name"])
self.assertEqual(found.dns_name, expected["dnsName"])
self.assertEqual(token, TOKEN)
self.assertEqual(len(conn._requested), 1)
req = conn._requested[0]
self.assertEqual(req["method"], "GET")
self.assertEqual(req["path"], "/%s" % PATH)
def test_list_zones_explicit(self):
from google.cloud.dns.zone import ManagedZone
ID_1 = "123"
ZONE_1 = "zone_one"
DNS_1 = "one.example.com"
ID_2 = "234"
ZONE_2 = "zone_two"
DNS_2 = "two.example.com"
PATH = "projects/%s/managedZones" % (self.PROJECT,)
TOKEN = "TOKEN"
DATA = {
"managedZones": [
{
"kind": "dns#managedZone",
"id": ID_1,
"name": ZONE_1,
"dnsName": DNS_1,
},
{
"kind": "dns#managedZone",
"id": ID_2,
"name": ZONE_2,
"dnsName": DNS_2,
},
]
}
creds = _make_credentials()
client = self._make_one(self.PROJECT, creds)
conn = client._connection = _Connection(DATA)
iterator = client.list_zones(max_results=3, page_token=TOKEN)
page = next(iterator.pages)
zones = list(page)
token = iterator.next_page_token
self.assertEqual(len(zones), len(DATA["managedZones"]))
for found, expected in zip(zones, DATA["managedZones"]):
self.assertIsInstance(found, ManagedZone)
self.assertEqual(found.zone_id, expected["id"])
self.assertEqual(found.name, expected["name"])
self.assertEqual(found.dns_name, expected["dnsName"])
self.assertIsNone(token)
self.assertEqual(len(conn._requested), 1)
req = conn._requested[0]
self.assertEqual(req["method"], "GET")
self.assertEqual(req["path"], "/%s" % PATH)
self.assertEqual(req["query_params"], {"maxResults": 3, "pageToken": TOKEN})
def test_zone_explicit(self):
from google.cloud.dns.zone import ManagedZone
DESCRIPTION = "DESCRIPTION"
DNS_NAME = "test.example.com"
creds = _make_credentials()
client = self._make_one(self.PROJECT, creds)
zone = client.zone(self.ZONE_NAME, DNS_NAME, DESCRIPTION)
self.assertIsInstance(zone, ManagedZone)
self.assertEqual(zone.name, self.ZONE_NAME)
self.assertEqual(zone.dns_name, DNS_NAME)
self.assertEqual(zone.description, DESCRIPTION)
self.assertIs(zone._client, client)
def test_zone_w_dns_name_wo_description(self):
from google.cloud.dns.zone import ManagedZone
DNS_NAME = "test.example.com"
creds = _make_credentials()
client = self._make_one(self.PROJECT, creds)
zone = client.zone(self.ZONE_NAME, DNS_NAME)
self.assertIsInstance(zone, ManagedZone)
self.assertEqual(zone.name, self.ZONE_NAME)
self.assertEqual(zone.dns_name, DNS_NAME)
self.assertEqual(zone.description, DNS_NAME)
self.assertIs(zone._client, client)
def test_zone_wo_dns_name(self):
from google.cloud.dns.zone import ManagedZone
creds = _make_credentials()
client = self._make_one(self.PROJECT, creds)
zone = client.zone(self.ZONE_NAME)
self.assertIsInstance(zone, ManagedZone)
self.assertEqual(zone.name, self.ZONE_NAME)
self.assertIsNone(zone.dns_name)
self.assertIsNone(zone.description)
self.assertIs(zone._client, client)
class _Connection(object):
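    """Test double for the client connection: records every api_request()
    call in ``_requested`` and replays the canned responses it was built
    with, so the tests above can assert on method, path and query params."""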
def __init__(self, *responses):
self._responses = responses
self._requested = []
def api_request(self, **kw):
self._requested.append(kw)
response, self._responses = self._responses[0], self._responses[1:]
return response
|