gt stringclasses 1
value | context stringlengths 2.49k 119k |
|---|---|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Handles all requests relating to volumes + cinder.
"""
import copy
import sys
from cinderclient import exceptions as cinder_exception
from cinderclient import service_catalog
from cinderclient.v1 import client as cinder_client
from oslo.config import cfg
from raksha.db import base
from raksha import exception
from raksha.openstack.common import log as logging
# Configuration options controlling how this service locates and talks to
# the Cinder volume API.
cinder_opts = [
    cfg.StrOpt('cinder_catalog_info',
               default='volume:cinder:publicURL',
               help='Info to match when looking for cinder in the service '
                    'catalog. Format is : separated values of the form: '
                    '<service_type>:<service_name>:<endpoint_type>'),
    cfg.StrOpt('cinder_endpoint_template',
               default='http://localhost:8776/v1/%(project_id)s',
               help='Override service catalog lookup with template for cinder '
                    'endpoint e.g. http://localhost:8776/v1/%(project_id)s'),
    cfg.StrOpt('os_region_name',
               default=None,
               help='region name of this node'),
    cfg.IntOpt('cinder_http_retries',
               default=3,
               help='Number of cinderclient retries on failed http calls'),
    cfg.BoolOpt('cinder_api_insecure',
                default=False,
                help='Allow to perform insecure SSL requests to cinder'),
    cfg.BoolOpt('cinder_cross_az_attach',
                default=True,
                help='Allow attach between instance and volume in different '
                     'availability zones.'),
]
# Register the cinder options on the global config object so they can be
# set via the service's configuration file.
CONF = cfg.CONF
CONF.register_opts(cinder_opts)
# Module-level logger, named after this module per project convention.
LOG = logging.getLogger(__name__)
def cinderclient(context):
    """Build an authenticated cinderclient.v1 Client for *context*.

    The endpoint URL is taken from ``cinder_endpoint_template`` when that
    option is set; otherwise it is looked up in the service catalog using
    ``cinder_catalog_info`` (optionally filtered by ``os_region_name``).
    """
    # FIXME: the cinderclient ServiceCatalog object is mis-named.
    # It actually contains the entire access blob.
    # Only needed parts of the service catalog are passed in, see
    # nova/context.py.
    compat_catalog = {
        # TODO(gbasava): Check this...
        # 'access': {'serviceCatalog': context.service_catalog or []}
        'access': []
    }
    catalog = service_catalog.ServiceCatalog(compat_catalog)

    if CONF.cinder_endpoint_template:
        # An explicit endpoint template short-circuits the catalog lookup.
        url = CONF.cinder_endpoint_template % context.to_dict()
    else:
        service_type, service_name, endpoint_type = \
            CONF.cinder_catalog_info.split(':')
        # Restrict the lookup to the configured region, when one is set.
        region_attr = 'region' if CONF.os_region_name else None
        url = catalog.url_for(attr=region_attr,
                              filter_value=CONF.os_region_name or None,
                              service_type=service_type,
                              service_name=service_name,
                              endpoint_type=endpoint_type)

    LOG.debug(_('Cinderclient connection created using URL: %s') % url)

    client = cinder_client.Client(context.user_id,
                                  context.auth_token,
                                  project_id=context.project_id,
                                  auth_url=url,
                                  insecure=CONF.cinder_api_insecure,
                                  retries=CONF.cinder_http_retries)
    # noauth extracts user_id:project_id from auth_token
    client.client.auth_token = context.auth_token or '%s:%s' % (
        context.user_id, context.project_id)
    client.client.management_url = url
    return client
def _untranslate_volume_summary_view(context, vol):
"""Maps keys for volumes summary view."""
d = {}
d['id'] = vol.id
d['status'] = vol.status
d['size'] = vol.size
d['availability_zone'] = vol.availability_zone
d['created_at'] = vol.created_at
# TODO(jdg): The calling code expects attach_time and
# mountpoint to be set. When the calling
# code is more defensive this can be
# removed.
d['attach_time'] = ""
d['mountpoint'] = ""
if vol.attachments:
att = vol.attachments[0]
d['attach_status'] = 'attached'
d['instance_uuid'] = att['server_id']
d['mountpoint'] = att['device']
else:
d['attach_status'] = 'detached'
d['display_name'] = vol.display_name
d['display_description'] = vol.display_description
# TODO(jdg): Information may be lost in this translation
d['volume_type_id'] = vol.volume_type
d['snapshot_id'] = vol.snapshot_id
d['volume_metadata'] = []
for key, value in vol.metadata.items():
item = {}
item['key'] = key
item['value'] = value
d['volume_metadata'].append(item)
if hasattr(vol, 'volume_image_metadata'):
d['volume_image_metadata'] = copy.deepcopy(vol.volume_image_metadata)
return d
def _untranslate_snapshot_summary_view(context, snapshot):
"""Maps keys for snapshots summary view."""
d = {}
d['id'] = snapshot.id
d['status'] = snapshot.status
d['progress'] = snapshot.progress
d['size'] = snapshot.size
d['created_at'] = snapshot.created_at
d['display_name'] = snapshot.display_name
d['display_description'] = snapshot.display_description
d['volume_id'] = snapshot.volume_id
d['project_id'] = snapshot.project_id
d['volume_size'] = snapshot.size
return d
class API(base.Base):
"""API for interacting with the volume manager."""
def _reraise_translated_volume_exception(self, volume_id=None):
"""Transform the exception for the volume but keep its traceback
intact."""
exc_type, exc_value, exc_trace = sys.exc_info()
new_exc = self._translate_volume_exception(volume_id, exc_value)
raise new_exc, None, exc_trace
def _translate_volume_exception(self, volume_id, exc_value):
if isinstance(exc_value, cinder_exception.NotFound):
return exception.VolumeNotFound(volume_id=volume_id)
elif isinstance(exc_value, cinder_exception.BadRequest):
return exception.InvalidInput(reason=exc_value.message)
return exc_value
def get(self, context, volume_id):
try:
item = cinderclient(context).volumes.get(volume_id)
return _untranslate_volume_summary_view(context, item)
except Exception:
self._reraise_translated_volume_exception(volume_id)
def get_all(self, context, search_opts={}):
items = cinderclient(context).volumes.list(detailed=True)
rval = []
for item in items:
rval.append(_untranslate_volume_summary_view(context, item))
return rval
def check_attach(self, context, volume, instance=None):
# TODO(vish): abstract status checking?
if volume['status'] != "available":
msg = _("status must be available")
raise exception.InvalidVolume(reason=msg)
if volume['attach_status'] == "attached":
msg = _("already attached")
raise exception.InvalidVolume(reason=msg)
if instance and not CONF.cinder_cross_az_attach:
if instance['availability_zone'] != volume['availability_zone']:
msg = _("Instance and volume not in same availability_zone")
raise exception.InvalidVolume(reason=msg)
def check_detach(self, context, volume):
# TODO(vish): abstract status checking?
if volume['status'] == "available":
msg = _("already detached")
raise exception.InvalidVolume(reason=msg)
def reserve_volume(self, context, volume):
cinderclient(context).volumes.reserve(volume['id'])
def unreserve_volume(self, context, volume):
cinderclient(context).volumes.unreserve(volume['id'])
def begin_detaching(self, context, volume):
cinderclient(context).volumes.begin_detaching(volume['id'])
def roll_detaching(self, context, volume):
cinderclient(context).volumes.roll_detaching(volume['id'])
def attach(self, context, volume, instance_uuid, mountpoint):
cinderclient(context).volumes.attach(volume['id'],
instance_uuid,
mountpoint)
def detach(self, context, volume):
cinderclient(context).volumes.detach(volume['id'])
def initialize_connection(self, context, volume, connector):
return cinderclient(context).\
volumes.initialize_connection(volume['id'], connector)
def terminate_connection(self, context, volume, connector):
return cinderclient(context).\
volumes.terminate_connection(volume['id'], connector)
def create(self, context, size, name, description, snapshot=None,
image_id=None, volume_type=None, metadata=None,
availability_zone=None):
if snapshot is not None:
snapshot_id = snapshot['id']
else:
snapshot_id = None
kwargs = dict(snapshot_id=snapshot_id,
display_name=name,
display_description=description,
volume_type=volume_type,
user_id=context.user_id,
project_id=context.project_id,
availability_zone=availability_zone,
metadata=metadata,
imageRef=image_id)
try:
item = cinderclient(context).volumes.create(size, **kwargs)
return _untranslate_volume_summary_view(context, item)
except Exception:
self._reraise_translated_volume_exception()
def delete(self, context, volume):
cinderclient(context).volumes.delete(volume['id'])
def update(self, context, volume, fields):
raise NotImplementedError()
def get_snapshot(self, context, snapshot_id):
item = cinderclient(context).volume_snapshots.get(snapshot_id)
return _untranslate_snapshot_summary_view(context, item)
def get_all_snapshots(self, context):
items = cinderclient(context).volume_snapshots.list(detailed=True)
rvals = []
for item in items:
rvals.append(_untranslate_snapshot_summary_view(context, item))
return rvals
def create_snapshot(self, context, volume, name, description):
item = cinderclient(context).volume_snapshots.create(volume['id'],
False,
name,
description)
return _untranslate_snapshot_summary_view(context, item)
def create_snapshot_force(self, context, volume, name, description):
item = cinderclient(context).volume_snapshots.create(volume['id'],
True,
name,
description)
return _untranslate_snapshot_summary_view(context, item)
def delete_snapshot(self, context, snapshot):
cinderclient(context).volume_snapshots.delete(snapshot['id'])
def get_volume_metadata(self, context, volume):
raise NotImplementedError()
def delete_volume_metadata(self, context, volume, key):
raise NotImplementedError()
def update_volume_metadata(self, context, volume, metadata, delete=False):
raise NotImplementedError()
def get_volume_metadata_value(self, volume, key):
raise NotImplementedError()
| |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-06-22 18:21
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import utilities.fields
class Migration(migrations.Migration):
    """Initial schema for the dcim app (auto-generated by Django 1.9.7).

    NOTE(review): migration files are generated artifacts; do not hand-edit
    field definitions or reorder operations once this migration has been
    applied anywhere.
    """

    # First migration of this app: no parent migration exists.
    initial = True

    dependencies = [
    ]

    operations = [
        # --- Model creation (FKs that would cause ordering issues are
        # --- added separately via AddField at the end of this list).
        migrations.CreateModel(
            name='ConsolePort',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=30)),
                ('connection_status', models.NullBooleanField(choices=[[False, b'Planned'], [True, b'Connected']], default=True)),
            ],
            options={
                'ordering': ['device', 'name'],
            },
        ),
        migrations.CreateModel(
            name='ConsolePortTemplate',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=30)),
            ],
            options={
                'ordering': ['device_type', 'name'],
            },
        ),
        migrations.CreateModel(
            name='ConsoleServerPort',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=30)),
            ],
        ),
        migrations.CreateModel(
            name='ConsoleServerPortTemplate',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=30)),
            ],
            options={
                'ordering': ['device_type', 'name'],
            },
        ),
        migrations.CreateModel(
            name='Device',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateField(auto_now_add=True)),
                ('last_updated', models.DateTimeField(auto_now=True)),
                ('name', utilities.fields.NullableCharField(blank=True, max_length=50, null=True, unique=True)),
                ('serial', models.CharField(blank=True, max_length=50, verbose_name=b'Serial number')),
                ('position', models.PositiveSmallIntegerField(blank=True, help_text=b'Number of the lowest U position occupied by the device', null=True, validators=[django.core.validators.MinValueValidator(1)], verbose_name=b'Position (U)')),
                ('face', models.PositiveSmallIntegerField(blank=True, choices=[[0, b'Front'], [1, b'Rear']], null=True, verbose_name=b'Rack face')),
                ('status', models.BooleanField(choices=[[True, b'Active'], [False, b'Offline']], default=True, verbose_name=b'Status')),
                ('comments', models.TextField(blank=True)),
            ],
            options={
                'ordering': ['name'],
            },
        ),
        migrations.CreateModel(
            name='DeviceRole',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50, unique=True)),
                ('slug', models.SlugField(unique=True)),
                ('color', models.CharField(choices=[[b'teal', b'Teal'], [b'green', b'Green'], [b'blue', b'Blue'], [b'purple', b'Purple'], [b'yellow', b'Yellow'], [b'orange', b'Orange'], [b'red', b'Red'], [b'light_gray', b'Light Gray'], [b'medium_gray', b'Medium Gray'], [b'dark_gray', b'Dark Gray']], max_length=30)),
            ],
            options={
                'ordering': ['name'],
            },
        ),
        migrations.CreateModel(
            name='DeviceType',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('model', models.CharField(max_length=50)),
                ('slug', models.SlugField()),
                ('u_height', models.PositiveSmallIntegerField(default=1, verbose_name=b'Height (U)')),
                ('is_full_depth', models.BooleanField(default=True, help_text=b'Device consumes both front and rear rack faces', verbose_name=b'Is full depth')),
                ('is_console_server', models.BooleanField(default=False, help_text=b'This type of device has console server ports', verbose_name=b'Is a console server')),
                ('is_pdu', models.BooleanField(default=False, help_text=b'This type of device has power outlets', verbose_name=b'Is a PDU')),
                ('is_network_device', models.BooleanField(default=True, help_text=b'This type of device has network interfaces', verbose_name=b'Is a network device')),
            ],
            options={
                'ordering': ['manufacturer', 'model'],
            },
        ),
        migrations.CreateModel(
            name='Interface',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=30)),
                ('form_factor', models.PositiveSmallIntegerField(choices=[[0, b'Virtual'], [800, b'10/100M (Copper)'], [1000, b'1GE (Copper)'], [1100, b'1GE (SFP)'], [1200, b'10GE (SFP+)'], [1300, b'10GE (XFP)'], [1400, b'40GE (QSFP+)']], default=1200)),
                ('mgmt_only', models.BooleanField(default=False, help_text=b'This interface is used only for out-of-band management', verbose_name=b'OOB Management')),
                ('description', models.CharField(blank=True, max_length=100)),
                ('device', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='interfaces', to='dcim.Device')),
            ],
            options={
                'ordering': ['device', 'name'],
            },
        ),
        migrations.CreateModel(
            name='InterfaceConnection',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('connection_status', models.BooleanField(choices=[[False, b'Planned'], [True, b'Connected']], default=True, verbose_name=b'Status')),
                ('interface_a', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='connected_as_a', to='dcim.Interface')),
                ('interface_b', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='connected_as_b', to='dcim.Interface')),
            ],
        ),
        migrations.CreateModel(
            name='InterfaceTemplate',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=30)),
                ('form_factor', models.PositiveSmallIntegerField(choices=[[0, b'Virtual'], [800, b'10/100M (Copper)'], [1000, b'1GE (Copper)'], [1100, b'1GE (SFP)'], [1200, b'10GE (SFP+)'], [1300, b'10GE (XFP)'], [1400, b'40GE (QSFP+)']], default=1200)),
                ('mgmt_only', models.BooleanField(default=False, verbose_name=b'Management only')),
                ('device_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='interface_templates', to='dcim.DeviceType')),
            ],
            options={
                'ordering': ['device_type', 'name'],
            },
        ),
        migrations.CreateModel(
            name='Manufacturer',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50, unique=True)),
                ('slug', models.SlugField(unique=True)),
            ],
            options={
                'ordering': ['name'],
            },
        ),
        migrations.CreateModel(
            name='Module',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50, verbose_name=b'Name')),
                ('part_id', models.CharField(blank=True, max_length=50, verbose_name=b'Part ID')),
                ('serial', models.CharField(blank=True, max_length=50, verbose_name=b'Serial number')),
                ('discovered', models.BooleanField(default=False, verbose_name=b'Discovered')),
                ('device', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='modules', to='dcim.Device')),
                ('parent', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='submodules', to='dcim.Module')),
            ],
            options={
                'ordering': ['device__id', 'parent__id', 'name'],
            },
        ),
        migrations.CreateModel(
            name='Platform',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50, unique=True)),
                ('slug', models.SlugField(unique=True)),
                ('rpc_client', models.CharField(blank=True, choices=[[b'juniper-junos', b'Juniper Junos (NETCONF)'], [b'cisco-ios', b'Cisco IOS (SSH)'], [b'opengear', b'Opengear (SSH)']], max_length=30, verbose_name=b'RPC client')),
            ],
            options={
                'ordering': ['name'],
            },
        ),
        migrations.CreateModel(
            name='PowerOutlet',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=30)),
                ('device', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='power_outlets', to='dcim.Device')),
            ],
        ),
        migrations.CreateModel(
            name='PowerOutletTemplate',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=30)),
                ('device_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='power_outlet_templates', to='dcim.DeviceType')),
            ],
            options={
                'ordering': ['device_type', 'name'],
            },
        ),
        migrations.CreateModel(
            name='PowerPort',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=30)),
                ('connection_status', models.NullBooleanField(choices=[[False, b'Planned'], [True, b'Connected']], default=True)),
                ('device', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='power_ports', to='dcim.Device')),
                ('power_outlet', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='connected_port', to='dcim.PowerOutlet')),
            ],
            options={
                'ordering': ['device', 'name'],
            },
        ),
        migrations.CreateModel(
            name='PowerPortTemplate',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=30)),
                ('device_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='power_port_templates', to='dcim.DeviceType')),
            ],
            options={
                'ordering': ['device_type', 'name'],
            },
        ),
        migrations.CreateModel(
            name='Rack',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateField(auto_now_add=True)),
                ('last_updated', models.DateTimeField(auto_now=True)),
                ('name', models.CharField(max_length=50)),
                ('facility_id', utilities.fields.NullableCharField(blank=True, max_length=30, null=True, verbose_name=b'Facility ID')),
                ('u_height', models.PositiveSmallIntegerField(default=42, verbose_name=b'Height (U)')),
                ('comments', models.TextField(blank=True)),
            ],
            options={
                'ordering': ['site', 'name'],
            },
        ),
        migrations.CreateModel(
            name='RackGroup',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
                ('slug', models.SlugField()),
            ],
            options={
                'ordering': ['site', 'name'],
            },
        ),
        migrations.CreateModel(
            name='Site',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateField(auto_now_add=True)),
                ('last_updated', models.DateTimeField(auto_now=True)),
                ('name', models.CharField(max_length=50, unique=True)),
                ('slug', models.SlugField(unique=True)),
                ('facility', models.CharField(blank=True, max_length=50)),
                ('asn', models.PositiveIntegerField(blank=True, null=True, verbose_name=b'ASN')),
                ('physical_address', models.CharField(blank=True, max_length=200)),
                ('shipping_address', models.CharField(blank=True, max_length=200)),
                ('comments', models.TextField(blank=True)),
            ],
            options={
                'ordering': ['name'],
            },
        ),
        # --- Deferred foreign keys, added after all models exist.
        migrations.AddField(
            model_name='rackgroup',
            name='site',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='rack_groups', to='dcim.Site'),
        ),
        migrations.AddField(
            model_name='rack',
            name='group',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='racks', to='dcim.RackGroup'),
        ),
        migrations.AddField(
            model_name='rack',
            name='site',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='racks', to='dcim.Site'),
        ),
        migrations.AddField(
            model_name='devicetype',
            name='manufacturer',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='device_types', to='dcim.Manufacturer'),
        ),
        migrations.AddField(
            model_name='device',
            name='device_role',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='devices', to='dcim.DeviceRole'),
        ),
        migrations.AddField(
            model_name='device',
            name='device_type',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='instances', to='dcim.DeviceType'),
        ),
        migrations.AddField(
            model_name='device',
            name='platform',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='devices', to='dcim.Platform'),
        ),
    ]
| |
# This Python file uses the following encoding: utf-8
#
# Copyright 2016 Google Inc. All rights reserved.
#
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
#
"""Tests for the places module."""
import uuid
from types import GeneratorType
import responses
import googlemaps
from . import TestCase
class PlacesTest(TestCase):
    """Unit tests for the Places API methods of googlemaps.Client.

    Each test registers a canned HTTP response via ``responses`` and then
    asserts the exact request URL the client produced.
    """

    def setUp(self):
        self.key = "AIzaasdf"
        self.client = googlemaps.Client(self.key)
        self.location = (-33.86746, 151.207090)
        self.type = "liquor_store"
        self.language = "en-AU"
        self.region = "AU"
        self.radius = 100

    @responses.activate
    def test_places_find(self):
        """find_place builds the expected URL and rejects invalid args."""
        url = "https://maps.googleapis.com/maps/api/place/findplacefromtext/json"
        responses.add(
            responses.GET,
            url,
            body='{"status": "OK", "candidates": []}',
            status=200,
            content_type="application/json",
        )

        self.client.find_place(
            "restaurant",
            "textquery",
            fields=["business_status", "geometry/location", "place_id"],
            location_bias="point:90,90",
            language=self.language,
        )

        self.assertEqual(1, len(responses.calls))
        self.assertURLEqual(
            "%s?language=en-AU&inputtype=textquery&"
            "locationbias=point:90,90&input=restaurant"
            "&fields=business_status,geometry/location,place_id&key=%s"
            % (url, self.key),
            responses.calls[0].request.url,
        )

        with self.assertRaises(ValueError):
            self.client.find_place("restaurant", "invalid")
        with self.assertRaises(ValueError):
            self.client.find_place(
                "restaurant", "textquery", fields=["geometry", "invalid"]
            )
        with self.assertRaises(ValueError):
            self.client.find_place("restaurant", "textquery", location_bias="invalid")

    @responses.activate
    def test_places_text_search(self):
        """places (text search) forwards every filter into the query string."""
        url = "https://maps.googleapis.com/maps/api/place/textsearch/json"
        responses.add(
            responses.GET,
            url,
            body='{"status": "OK", "results": [], "html_attributions": []}',
            status=200,
            content_type="application/json",
        )

        self.client.places(
            "restaurant",
            location=self.location,
            radius=self.radius,
            region=self.region,
            language=self.language,
            min_price=1,
            max_price=4,
            open_now=True,
            type=self.type,
        )

        self.assertEqual(1, len(responses.calls))
        # BUG FIX: the expected URL previously contained the mojibake
        # "radius=100®ion=AU" -- "&reg" had been corrupted into the
        # U+00AE (R) sign.  Restored to the literal "&region=AU".
        self.assertURLEqual(
            "%s?language=en-AU&location=-33.86746%%2C151.20709&"
            "maxprice=4&minprice=1&opennow=true&query=restaurant&"
            "radius=100&region=AU&type=liquor_store&key=%s" % (url, self.key),
            responses.calls[0].request.url,
        )

    @responses.activate
    def test_places_nearby_search(self):
        """places_nearby builds the expected URL and validates rank_by rules."""
        url = "https://maps.googleapis.com/maps/api/place/nearbysearch/json"
        responses.add(
            responses.GET,
            url,
            body='{"status": "OK", "results": [], "html_attributions": []}',
            status=200,
            content_type="application/json",
        )

        self.client.places_nearby(
            location=self.location,
            keyword="foo",
            language=self.language,
            min_price=1,
            max_price=4,
            name="bar",
            open_now=True,
            rank_by="distance",
            type=self.type,
        )

        self.assertEqual(1, len(responses.calls))
        self.assertURLEqual(
            "%s?keyword=foo&language=en-AU&location=-33.86746%%2C151.20709&"
            "maxprice=4&minprice=1&name=bar&opennow=true&rankby=distance&"
            "type=liquor_store&key=%s" % (url, self.key),
            responses.calls[0].request.url,
        )

        # rank_by=distance requires keyword/name/type and forbids radius.
        with self.assertRaises(ValueError):
            self.client.places_nearby(radius=self.radius)
        with self.assertRaises(ValueError):
            self.client.places_nearby(self.location, rank_by="distance")
        with self.assertRaises(ValueError):
            self.client.places_nearby(
                location=self.location,
                rank_by="distance",
                keyword="foo",
                radius=self.radius,
            )

    @responses.activate
    def test_place_detail(self):
        """place (details) builds the expected URL and validates fields."""
        url = "https://maps.googleapis.com/maps/api/place/details/json"
        responses.add(
            responses.GET,
            url,
            body='{"status": "OK", "result": {}, "html_attributions": []}',
            status=200,
            content_type="application/json",
        )

        self.client.place(
            "ChIJN1t_tDeuEmsRUsoyG83frY4",
            fields=["business_status", "geometry/location", "place_id"],
            language=self.language,
        )

        self.assertEqual(1, len(responses.calls))
        self.assertURLEqual(
            "%s?language=en-AU&placeid=ChIJN1t_tDeuEmsRUsoyG83frY4"
            "&key=%s&fields=business_status,geometry/location,place_id"
            % (url, self.key),
            responses.calls[0].request.url,
        )

        with self.assertRaises(ValueError):
            self.client.place(
                "ChIJN1t_tDeuEmsRUsoyG83frY4", fields=["geometry", "invalid"]
            )

    @responses.activate
    def test_photo(self):
        """places_photo streams the response body as a generator."""
        url = "https://maps.googleapis.com/maps/api/place/photo"
        responses.add(responses.GET, url, status=200)

        ref = "CnRvAAAAwMpdHeWlXl-lH0vp7lez4znKPIWSWvgvZFISdKx45AwJVP1Qp37YOrH7sqHMJ8C-vBDC546decipPHchJhHZL94RcTUfPa1jWzo-rSHaTlbNtjh-N68RkcToUCuY9v2HNpo5mziqkir37WU8FJEqVBIQ4k938TI3e7bf8xq-uwDZcxoUbO_ZJzPxremiQurAYzCTwRhE_V0"
        response = self.client.places_photo(ref, max_width=100)

        self.assertTrue(isinstance(response, GeneratorType))
        self.assertEqual(1, len(responses.calls))
        self.assertURLEqual(
            "%s?maxwidth=100&photoreference=%s&key=%s" % (url, ref, self.key),
            responses.calls[0].request.url,
        )

    @responses.activate
    def test_autocomplete(self):
        """places_autocomplete forwards session token and all bias options."""
        url = "https://maps.googleapis.com/maps/api/place/autocomplete/json"
        responses.add(
            responses.GET,
            url,
            body='{"status": "OK", "predictions": []}',
            status=200,
            content_type="application/json",
        )

        session_token = uuid.uuid4().hex
        self.client.places_autocomplete(
            "Google",
            session_token=session_token,
            offset=3,
            origin=self.location,
            location=self.location,
            radius=self.radius,
            language=self.language,
            types="geocode",
            components={"country": "au"},
            strict_bounds=True,
        )

        self.assertEqual(1, len(responses.calls))
        self.assertURLEqual(
            "%s?components=country%%3Aau&input=Google&language=en-AU&"
            "origin=-33.86746%%2C151.20709&"
            "location=-33.86746%%2C151.20709&offset=3&radius=100&"
            "strictbounds=true&types=geocode&key=%s&sessiontoken=%s"
            % (url, self.key, session_token),
            responses.calls[0].request.url,
        )

    @responses.activate
    def test_autocomplete_query(self):
        """places_autocomplete_query URL-encodes the free-form input."""
        url = "https://maps.googleapis.com/maps/api/place/queryautocomplete/json"
        responses.add(
            responses.GET,
            url,
            body='{"status": "OK", "predictions": []}',
            status=200,
            content_type="application/json",
        )

        self.client.places_autocomplete_query("pizza near New York")

        self.assertEqual(1, len(responses.calls))
        self.assertURLEqual(
            "%s?input=pizza+near+New+York&key=%s" % (url, self.key),
            responses.calls[0].request.url,
        )
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
# Signature of the optional ``cls`` response-transform callback accepted by
# the generated operations: (pipeline_response, deserialized, response_headers).
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
# Module-wide serializer used only for URL/query/header formatting, so
# client-side validation is disabled.
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_delete_value_request(
    tag_name: str,
    tag_value: str,
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP DELETE request that removes a single tag value."""
    api_version = "2019-08-01"
    accept = "application/json"

    # Construct URL
    template = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/tagNames/{tagName}/tagValues/{tagValue}')
    url = _format_url_section(
        template,
        tagName=_SERIALIZER.url("tag_name", tag_name, 'str'),
        tagValue=_SERIALIZER.url("tag_value", tag_value, 'str'),
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str'),
    )

    # Construct parameters
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(method="DELETE", url=url, params=query_parameters,
                       headers=header_parameters, **kwargs)
def build_create_or_update_value_request(
    tag_name: str,
    tag_value: str,
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP PUT request that creates (or updates) one tag value."""
    api_version = "2019-08-01"
    accept = "application/json"

    # Construct URL
    path_args = {
        "tagName": _SERIALIZER.url("tag_name", tag_name, 'str'),
        "tagValue": _SERIALIZER.url("tag_value", tag_value, 'str'),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
    }
    url = _format_url_section(
        kwargs.pop("template_url", '/subscriptions/{subscriptionId}/tagNames/{tagName}/tagValues/{tagValue}'),
        **path_args
    )

    # Construct parameters
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(method="PUT", url=url, params=query_parameters,
                       headers=header_parameters, **kwargs)
def build_create_or_update_request(
    tag_name: str,
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP PUT request that creates a tag name in a subscription.

    Serializes the path parameters into the URL template and attaches the
    api-version query parameter and a JSON Accept header.
    """
    api_version = "2019-08-01"
    accept = "application/json"
    # URL: serialize each path parameter and substitute it into the template.
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/tagNames/{tagName}')
    url = _format_url_section(
        url,
        tagName=_SERIALIZER.url("tag_name", tag_name, 'str'),
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str'),
    )
    # Query string: only the service api-version.
    query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Headers: request a JSON response body.
    header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(
        method="PUT",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
def build_delete_request(
    tag_name: str,
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP DELETE request that removes a tag name from a subscription.

    Serializes the path parameters into the URL template and attaches the
    api-version query parameter and a JSON Accept header.
    """
    api_version = "2019-08-01"
    accept = "application/json"
    # URL: serialize each path parameter and substitute it into the template.
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/tagNames/{tagName}')
    url = _format_url_section(
        url,
        tagName=_SERIALIZER.url("tag_name", tag_name, 'str'),
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str'),
    )
    # Query string: only the service api-version.
    query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Headers: request a JSON response body.
    header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(
        method="DELETE",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
def build_list_request(
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP GET request that lists all tag names in a subscription.

    Serializes the subscription id into the URL template and attaches the
    api-version query parameter and a JSON Accept header.
    """
    api_version = "2019-08-01"
    accept = "application/json"
    # URL: serialize the subscription id and substitute it into the template.
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/tagNames')
    url = _format_url_section(
        url,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str'),
    )
    # Query string: only the service api-version.
    query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Headers: request a JSON response body.
    header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(
        method="GET",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
class TagsOperations(object):
    """TagsOperations operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.resource.resources.v2019_08_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    # Alias to the generated model classes used by this operation group.
    models = _models
    def __init__(self, client, config, serializer, deserializer):
        # Pipeline client used to send requests.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        # Holds e.g. the subscription_id used by every operation below.
        self._config = config
    @distributed_trace
    def delete_value(
        self,
        tag_name: str,
        tag_value: str,
        **kwargs: Any
    ) -> None:
        """Deletes a tag value.
        :param tag_name: The name of the tag.
        :type tag_name: str
        :param tag_value: The value of the tag to delete.
        :type tag_value: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        # Map well-known HTTP failures to azure-core exception types;
        # callers may extend/override the mapping via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        request = build_delete_value_request(
            tag_name=tag_name,
            tag_value=tag_value,
            subscription_id=self._config.subscription_id,
            template_url=self.delete_value.metadata['url'],
        )
        # Convert to the pipeline request type and expand the host portion.
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # Service returns 200 or 204 on success; anything else is an error.
        if response.status_code not in [200, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    delete_value.metadata = {'url': '/subscriptions/{subscriptionId}/tagNames/{tagName}/tagValues/{tagValue}'} # type: ignore
    @distributed_trace
    def create_or_update_value(
        self,
        tag_name: str,
        tag_value: str,
        **kwargs: Any
    ) -> "_models.TagValue":
        """Creates a tag value. The name of the tag must already exist.
        :param tag_name: The name of the tag.
        :type tag_name: str
        :param tag_value: The value of the tag to create.
        :type tag_value: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: TagValue, or the result of cls(response)
        :rtype: ~azure.mgmt.resource.resources.v2019_08_01.models.TagValue
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.TagValue"]
        # Map well-known HTTP failures to azure-core exception types.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        request = build_create_or_update_value_request(
            tag_name=tag_name,
            tag_value=tag_value,
            subscription_id=self._config.subscription_id,
            template_url=self.create_or_update_value.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        # 200 (updated) and 201 (created) both carry a TagValue body.
        if response.status_code == 200:
            deserialized = self._deserialize('TagValue', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('TagValue', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    create_or_update_value.metadata = {'url': '/subscriptions/{subscriptionId}/tagNames/{tagName}/tagValues/{tagValue}'} # type: ignore
    @distributed_trace
    def create_or_update(
        self,
        tag_name: str,
        **kwargs: Any
    ) -> "_models.TagDetails":
        """Creates a tag in the subscription.
        The tag name can have a maximum of 512 characters and is case insensitive. Tag names created by
        Azure have prefixes of microsoft, azure, or windows. You cannot create tags with one of these
        prefixes.
        :param tag_name: The name of the tag to create.
        :type tag_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: TagDetails, or the result of cls(response)
        :rtype: ~azure.mgmt.resource.resources.v2019_08_01.models.TagDetails
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.TagDetails"]
        # Map well-known HTTP failures to azure-core exception types.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        request = build_create_or_update_request(
            tag_name=tag_name,
            subscription_id=self._config.subscription_id,
            template_url=self.create_or_update.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        # 200 (updated) and 201 (created) both carry a TagDetails body.
        if response.status_code == 200:
            deserialized = self._deserialize('TagDetails', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('TagDetails', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/tagNames/{tagName}'} # type: ignore
    @distributed_trace
    def delete(
        self,
        tag_name: str,
        **kwargs: Any
    ) -> None:
        """Deletes a tag from the subscription.
        You must remove all values from a resource tag before you can delete it.
        :param tag_name: The name of the tag.
        :type tag_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        # Map well-known HTTP failures to azure-core exception types.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        request = build_delete_request(
            tag_name=tag_name,
            subscription_id=self._config.subscription_id,
            template_url=self.delete.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # Service returns 200 or 204 on success; anything else is an error.
        if response.status_code not in [200, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    delete.metadata = {'url': '/subscriptions/{subscriptionId}/tagNames/{tagName}'} # type: ignore
    @distributed_trace
    def list(
        self,
        **kwargs: Any
    ) -> Iterable["_models.TagsListResult"]:
        """Gets the names and values of all resource tags that are defined in a subscription.
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either TagsListResult or the result of cls(response)
        :rtype:
         ~azure.core.paging.ItemPaged[~azure.mgmt.resource.resources.v2019_08_01.models.TagsListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.TagsListResult"]
        # Map well-known HTTP failures to azure-core exception types.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        def prepare_request(next_link=None):
            # Build the first-page request from the operation template, or
            # re-issue against the service-provided next_link for later pages.
            if not next_link:
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    template_url=self.list.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                # Paging links are always followed with GET.
                request.method = "GET"
            return request
        def extract_data(pipeline_response):
            # Deserialize one page and hand back (next link, items iterator).
            deserialized = self._deserialize("TagsListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            # Fetch one page and fail fast on any non-200 status.
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/tagNames'} # type: ignore
| |
#
# This file is part of pyasn1 software.
#
# Copyright (c) 2005-2017, Ilya Etingof <etingof@gmail.com>
# License: http://pyasn1.sf.net/license.html
#
from pyasn1.type import base, tag, univ, char, useful
from pyasn1.codec.ber import eoo
from pyasn1.compat.octets import int2oct, oct2int, ints2octs, null, str2octs
from pyasn1.compat.integer import to_bytes
from pyasn1 import debug, error
__all__ = ['encode']
class AbstractItemEncoder(object):
    """Base class for per-type BER item encoders.
    Subclasses implement encodeValue(); this class serializes the tag and
    length octets and assembles the complete TLV in encode().
    """
    # Non-zero if this item type may be emitted in indefinite-length mode.
    supportIndefLenMode = 1
    # noinspection PyMethodMayBeStatic
    def encodeTag(self, singleTag, isConstructed):
        # Serialize one (class, format, id) tag triple into identifier octets.
        tagClass, tagFormat, tagId = singleTag
        encodedTag = tagClass | tagFormat
        if isConstructed:
            encodedTag |= tag.tagFormatConstructed
        if tagId < 31:
            # short form: tag number fits in the low 5 bits
            return (encodedTag | tagId,)
        else:
            # long form: leading 0x1F marker, then the tag number base-128
            # big-endian, continuation bit (0x80) set on all but the last octet
            substrate = (tagId & 0x7f,)
            tagId >>= 7
            while tagId:
                substrate = (0x80 | (tagId & 0x7f),) + substrate
                tagId >>= 7
            return (encodedTag | 0x1F,) + substrate
    def encodeLength(self, length, defMode):
        # Serialize the length octets; a lone 0x80 signals indefinite length.
        if not defMode and self.supportIndefLenMode:
            return (0x80,)
        if length < 0x80:
            # short definite form: one octet
            return (length,)
        else:
            # long definite form: count octet, then big-endian length octets
            substrate = ()
            while length:
                substrate = (length & 0xff,) + substrate
                length >>= 8
            substrateLen = len(substrate)
            if substrateLen > 126:
                raise error.PyAsn1Error('Length octets overflow (%d)' % substrateLen)
            return (0x80 | substrateLen,) + substrate
    def encodeValue(self, encodeFun, value, defMode, maxChunkSize, ifNotEmpty=False):
        # Subclass hook: must return (substrate, isConstructed, isOctets).
        raise error.PyAsn1Error('Not implemented')
    def _encodeEndOfOctets(self, encodeFun, defMode):
        # End-of-contents marker is only needed in indefinite-length mode.
        if defMode or not self.supportIndefLenMode:
            return null
        else:
            return encodeFun(eoo.endOfOctets, defMode)
    def encode(self, encodeFun, value, defMode, maxChunkSize, ifNotEmpty=False):
        # Assemble the complete TLV for *value*.
        substrate, isConstructed, isOctets = self.encodeValue(
            encodeFun, value, defMode, maxChunkSize, ifNotEmpty=ifNotEmpty
        )
        if ifNotEmpty and not substrate:
            return substrate
        tagSet = value.tagSet
        # tagged value?
        if tagSet:
            if not isConstructed: # primitive form implies definite mode
                defMode = True
            header = self.encodeTag(tagSet[-1], isConstructed)
            header += self.encodeLength(len(substrate), defMode)
            if isOctets:
                substrate = ints2octs(header) + substrate
            else:
                substrate = ints2octs(header + substrate)
            # NOTE(review): this local deliberately shadows the imported
            # 'eoo' module for the remainder of this method.
            eoo = self._encodeEndOfOctets(encodeFun, defMode)
            if eoo:
                substrate += eoo
        return substrate
class EndOfOctetsEncoder(AbstractItemEncoder):
    """Encoder for the end-of-contents sentinel: empty payload."""
    def encodeValue(self, encodeFun, value, defMode, maxChunkSize, ifNotEmpty=False):
        # (substrate, isConstructed, isOctets) -- nothing to emit
        return null, False, True
class ExplicitlyTaggedItemEncoder(AbstractItemEncoder):
    """Peels off the outermost tag and re-encodes the value, producing the
    inner encoding used for explicitly tagged items."""
    def encodeValue(self, encodeFun, value, defMode, maxChunkSize, ifNotEmpty=False):
        innerTagSet = value.tagSet[:-1]
        if isinstance(value, base.AbstractConstructedAsn1Item):
            inner = value.clone(tagSet=innerTagSet, cloneValueFlag=True)
        else:
            inner = value.clone(tagSet=innerTagSet)
        return encodeFun(inner, defMode, maxChunkSize, ifNotEmpty=ifNotEmpty), True, True
explicitlyTaggedItemEncoder = ExplicitlyTaggedItemEncoder()
class BooleanEncoder(AbstractItemEncoder):
    """Encodes BOOLEAN as a single content octet; definite length only."""
    supportIndefLenMode = False
    def encodeValue(self, encodeFun, value, defMode, maxChunkSize, ifNotEmpty=False):
        if value:
            return (1,), False, False
        return (0,), False, False
class IntegerEncoder(AbstractItemEncoder):
    """Encodes INTEGER as big-endian two's-complement octets."""
    supportIndefLenMode = False
    # When true, zero is encoded with no content octets at all.
    supportCompactZero = False
    def encodeValue(self, encodeFun, value, defMode, maxChunkSize, ifNotEmpty=False):
        if value != 0:
            return to_bytes(int(value), signed=True), False, True
        # de-facto way to encode zero
        if self.supportCompactZero:
            return (), False, False
        return (0,), False, False
class BitStringEncoder(AbstractItemEncoder):
    """Encodes BIT STRING: the first content octet holds the count of unused
    trailing bits; oversized values are split into constructed chunks."""
    def encodeValue(self, encodeFun, value, defMode, maxChunkSize, ifNotEmpty=False):
        valueLength = len(value)
        if valueLength % 8:
            # pad the tail with zero bits up to a whole octet
            alignedValue = value << (8 - valueLength % 8)
        else:
            alignedValue = value
        if not maxChunkSize or len(alignedValue) <= maxChunkSize * 8:
            substrate = alignedValue.asOctets()
            # leading octet = number of pad bits added above
            return int2oct(len(substrate) * 8 - valueLength) + substrate, False, True
        # chunked (constructed) encoding: emit maxChunkSize-octet slices
        stop = 0
        substrate = null
        while stop < valueLength:
            start = stop
            stop = min(start + maxChunkSize * 8, valueLength)
            substrate += encodeFun(alignedValue[start:stop], defMode, maxChunkSize, ifNotEmpty=ifNotEmpty)
        return substrate, True, True
class OctetStringEncoder(AbstractItemEncoder):
    """Encodes OCTET STRING, either as a single primitive run of octets or,
    when the value exceeds maxChunkSize, as a constructed sequence of
    maxChunkSize-sized chunks."""
    def encodeValue(self, encodeFun, value, defMode, maxChunkSize, ifNotEmpty=False):
        if not maxChunkSize or len(value) <= maxChunkSize:
            # fits in one primitive encoding
            return value.asOctets(), False, True
        substrate = null
        offset = 0
        chunk = value.clone(value[offset:offset + maxChunkSize])
        while chunk:
            substrate += encodeFun(chunk, defMode, maxChunkSize, ifNotEmpty=ifNotEmpty)
            offset += maxChunkSize
            chunk = value.clone(value[offset:offset + maxChunkSize])
        return substrate, True, True
class NullEncoder(AbstractItemEncoder):
    """Encodes NULL: no content octets, definite length only."""
    supportIndefLenMode = False
    def encodeValue(self, encodeFun, value, defMode, maxChunkSize, ifNotEmpty=False):
        # NULL carries an empty payload
        return null, False, True
class ObjectIdentifierEncoder(AbstractItemEncoder):
    """Encodes OBJECT IDENTIFIER: the first two arcs are folded into a single
    subidentifier, then each subidentifier is emitted base-128 big-endian
    with the continuation bit set on all but the last octet."""
    supportIndefLenMode = False
    def encodeValue(self, encodeFun, value, defMode, maxChunkSize, ifNotEmpty=False):
        oid = value.asTuple()
        # Build the first pair
        try:
            first = oid[0]
            second = oid[1]
        except IndexError:
            raise error.PyAsn1Error('Short OID %s' % (value,))
        if 0 <= second <= 39:
            # fold (first, second) into first*40 + second
            if first == 1:
                oid = (second + 40,) + oid[2:]
            elif first == 0:
                oid = (second,) + oid[2:]
            elif first == 2:
                oid = (second + 80,) + oid[2:]
            else:
                raise error.PyAsn1Error('Impossible first/second arcs at %s' % (value,))
        elif first == 2:
            # root arc 2 is the only one allowing a second arc above 39
            oid = (second + 80,) + oid[2:]
        else:
            raise error.PyAsn1Error('Impossible first/second arcs at %s' % (value,))
        octets = ()
        # Cycle through subIds
        for subOid in oid:
            if 0 <= subOid <= 127:
                # Optimize for the common case
                octets += (subOid,)
            elif subOid > 127:
                # Pack large Sub-Object IDs
                res = (subOid & 0x7f,)
                subOid >>= 7
                while subOid:
                    res = (0x80 | (subOid & 0x7f),) + res
                    subOid >>= 7
                # Add packed Sub-Object ID to resulted Object ID
                octets += res
            else:
                raise error.PyAsn1Error('Negative OID arc %s at %s' % (subOid, value))
        return octets, False, False
class RealEncoder(AbstractItemEncoder):
    """Encodes REAL: special values, base-10 character form, or binary form
    with a normalized mantissa/exponent in base 2, 8 or 16."""
    supportIndefLenMode = 0
    binEncBase = 2 # set to None to choose encoding base automatically
    @staticmethod
    def _dropFloatingPoint(m, encbase, e):
        # Rescale mantissa/exponent until the mantissa is integral in the
        # requested base; returns (mantissa sign, |mantissa|, base, exponent).
        ms, es = 1, 1
        if m < 0:
            ms = -1 # mantissa sign
        if e < 0:
            es = -1 # exponenta sign
        m *= ms
        if encbase == 8:
            # exponent counts powers of 8: absorb the remainder into m
            m *= 2 ** (abs(e) % 3 * es)
            e = abs(e) // 3 * es
        elif encbase == 16:
            # exponent counts powers of 16: absorb the remainder into m
            m *= 2 ** (abs(e) % 4 * es)
            e = abs(e) // 4 * es
        while True:
            if int(m) != m:
                # shift fractional bits out of the mantissa
                m *= encbase
                e -= 1
                continue
            break
        return ms, int(m), encbase, e
    def _chooseEncBase(self, value):
        # Pick the binary base: honor the value's or encoder's preset base,
        # otherwise probe 2/8/16 and keep the smallest exponent (ties broken
        # by the smaller mantissa).
        m, b, e = value
        encBase = [2, 8, 16]
        if value.binEncBase in encBase:
            return self._dropFloatingPoint(m, value.binEncBase, e)
        elif self.binEncBase in encBase:
            return self._dropFloatingPoint(m, self.binEncBase, e)
        # auto choosing base 2/8/16
        mantissa = [m, m, m]
        exponenta = [e, e, e]
        sign = 1
        encbase = 2
        e = float('inf')
        for i in range(3):
            (sign,
             mantissa[i],
             encBase[i],
             exponenta[i]) = self._dropFloatingPoint(mantissa[i], encBase[i], exponenta[i])
            if abs(exponenta[i]) < abs(e) or (abs(exponenta[i]) == abs(e) and mantissa[i] < m):
                e = exponenta[i]
                m = int(mantissa[i])
                encbase = encBase[i]
        return sign, m, encbase, e
    def encodeValue(self, encodeFun, value, defMode, maxChunkSize, ifNotEmpty=False):
        # Special values first: plus/minus infinity have fixed encodings.
        if value.isPlusInf:
            return (0x40,), False, False
        if value.isMinusInf:
            return (0x41,), False, False
        m, b, e = value
        if not m:
            # zero mantissa encodes as empty content
            return null, False, True
        if b == 10:
            # decimal character form (NR3-like representation)
            return str2octs('\x03%dE%s%d' % (m, e == 0 and '+' or '', e)), False, True
        elif b == 2:
            fo = 0x80 # binary encoding
            ms, m, encbase, e = self._chooseEncBase(value)
            if ms < 0: # mantissa sign
                fo |= 0x40 # sign bit
            # exponenta & mantissa normalization
            if encbase == 2:
                while m & 0x1 == 0:
                    m >>= 1
                    e += 1
            elif encbase == 8:
                while m & 0x7 == 0:
                    m >>= 3
                    e += 1
                fo |= 0x10
            else: # encbase = 16
                while m & 0xf == 0:
                    m >>= 4
                    e += 1
                fo |= 0x20
            sf = 0 # scale factor
            while m & 0x1 == 0:
                m >>= 1
                sf += 1
            if sf > 3:
                raise error.PyAsn1Error('Scale factor overflow') # bug if raised
            fo |= sf << 2
            # serialize the exponent big-endian, two's complement
            eo = null
            if e == 0 or e == -1:
                eo = int2oct(e & 0xff)
            else:
                while e not in (0, -1):
                    eo = int2oct(e & 0xff) + eo
                    e >>= 8
                # keep the sign unambiguous in the leading octet
                if e == 0 and eo and oct2int(eo[0]) & 0x80:
                    eo = int2oct(0) + eo
                if e == -1 and eo and not (oct2int(eo[0]) & 0x80):
                    eo = int2oct(0xff) + eo
            n = len(eo)
            if n > 0xff:
                raise error.PyAsn1Error('Real exponent overflow')
            # encode the exponent-length indicator into the format octet
            if n == 1:
                pass
            elif n == 2:
                fo |= 1
            elif n == 3:
                fo |= 2
            else:
                fo |= 3
                eo = int2oct(n & 0xff) + eo
            # serialize the mantissa big-endian
            po = null
            while m:
                po = int2oct(m & 0xff) + po
                m >>= 8
            substrate = int2oct(fo) + eo + po
            return substrate, False, True
        else:
            raise error.PyAsn1Error('Prohibited Real base %s' % b)
class SequenceEncoder(AbstractItemEncoder):
    """Encodes Sequence/Set values component by component (back to front),
    skipping optional components with no value and defaulted components
    still equal to their default."""
    def encodeValue(self, encodeFun, value, defMode, maxChunkSize, ifNotEmpty=False):
        value.verifySizeSpec()
        namedTypes = value.componentType
        substrate = null
        # iterate in reverse so each component encoding is prepended
        for idx in reversed(range(len(value))):
            if namedTypes:
                namedType = namedTypes[idx]
                if namedType.isOptional and not value[idx].isValue:
                    continue
                if namedType.isDefaulted and value[idx] == namedType.asn1Object:
                    continue
            substrate = encodeFun(value[idx], defMode, maxChunkSize) + substrate
        return substrate, True, True
class SequenceOfEncoder(AbstractItemEncoder):
    """Encodes SequenceOf/SetOf by concatenating component encodings."""
    def encodeValue(self, encodeFun, value, defMode, maxChunkSize, ifNotEmpty=False):
        value.verifySizeSpec()
        substrate = null
        # iterate in reverse so each component encoding is prepended
        for idx in reversed(range(len(value))):
            substrate = encodeFun(value[idx], defMode, maxChunkSize, ifNotEmpty=False) + substrate
        return substrate, True, True
class ChoiceEncoder(AbstractItemEncoder):
    """Encodes CHOICE by encoding whichever component is currently set."""
    def encodeValue(self, encodeFun, value, defMode, maxChunkSize, ifNotEmpty=False):
        component = value.getComponent()
        return encodeFun(component, defMode, maxChunkSize, ifNotEmpty=False), True, True
class AnyEncoder(OctetStringEncoder):
    """Encodes ANY: the value already holds a complete encoding, which is
    emitted verbatim; reported as constructed when not in definite mode."""
    def encodeValue(self, encodeFun, value, defMode, maxChunkSize, ifNotEmpty=False):
        octets = value.asOctets()
        return octets, defMode == False, True
# Encoders selected by ASN.1 tag set (used for tagged-value lookups).
tagMap = {
    eoo.endOfOctets.tagSet: EndOfOctetsEncoder(),
    univ.Boolean.tagSet: BooleanEncoder(),
    univ.Integer.tagSet: IntegerEncoder(),
    univ.BitString.tagSet: BitStringEncoder(),
    univ.OctetString.tagSet: OctetStringEncoder(),
    univ.Null.tagSet: NullEncoder(),
    univ.ObjectIdentifier.tagSet: ObjectIdentifierEncoder(),
    univ.Enumerated.tagSet: IntegerEncoder(),
    univ.Real.tagSet: RealEncoder(),
    # Sequence & Set have same tags as SequenceOf & SetOf
    univ.SequenceOf.tagSet: SequenceOfEncoder(),
    univ.SetOf.tagSet: SequenceOfEncoder(),
    univ.Choice.tagSet: ChoiceEncoder(),
    # character string types
    char.UTF8String.tagSet: OctetStringEncoder(),
    char.NumericString.tagSet: OctetStringEncoder(),
    char.PrintableString.tagSet: OctetStringEncoder(),
    char.TeletexString.tagSet: OctetStringEncoder(),
    char.VideotexString.tagSet: OctetStringEncoder(),
    char.IA5String.tagSet: OctetStringEncoder(),
    char.GraphicString.tagSet: OctetStringEncoder(),
    char.VisibleString.tagSet: OctetStringEncoder(),
    char.GeneralString.tagSet: OctetStringEncoder(),
    char.UniversalString.tagSet: OctetStringEncoder(),
    char.BMPString.tagSet: OctetStringEncoder(),
    # useful types
    useful.ObjectDescriptor.tagSet: OctetStringEncoder(),
    useful.GeneralizedTime.tagSet: OctetStringEncoder(),
    useful.UTCTime.tagSet: OctetStringEncoder()
}
# Put in ambiguous & non-ambiguous types for faster codec lookup
typeMap = {
    univ.Boolean.typeId: BooleanEncoder(),
    univ.Integer.typeId: IntegerEncoder(),
    univ.BitString.typeId: BitStringEncoder(),
    univ.OctetString.typeId: OctetStringEncoder(),
    univ.Null.typeId: NullEncoder(),
    univ.ObjectIdentifier.typeId: ObjectIdentifierEncoder(),
    univ.Enumerated.typeId: IntegerEncoder(),
    univ.Real.typeId: RealEncoder(),
    # Sequence & Set have same tags as SequenceOf & SetOf
    univ.Set.typeId: SequenceEncoder(),
    univ.SetOf.typeId: SequenceOfEncoder(),
    univ.Sequence.typeId: SequenceEncoder(),
    univ.SequenceOf.typeId: SequenceOfEncoder(),
    univ.Choice.typeId: ChoiceEncoder(),
    univ.Any.typeId: AnyEncoder(),
    # character string types
    char.UTF8String.typeId: OctetStringEncoder(),
    char.NumericString.typeId: OctetStringEncoder(),
    char.PrintableString.typeId: OctetStringEncoder(),
    char.TeletexString.typeId: OctetStringEncoder(),
    char.VideotexString.typeId: OctetStringEncoder(),
    char.IA5String.typeId: OctetStringEncoder(),
    char.GraphicString.typeId: OctetStringEncoder(),
    char.VisibleString.typeId: OctetStringEncoder(),
    char.GeneralString.typeId: OctetStringEncoder(),
    char.UniversalString.typeId: OctetStringEncoder(),
    char.BMPString.typeId: OctetStringEncoder(),
    # useful types
    useful.ObjectDescriptor.typeId: OctetStringEncoder(),
    useful.GeneralizedTime.typeId: OctetStringEncoder(),
    useful.UTCTime.typeId: OctetStringEncoder()
}
class Encoder(object):
    """BER encoder entry point.

    Selects a concrete item encoder for a pyasn1 value -- by typeId first,
    falling back to the value's base tag set -- and delegates encoding to it.

    :param tagMap: encoders keyed by tag set
    :param typeMap: encoders keyed by typeId; defaults to an empty mapping
    """
    supportIndefLength = True
    def __init__(self, tagMap, typeMap=None):
        # The original signature used a mutable default argument
        # (typeMap={}); a None sentinel avoids any accidental sharing of
        # the default dict across Encoder instances.
        self.__tagMap = tagMap
        self.__typeMap = {} if typeMap is None else typeMap
    def __call__(self, value, defMode=True, maxChunkSize=0, ifNotEmpty=False):
        """Encode *value* into a BER substrate.

        :param value: pyasn1 object to encode
        :param defMode: use definite-length encoding when True
        :param maxChunkSize: maximum chunk size for constructed encodings
            (0 means unlimited)
        :param ifNotEmpty: skip encoding altogether when the value is empty
        :raises error.PyAsn1Error: on unsupported indefinite mode or when
            no encoder is registered for the value
        """
        if not defMode and not self.supportIndefLength:
            raise error.PyAsn1Error('Indefinite length encoding not supported by this codec')
        if debug.logger & debug.flagEncoder:
            logger = debug.logger
        else:
            logger = None
        if logger:
            logger('encoder called in %sdef mode, chunk size %s for type %s, value:\n%s' % (not defMode and 'in' or '', maxChunkSize, value.prettyPrintType(), value.prettyPrint()))
        tagSet = value.tagSet
        if len(tagSet) > 1:
            # multiple tags -> peel the outermost one explicitly
            concreteEncoder = explicitlyTaggedItemEncoder
        else:
            try:
                concreteEncoder = self.__typeMap[value.typeId]
            except KeyError:
                # use base type for codec lookup to recover untagged types
                baseTagSet = tag.TagSet(value.tagSet.baseTag, value.tagSet.baseTag)
                try:
                    concreteEncoder = self.__tagMap[baseTagSet]
                except KeyError:
                    raise error.PyAsn1Error('No encoder for %s' % (value,))
        if logger:
            logger('using value codec %s chosen by %s' % (concreteEncoder.__class__.__name__, tagSet))
        substrate = concreteEncoder.encode(
            self, value, defMode, maxChunkSize, ifNotEmpty=ifNotEmpty
        )
        if logger:
            logger('built %s octets of substrate: %s\nencoder completed' % (len(substrate), debug.hexdump(substrate)))
        return substrate
#: Turns ASN.1 object into BER octet stream.
#:
#: Takes any ASN.1 object (e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative)
#: walks all its components recursively and produces a BER octet stream.
#:
#: Parameters
#: ----------
#: value: any pyasn1 object (e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative)
#:     A pyasn1 object to encode
#:
#: defMode: :py:class:`bool`
#:     If `False`, produces indefinite length encoding
#:
#: maxChunkSize: :py:class:`int`
#:     Maximum chunk size in chunked encoding mode (0 denotes unlimited chunk size)
#:
#: Returns
#: -------
#: : :py:class:`bytes` (Python 3) or :py:class:`str` (Python 2)
#:     Given ASN.1 object encoded into BER octetstream
#:
#: Raises
#: ------
#: : :py:class:`pyasn1.error.PyAsn1Error`
#:     On encoding errors
encode = Encoder(tagMap, typeMap)
| |
#!/usr/bin/python3
"""
.. moduleauthor:: Albert Heinle<albert.heinle@gmail.com>
"""
#import threading
import multiprocessing
import constants
from nltk.tokenize import word_tokenize
import os
import logging
class Matcher(multiprocessing.Process):
"""
This is one of an army of in parallel running Threads.
It receives a reference to a complete dictionary of products
(hashed by the manufacturer), and a subset of the listings.
It tries to match the listings to the products and writes a file
representing the successful matches.
"""
def __init__(self, beginIndex, products_dict,
listings_chunk,result_path = constants.result_path):
"""
The constructor of Matcher. It gets passed a beginIndex
(unique for each thread; will not be checked),
which represents the index (with
respect to the total number of listings) of the
first listing it tries to match, a dictionary of products
(hashed by manufacturer) and a list of listings it tries
to match. The beginindex is also part of a filename that is
written, which contains all the found matches.
As an optional parameter, the Matcher gets a path to a folder
where the final results should be stored.
:param beginIndex: unique beginning index of the chunk this
instance tries to match with respect to all
listings
:type beginIndex: Int (non-negative)
:param products_dict: A dictionary containing all products,
hashed by the manufacturer
:type products_dict: dict(String:[Product])
:param listings_chunk: A list of Listing instances that this
class is trying to match.
:type listings_chunk: list(Listing)
:param result_path: The path to the resulting files
:type result_path: String
"""
multiprocessing.Process.__init__(self)
self.__beginIndex = beginIndex
self.__products_dict = products_dict
self.__listings_chunk = listings_chunk
self.__matches_dict = {}
self.__result_path = result_path
logging.basicConfig(level = logging.DEBUG,
handlers = [
logging.FileHandler(
os.path.join(result_path,"Matcher.log"),
mode="w")])
def __findPositionInHashTable(self,listing):
"""
Given a listing of type Listing, this function finds the
respective position of possible products (based on the
manufacturer) in the given products dictionary.
It returns an empty string if no such position can be
determined, or the key-value.
:param listing: The listing we try to find a position for
:type listing: Listing
:returns: The String representing the product in the
Hash table
:rtype: String
"""
manu_lower = listing.getManufacturer().lower()
if manu_lower =="":
#In this case, we should "try" to find a possible
#manufacturer based on Heuristics
count = 0
tempRes = ""
title_tt = word_tokenize(listing.getTitle())
title_tt = list(map(lambda x: x.lower, title_tt))
for p in self.__products_dict:
if p in title_tt:
count += 1
tempRes = p
if count != 1:
#don't risk ambiguity
return ""
return tempRes
if manu_lower in self.__products_dict:
return manu_lower
else:
manu_lower_wt = word_tokenize(manu_lower)
matches = []
for m in manu_lower_wt:
if m in self.__products_dict:
matches.append(m)
if (len(matches)!=1):
#we don't want to take the risk in case of ambiguity
return ""
return matches[0]
def __getMatchingsForListing(self, listing):
"""
Given a listing of type Listing, this function matches the
listing to possible products in the internal dictionary of
products. It returns a list of such products (or an empty list
if there are none).
:param listing: The listing we try to find a position for
:type listing: Listing
:returns: A tuple consisting of the matching value and
the product
:rtype: (Int, Product)
"""
hashEntry = self.__findPositionInHashTable(listing)
if hashEntry == "":
return []
potProducts = self.__products_dict[hashEntry]
matches = []
for p in potProducts:
match_outp = Matcher.is_match(p,listing)
if match_outp>=1:
matches.append((match_outp,p))
return matches
def __filterMatchings(self, matchings):
"""
This function gets called when there is more than one matching
in the list of matchings in the run function, and it tries to
filter out one specific matching out of it. If all the
matchings had a matching factor of 1, empty list is returned.
ASSUMPTIONS:
- There is more than one matching in the list
:param matchings: The list of matchings
:type matchings: [(Int, Product)]
:returns: The list of most likely matchings
:rtype: [(Int, Product)]
"""
twoMatches = list(filter(lambda x: x[0] == 2,matchings))
if len(twoMatches)<=1:
#Easy case: we already found it, or there was no clear match
return twoMatches
#find the most sound match
result = []
for i in range(len(twoMatches)):
for j in range(i+1,len(twoMatches)):
if (twoMatches[i][1].getModel() in
twoMatches[j][1].getModel()):
result.append(twoMatches[j])
elif (twoMatches[j][1].getModel() in
twoMatches[i][1].getModel()):
result.append(twoMatches[i])
if result == []:
return twoMatches
else:
return result
    def run(self):
        """
        Tries to find matches of different products with listings.
        When done, for each product name s, if the product had
        matches, it writes a file named s_i.txt, where i is the class
        variable representing the begin index (unique to the thread),
        where a record of the match is given.
        """
        for l in self.__listings_chunk:
            matching = self.__getMatchingsForListing(l)
            if len(matching)!=1:
                # ambiguous or empty result -- try to narrow it down
                matching = self.__filterMatchings(matching)
                if (len(matching)!=1):
                    if (len(matching)==0):
                        logging.debug("Could not find match for\
 listing %s"%str(l))
                    else:
                        logging.debug("More than one match for listing\
 %s:"%str(l))
                        for p in matching:
                            logging.debug(str(p[1]))
                    continue #in this case, ambiguity was there
            # exactly one match: record the listing under the product name
            newId = matching[0][1].getName()
            if newId in self.__matches_dict:
                self.__matches_dict[newId].append(l)
            else:
                self.__matches_dict[newId] = [l]
        self.__writeMatchesToFiles()
def __writeMatchesToFiles(self):
"""
Assuming that the matches internal dictionary has already been
filled, this function writes the files containing the matches
for each product. In particular, for each product name s, a
file named s_i.txt, where i is the unique begin index of this
thread, will be generated containing all the listings that
match s. This file will only be generated if s had a matching
though.
"""
for s in self.__matches_dict:
l_matchings = self.__matches_dict[s]
l_matchings = ",".join(map(lambda x: x.toJSON(), l_matchings))
f = open(os.path.join(self.__result_path,
"%s_%d.txt"%(s,self.__beginIndex)),
encoding='utf-8',
mode='w')
f.write(l_matchings)
f.close()
def isInTokenizedString(s,ts):
"""
This function takes in a string s and a list of strings ts and
determines if s can be found in ts (maybe by even merging some
consecutive tokens).
Assumptions:
- the elements in ts are all lowercase and don't have the
symbols '-',' ' and '_'
:param s: The string we want to find
:type s: String
:param ts: The list of tokens where we want to find s
:type ts: [String]
:returns: True, if s in ts, False otherwise
:rtype: Bool
"""
s = s.lower()
s = "".join(list(filter(lambda x: not x in "_- ",s)))
if s in ts:
return True
tempString = ""
for i in range(len(ts)):
tempString += ts[i]
if tempString==s:
return True
if not (tempString in s):
tempString = ""
return False
    def is_match(product, listing):
        """
        This function is passed a variable product of type Product,
        a variable listing of type Listing. It returns several possible
        values.
            - -1: No way it is a match
            -  0: It could or could not be a match
            -  1: Manufacturer and Model are given
            -  2: Manufacturer, Model and family are found
        The algorithm idea is the following:
            - Tokenize the words in the title.
            - If you find e.g. the words "for" or "with", delete this
              word and anything after it, as it has nothing to do with
              the product.  The word "for" would describe what the
              product is used for, and model numbers appearing
              afterwards would deceive, since they can name other
              products.  The word "with" describes certain extras, that
              are usually not provided in product descriptions.  A
              complete list of values as given in lowercase are
              provided in constants.py.
            - In the remaining tokens, try to find
                - the manufacturer
                - the family (if given)
                - the model
              While doing this, one has to keep in mind that there are
              different ways to write a model name. e.g. a "Canon
              PowerShot SX130 IS" may appear as "Canon powershot
              SX-130-IS". For this implementation, we assume that we
              should remove any letters like '_', '-' and ' ' and then
              try to find the model number or so. We return 1, if we
              find the manufacturer and the model. If furthermore we
              can find the family, we return 2 in this simplistic
              model.  We return 0 if we find the manufacturer, the
              family (if given), but not the model. For all the other
              cases we just return -1.
        Assumptions:
            - The product and the listing manufacturer is the same
            - 0 should technically never appear. But if it does, we
              should log that.
        :param product: A product that is a potential match
        :type product: Product
        :param listing: A listing that potentially matches product
        :type listing: Listing
        :returns: A number representing the likeliness of the
                  listing being matched to the product
        :rtype: Int
        """
        token_title = word_tokenize(listing.getTitle())
        #finding words like "for" and "with"
        token_title_lc = list(map(lambda x: x.lower(),token_title))
        # Index of the first separator word ("for", "with", ...); tokens
        # from there on are cut off below.
        minIndex = len(token_title_lc)
        for i in constants.separator_words:
            if i in token_title_lc:
                ind_of_i = token_title_lc.index(i)
                if ind_of_i < minIndex:
                    minIndex = ind_of_i
        if minIndex != len(token_title_lc):
            token_title = token_title[:minIndex]
            token_title_lc = token_title_lc[:minIndex]
        #make a consecutive string out of it
        # Strip '_', '-' and ' ' from every token so that variant
        # spellings like "SX-130-IS" and "SX130 IS" compare equal.
        merged_tt = list(map(lambda x:
                "".join(list(filter(lambda y: not y in "_- ",x))),
                token_title_lc))
        #Now we try to find the model
        model = product.getModel().lower()
        model_found = Matcher.isInTokenizedString(model,merged_tt)
        #Now we try to find the manufacturer
        manufacturer = product.getManufacturer().lower()
        manu_found = Matcher.isInTokenizedString(manufacturer,merged_tt)
        #if the family is given, we also try to find it:
        family = product.getFamily().lower()
        #family = "".join(list(filter(lambda x: not x in "_- ",family)))
        if family!="":
            family_found = Matcher.isInTokenizedString(family,merged_tt)
        else:
            family_found = False
        # Score: 2 = manufacturer+model+family, 1 = manufacturer+model,
        # 0 = manufacturer+family but no model (should be logged by the
        # caller), -1 = anything else.
        if (family_found and
            model_found and
            manu_found):
            return 2
        if (manu_found and
            model_found):
            return 1
        if manu_found and family_found:
            return 0
        return -1
| |
#!/usr/bin/env python
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
"""Python MadMimi client library."""
__author__ = ('tav@espians.com (tav),'
'jordan.bouvier@analytemedia.com (Jordan Bouvier)')
__maintainer__ = 'jordan.bouvier@analytemedia.com (Jordan Bouvier)'
import csv
import logging
try:
from cStringIO import StringIO
except ImportError:
#from StringIO import StringIO
from io import StringIO
try:
from urllib import quote, urlencode
except ImportError:
from urllib.parse import quote, urlencode
try:
from urllib2 import urlopen
except ImportError:
from urllib.request import urlopen
try:
from xml.etree import cElementTree as ElementTree
except ImportError:
try:
import cElementTree as ElementTree
except ImportError:
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
from yaml import dump, safe_dump
# Default CSV header row used by MadMimi.add_contacts.
# NOTE(review): 'first name' uses a space while 'last_name' uses an
# underscore -- confirm against the MadMimi CSV import format before
# "fixing" either spelling.
DEFAULT_CONTACT_FIELDS = ('first name', 'last_name', 'email', 'tags')
def parse_lists(response):
    """Parse the MadMimi lists XML into a dict of MailingList objects.

    Arguments:
        response: XML document (as returned by the audience_lists API)
            containing ``<list id=... name=... subscriber_count=...>``
            elements.

    Returns:
        {'list name': <MailingList>, ...}
    """
    tree = ElementTree.ElementTree()
    lists = {}
    tree.parse(StringIO(response))
    # ElementTree.getiterator() was deprecated in 2.7 and removed in
    # Python 3.9; iter() is the supported equivalent.
    for elem in tree.iter('list'):
        lists[elem.attrib['name']] = MailingList(elem.attrib['id'],
                                                 elem.attrib['name'],
                                                 elem.attrib['subscriber_count'])
    return lists
class MailingList(object):
    """A single MadMimi audience list: id, name and subscriber count."""

    def __init__(self, list_id=0, list_name="", subscribers=0):
        # Attribute names are part of the public interface
        # (callers read .id / .name / .subscribers).
        self.id = list_id
        self.name = list_name
        self.subscribers = subscribers

    def __unicode__(self):
        return u"<MailingList: %s>" % self.name

    def __repr__(self):
        return "<MailingList: %s>" % self.name
class MadMimi(object):
    """
    The client is straightforward to use:
    >>> mimi = MadMimi('user@foo.com', 'account-api-key')
    You can use it to list existing lists:
    >>> mimi.lists()
    {'test': <MailingList: test>}
    >>> mimi.lists()["test"].subscribers
    3
    >>> mimi.lists()["test"].name
    "test"
    Delete any of them:
    >>> mimi.delete_list('test')
    Create new ones:
    >>> mimi.add_list('ampify')
    Add new contacts:
    >>> mimi.add_contact(['Tav', 'Espian', 'tav@espians.com'])
    Subscribe contacts to a list:
    >>> mimi.subscribe('tav@espians.com', 'ampify')
    See what lists a contact is subscribed to:
    >>> mimi.subscriptions('tav@espians.com')
    <lists>
      <list subscriber_count="1" name="ampify" id="77461"/>
    </lists>
    And, of course, unsubscribe a contact from a list:
    >>> mimi.unsubscribe('tav@espians.com', 'ampify')
    >>> mimi.subscriptions('tav@espians.com')
    <lists>
    </lists>
    Send a transactional email:
    >>> mimi.send_message('John Doe','johndoe@gmail.com','Promotion Name',
    ...                   'Subject of the message','sender@email.com',
    ...                   {'var1':'This will go to the template'})
    '1146680279'
    Send an email to a list:
    >>> mimi.send_message_to_list('List Name', 'Promotion Name',
    ...                           {'var1':'This will go to the template'})
    '1223645'
    """
    base_url = 'http://api.madmimi.com/'
    secure_base_url = 'https://api.madmimi.com/'

    def __init__(self, username, api_key):
        self.username = username
        self.api_key = api_key
        # Kept as an instance attribute so tests can inject a fake opener.
        self.urlopen = urlopen
        self.logger = logging.getLogger('madmimi')
        self.logger.setLevel(logging.WARNING)

    def _get(self, method, **params):
        """Issue a GET request to Madmimi.

        Arguments:
            method: The path to the API method you are accessing, relative
                to the site root.
            is_secure: If is_secure is True, the GET request will be issued
                to MadMimi's secure server.

        Returns:
            The result of the HTTP request as a (byte) string.
        """
        # 'is_secure' is a client-side switch, not an API parameter:
        # pop it so it is not leaked into the query string.
        if params.pop('is_secure', None):
            url = self.secure_base_url
        else:
            url = self.base_url
        params['username'] = self.username
        params['api_key'] = self.api_key
        url = url + method + '?' + urlencode(params)
        self.logger.debug('get url: %s' % url)
        response = self.urlopen(url).read()
        self.logger.debug('response: %s' % response)
        return response

    def _post(self, method, **params):
        """Issue a POST request to Madmimi.

        Arguments:
            method: The path to the API method you are accessing, relative
                to the site root.
            is_secure: If is_secure is True, the POST request will be issued
                to MadMimi's secure server.

        Returns:
            The result of the HTTP request as a (byte) string.
        """
        # As in _get: remove the client-side switch before transmitting.
        if params.pop('is_secure', None):
            url = self.secure_base_url + method
        else:
            url = self.base_url + method
        params['username'] = self.username
        params['api_key'] = self.api_key
        if params.get('sender'):
            # The API expects 'from', which is a reserved word in Python,
            # so callers pass 'sender' instead.
            params['from'] = params['sender']
        self.logger.debug('post url: %s' % url)
        self.logger.debug('params: %s' % params)
        response = self.urlopen(url, urlencode(params).encode('utf-8')).read()
        self.logger.debug('response: %s' % response)
        return response

    def lists(self, as_xml=False):
        """Get a list of audience lists.

        Arguments:
            as_xml: If True, the result will be the raw XML response. If
                False the result will be a python dictionary of lists.
                Default is False. (Optional)

        Returns:
            The raw XML response or a dictionary of list names and objects.
            {'list name': <list object>, 'list2 name': <list object>}
        """
        response = self._get('audience_lists/lists.xml')
        if as_xml:
            return response
        else:
            return parse_lists(response)

    def add_list(self, name):
        """Add a new audience list.

        Arguments:
            name: The name of the audience list to add.

        Returns:
            Nothing. The API doesn't provide a response.
        """
        self._post('audience_lists', name=name)

    def delete_list(self, name):
        """Delete an audience list.

        Arguments:
            name: The name of the audience list to delete.

        Returns:
            Nothing. The API doesn't provide a response.
        """
        self._post('audience_lists/%s' % quote(name), _method='delete')

    def add_contacts(self, contacts_data, fields=DEFAULT_CONTACT_FIELDS,
                     audience_list=None):
        """Add audience members to your database.

        Arguments:
            contacts_data: A list of tuples containing contact data.
            fields: A tuple containing the fields that will be represented.
            audience_list: Optional audience list to add the contacts to.

        Returns:
            Nothing. The API doesn't provide a response.
        """
        csvdata = StringIO()
        writer = csv.writer(csvdata)
        writer.writerow(fields)           # header row names the columns
        writer.writerows(contacts_data)   # one row per contact
        self._post('audience_members', csv_file=csvdata.getvalue(),
                   audience_list=audience_list)

    def subscribe(self, email, audience_list):
        """Add an audience member to an audience list.

        Arguments:
            email: The email address to add to a list.
            audience_list: The audience list to add the email address to.

        Returns:
            Nothing. The API doesn't provide a response.
        """
        url = 'audience_lists/%s/add' % quote(audience_list)
        self._post(url, email=email)

    def unsubscribe(self, email, audience_list):
        """Remove an audience member from an audience list.

        Arguments:
            email: The email address to remove from a list.
            audience_list: The audience list to remove the email address from.

        Returns:
            Nothing. The API doesn't provide a response.
        """
        url = 'audience_lists/%s/remove' % quote(audience_list)
        self._post(url, email=email)

    def subscriptions(self, email, as_xml=False):
        """Get an audience member's current subscriptions.

        Arguments:
            email: The email address to look up.
            as_xml: If True, the result will be the raw XML response. If
                False the result will be a python dictionary of lists.
                Default is False. (Optional)

        Returns:
            The raw XML response or a dictionary of list names and objects
            of which the person is a member.
            {'list name': <list object>, 'list2 name': <list object>}
        """
        response = self._get('audience_members/%s/lists.xml' % quote(email))
        if as_xml:
            return response
        else:
            return parse_lists(response)

    def _yaml_body(self, body):
        """Stringify all values of *body* and YAML-encode it.

        Works on a copy so the caller's dict is never mutated, and uses
        dict.items() (the Python-2-only iteritems() crashed under
        Python 3).  safe_dump avoids !!python/str tags that plain
        dump() would emit.
        """
        body = dict((key, str(value)) for key, value in (body or {}).items())
        return safe_dump(body)

    def send_message(self, name, email, promotion, subject, sender, body=None,
                     raw_html=None, raw_plain_text=None):
        """Sends a message to a user.

        Arguments:
            name: Name of the person you are sending to.
            email: Email address of the person you are sending to.
            promotion: Name of the Mad Mimi promotion to send.
            subject: Subject of the email.
            sender: Email address the email should appear to be from.
            Only one of body, raw_html or raw_plain_text should be provided.
            Order of preference is html, plain text, body.
            body: Optional. Dict holding variables for the promotion
                template: {'variable': 'Replacement value'}.  (The old
                mutable default ``body={}`` was a shared-state hazard;
                ``None`` behaves identically for callers.)
            raw_html: Optional. If you want to send a message where the
                promotion doesn't already exist. Make sure the promotion
                name is unique.
            raw_plain_text: Optional. Same as raw_html except it is plain
                text.

        Returns:
            The transaction id of the message if successful.
            The error if unsuccessful.
        """
        recipients = "%s <%s>" % (name, email)
        if raw_html:
            post = self._post('mailer', promotion_name=promotion,
                recipients=recipients, subject=subject, sender=sender,
                raw_html=raw_html, is_secure=True)
        elif raw_plain_text:
            post = self._post('mailer', promotion_name=promotion,
                recipients=recipients, subject=subject, sender=sender,
                raw_plain_text=raw_plain_text, is_secure=True)
        else:
            # The YAML dump would fail if it encountered non-strings.
            post = self._post('mailer', promotion_name=promotion,
                recipients=recipients, subject=subject, sender=sender,
                body=self._yaml_body(body), is_secure=True)
        return post

    def send_message_to_list(self, list_name, promotion, body=None):
        """Send a promotion to a subscriber list.

        Arguments:
            list_name: Name of the subscriber list to send the promotion to.
            promotion: Name of the Mad Mimi promotion to send.
            body: Dict holding variables for the promotion template:
                {'variable': 'Replacement value'}

        Returns:
            The transaction id of the message if successful.
            The error if unsuccessful.
        """
        # The YAML dump would fail if it encountered non-strings.
        return self._post('mailer/to_list', promotion_name=promotion,
                          list_name=list_name, body=self._yaml_body(body),
                          is_secure=True)

    def message_status(self, transaction_id):
        """Get the status of a message.

        Arguments:
            transaction_id: The transaction id of the message you want to
                get the status for.

        Returns:
            One of the following strings:
                ignorant
                sending
                failed
                sent
                received
                clicked_through
                bounced
                retried
                retry_failed
                abused
        """
        url = 'mailers/status/%s' % transaction_id
        return self._get(url, is_secure=True)

    def supressed_since(self, date):
        """Get a list of email addresses that have opted out since date.

        (Method name keeps the historical 'supressed' spelling for
        backward compatibility with existing callers.)

        Arguments:
            date: Python datetime to retrieve opt outs since.
        """
        # NOTE(review): strftime('%s') is a platform-specific (glibc)
        # extension -- confirm behavior on non-Linux hosts.
        url = 'audience_members/suppressed_since/%s.txt' % date.strftime('%s')
        return self._get(url)

    def promotion_stats(self):
        """Get an XML document containing stats for all your promotions."""
        return self._get('promotions.xml')
| |
#!/usr/bin/env python
#######################
'''
suppressreflections.py regiondir image1 image2 image3...
Given the locations of reflection rings around stars, this module removes and masks reflections and rings.
Each image needs a weight and flag file.
Star reflections are marked by ds9 circle regions. Stars are seperated by a non-circle region in the file.
'''
##########################
import unittest, sys, re, os, glob, tempfile, subprocess, regionfile as rf
import wcsregionfile as wrf
import astropy, astropy.io.fits as pyfits
import leastsq
import numpy as np
from optparse import OptionParser
#########################
__cvs_id__ = "$Id: suppressreflections.py,v 1.20 2010-05-12 19:35:29 dapple Exp $"
#################################################
### MAIN
#################################################
from adam_quicktools_ArgCleaner import ArgCleaner
argv = ArgCleaner(sys.argv)
def main(argv = argv):
    """Entry point.

    argv[0] is the directory holding the ds9 region files; the
    remaining arguments are image filenames.  With -m/--mask only the
    weight/flag images are masked; otherwise the star reflections are
    also modeled and subtracted from the image.
    """
    parser = OptionParser()
    parser.add_option('-m', '--mask', dest='mask', default=False, action='store_true')
    options, args = parser.parse_args(argv)
    regiondir = args[0]
    images = args[1:]
    print options.mask
    if options.mask:
        # Mask-only mode: no reflection modeling or subtraction.
        print 'masking!'
        for imagefile in images:
            print 'Processing %s' % imagefile
            weightfile, flagfile, regionbase = findAssociatedFiles(imagefile)
            regionfile = os.path.join(regiondir, regionbase)
            outimage, outweight, outflag = findOutfiles(imagefile, 'R')
            processMask(imagefile, weightfile, flagfile, regionfile,
                        outimage, outweight, outflag)
    else:
        # Full mode: subtract modeled reflections, then mask.
        for imagefile in images:
            print 'Processing %s' % imagefile
            weightfile, flagfile, regionbase = findAssociatedFiles(imagefile)
            print ' weightfile=',weightfile , ' flagfile=',flagfile , ' regionbase=',regionbase
            regionfile = os.path.join(regiondir, regionbase)
            print ' regionfile=',regionfile
            outimage, outweight, outflag = findOutfiles(imagefile, 'R')
            processImage(imagefile, weightfile, flagfile, regionfile,
                         outimage, outweight, outflag)
##################################################
### SITE SPECIFIC ASSUMPTIONS
##################################################
# SExtractor configuration used by createObjectMask; resolved relative
# to the current working directory.
__sextractor_config_file__ = "photconf/suppressreflections.config.sex"
###########################
def findAssociatedFiles(filename):
    '''
    @brief Locate the weight, flag and region files belonging to an image
    @param filename filename of the input image
    @returns (weightfile, flagfile, regionfile) where regionfile is a
             basename ('<root>.reg'); weight/flag images live in a
             WEIGHTS directory next to the image directory
    @raises ValueError if the basename has no '<prefix>_<number>' chunk
    '''
    dirname, basefilename = os.path.split(filename)
    base, ext = os.path.splitext(basefilename)
    weightdir = '%s/../WEIGHTS' % dirname
    weightfile = os.path.normpath(os.path.join(weightdir, '%s.weight.fits' % base))
    flagfile = os.path.normpath(os.path.join(weightdir, '%s.flag.fits' % base))
    # Raw string: '\d' in a plain literal is an invalid escape sequence
    # (SyntaxWarning on modern Pythons).
    match = re.match(r'(.+?_\d+)\w*', base)
    if match is None:
        raise ValueError('Cannot recognize filename: %s' % base)
    root = match.group(1)
    regionfile = '%s.reg' % root
    return weightfile, flagfile, regionfile
################################
def findOutfiles(imagefile, extension):
    '''
    @brief Build output image/weight/flag filenames with a suffix appended
    @param imagefile input image filename
    @param extension suffix appended to the basename.  The original
           implementation silently ignored this parameter and hard-coded
           'R'; since the only visible caller passes 'R', honoring it is
           backward compatible.
    @returns (outimage, outweight, outflag)
    '''
    dirname, basefilename = os.path.split(imagefile)
    base, ext = os.path.splitext(basefilename)
    newbase = '%s%s' % (base, extension)
    outimage = '%s/%s.fits' % (dirname, newbase)
    weightdir = '%s/../WEIGHTS' % dirname
    outweight = os.path.normpath(os.path.join(weightdir, '%s.weight.fits' % newbase))
    outflag = os.path.normpath(os.path.join(weightdir, '%s.flag.fits' % newbase))
    return outimage, outweight, outflag
##################################################
# USER FUNCTIONS
##################################################
def createLinks(imagefile, weightfile, flagfile,
                outimage, outweight, outflag):
    '''
    @brief Pass the inputs through unchanged by symlinking them to the
           output names (replacing any pre-existing outputs)
    @param imagefile input image filename
    @param weightfile input weight image filename
    @param flagfile input flag image filename
    @param outimage output image link name
    @param outweight output weight link name
    @param outflag output flag link name
    @returns none
    '''
    pairs = ((imagefile, outimage),
             (weightfile, outweight),
             (flagfile, outflag))
    for source, destination in pairs:
        if os.path.exists(destination):
            os.remove(destination)
        os.symlink(source, destination)
    return
def processImage(imagefile, weightfile, flagfile, regionfile,
                 outimage, outweight, outflag):
    '''
    @brief Given input and output filenames, processes star subtraction and masking
    @param imagefile filename of input image
    @param weightfile filename of input weight image
    @param flagfile filename of input flag image
    @param regionfile filename of input region file detailing star reflection circles
    @param outimage filename for star subtracted image
    @param outweight filename for masked weight file
    @param outflag filename for masked flag file
    @returns none
    '''
    # No region file: nothing to subtract, so outputs are symlinks to
    # the unmodified inputs.
    if not os.path.exists(regionfile):
        print 'Regionfile does not exist'
        createLinks(imagefile, weightfile, flagfile, outimage, outweight, outflag)
        return
    image, imageheader = loadImage(imagefile)
    stars = readRegionFile(open(regionfile).readlines(), image.shape)
    if len(stars) == 0:
        # Region file exists but contains no usable stars: pass through.
        print 'No Stars in region file'
        createLinks(imagefile, weightfile, flagfile, outimage, outweight, outflag)
        return
    weights, weightheader = loadImage(weightfile)
    flags, flagheader = loadImage(flagfile)
    # Combine the flag image with a SExtractor object mask so that real
    # sources are excluded when fitting the reflection model.
    objectMask = createObjectMask(imagefile, weightfile, flagfile)
    mask = flags.copy()
    mask[objectMask > 0] = 1
    newimage = subtractStars(image, mask, stars, weight = weights, buffer = 20)
    newweight, newflag = maskStars(weights, flags, stars)
    saveImage(outimage, newimage, imageheader)
    saveImage(outweight, newweight)
    saveImage(outflag, newflag)
##########################
def processMask(imagefile, weightfile, flagfile, regionfile,
                outimage, outweight, outflag):
    '''
    @brief Given input and output filenames, processes star masking only
    @param imagefile filename of input image (not read in this mode)
    @param weightfile filename of input weight image
    @param flagfile filename of input flag image
    @param regionfile filename of input region file detailing star reflection circles
    @param outimage filename for star subtracted image (unused in this mode)
    @param outweight filename for masked weight file
    @param outflag filename for masked flag file
    @returns none
    '''
    weights, weightheader = loadImage(weightfile)
    flags, flagheader = loadImage(flagfile)
    # NOTE(review): 'mask' is assigned but never used in this function.
    mask = flags
    stars = readRegionFile(open(regionfile).readlines(), weights.shape)
    if len(stars) == 0:
        # Nothing to mask: just link the inputs through unchanged.
        for source, destination in ((weightfile, outweight),
                                    (flagfile, outflag)):
            if os.path.exists(destination):
                os.remove(destination)
            os.symlink(source, destination)
        return
    newweight, newflag = maskStars(weights, flags, stars)
    saveImage(outweight, newweight)
    saveImage(outflag, newflag)
##########################
def createObjectMask(imagefile, weightfile, flagfile, workdir=None):
    '''
    @brief Runs source extractor to generate a mask for objects
    @param imagefile filename of input image
    @param weightfile filename of input weight image
    @param flagfile filename of input flag image
    @param workdir place to write temporary files (a temporary directory
           is created, used and removed when omitted)
    @returns an image of 0,1's where 1 is a pixel occupied by an object in the original image
    '''
    objects = None
    # Initialize before the try block so the finally clause can never hit
    # an UnboundLocalError (which would mask the original exception).
    deleteWorkdir = False
    objectsfile = None
    try:
        if workdir is None:
            workdir = tempfile.mkdtemp()
            deleteWorkdir = True
        basename, ext = os.path.splitext(os.path.basename(imagefile))
        objectsfile = os.path.join(workdir, '%s_objs.fits' % basename)
        cmd = 'sex -c %(config)s %(imagefile)s -WEIGHT_IMAGE %(weightfile)s -FLAG_IMAGE %(flagfile)s -CHECKIMAGE_NAME %(objectsfile)s' % \
            {'config' : __sextractor_config_file__,
             'imagefile' : imagefile,
             'weightfile' : weightfile,
             'flagfile' : flagfile,
             'objectsfile' : objectsfile}
        subprocess.check_call([cmd], shell=True)
        objects = pyfits.open(objectsfile)[0].data
        # Binarize the SExtractor check image: any detection becomes 1.
        objects[objects > 0] = 1
    finally:
        if deleteWorkdir and os.path.exists(workdir):
            # We own the temporary directory: remove all its contents.
            # ('leftover' instead of the original 'file', which shadowed
            # the builtin.)
            toDelete = glob.glob('%s/*' % workdir)
            for leftover in toDelete:
                os.remove(leftover)
            os.rmdir(workdir)
        elif objectsfile is not None and os.path.exists(objectsfile):
            # Caller-supplied workdir: only remove the check image.
            os.remove(objectsfile)
    return objects
##########################
# Matches full-line comments in region files.
# NOTE(review): not referenced anywhere in this chunk -- confirm other
# callers before removing.
commentline = re.compile('^#')
def readRegionFile(txtlines, imageSize):
    '''
    @brief Parses a list of strings into grouped stars
    @param txtlines a list of strings (or a single string, which is split
           into lines)
    @param imageSize pyfits shape of associated image (NAXIS2,NAXIS1)
    @returns a list of stars, where each star is a list of Ring objects defining a star's reflections
    '''
    if type(txtlines) != type([]):
        txtlines = txtlines.splitlines()
    # Only circle regions describe reflections; everything else in the
    # file acts as a separator between stars.
    circles = [region for region in rf.parseRegionFile(txtlines)
               if isinstance(region, rf.Circle)]
    rings = [Ring.fromCircle(circle) for circle in circles]
    stars = groupRings(rings)
    # Keep only stars whose outermost ring overlaps the image at all.
    keep = []
    for star in stars:
        outer = star[-1]
        offImage = (outer.x + outer.r < 0 or
                    outer.x - outer.r > imageSize[1] or
                    outer.y + outer.r < 0 or
                    outer.y - outer.r > imageSize[0])
        if not offImage:
            keep.append(star)
    return keep
############################
def constructReflectionModel(image, mask, rings, weight = None, buffer=50, background_radii=100):
    '''
    @brief Constructs reflection model for a star
    @param image input image to measure reflections from
    @param mask image where 1 is a bad pixel or an object
    @param rings a list of Ring objects characterizing one star
    @param weight a weight file for an image
    @param buffer radial distance around each ring to ignore when fitting the model
    @param background_radii radial extent beyond the outermost ring used to estimate the background
    @returns model an image of the star reflection model and background in the input image
    '''
    X,Y = np.meshgrid(np.arange(image.shape[1]), np.arange(image.shape[0]))
    if weight is None:
        weight = np.ones_like(image)
    mask = np.copy(mask)
    # Pixels with (near-)zero weight are unusable; fold them into the mask.
    mask[weight < 1e-6 ] = 1
    # Radial distance of every pixel from each ring's center.
    dRs = [np.sqrt((X-r.x)**2 + (Y-r.y)**2) for r in rings]
    ######
    # Weighted-mean background, measured in an annulus just outside the
    # outermost ring (between r+buffer and r+background_radii).
    select = np.logical_and(mask == 0, np.logical_and(dRs[-1] > rings[-1].r+buffer,
                                                      dRs[-1] < rings[-1].r + background_radii))
    dr = dRs[-1][select]
    pix = image[select]
    curWeight = weight[select]
    aveBackground = np.sum(curWeight * pix) / np.sum(curWeight)
    #######
    # Fit a linear radial profile (slope, intercept) to the
    # background-subtracted pixels between each pair of consecutive
    # rings, staying `buffer` pixels away from both rings.
    levels = []
    for i in xrange(0, len(rings)-1):
        select = np.logical_and(mask == 0,np.logical_and(dRs[i] > rings[i].r + buffer,
                                                         dRs[i+1] < rings[i+1].r - buffer))
        dr = dRs[i][select]
        if dr.size == 0:
            # No usable pixels between these rings: flat zero profile.
            levels.append((0,0))
            continue
        pix = image[select] - aveBackground
        curWeight = weight[select]
        params, isCon = leastsq.linear_leastsq(dr, pix, 1./np.sqrt(curWeight))
        levels.append(params)
    # Evaluate the fitted profiles between the rings to build the model.
    model = np.zeros_like(image)
    for i in xrange(1,len(rings)):
        insideRing = np.logical_and(dRs[i-1] > rings[i-1].r, dRs[i] < rings[i].r)
        model[insideRing] = dRs[i-1][insideRing]*levels[i-1][0] + levels[i-1][1]
    return model
#############################
def maskStars(weight, flag, stars, buffer=50):
    '''
    @brief masks out ring features in a weight and flag image
    @param weight weight image to mask (numpy 2d array); the input is not
           modified, a masked copy is returned
    @param flag flag image to mask (numpy 2d array); also copied
    @param stars list of stars, where each star is a list of Ring objects
    @param buffer radial range around each ring to mask
    @returns newweight, newflag masked weight and flag images
    '''
    newweight = weight.copy()
    newflag = flag.copy()
    cols, rows = np.meshgrid(np.arange(weight.shape[1]),
                             np.arange(weight.shape[0]))
    for rings in stars:
        radii = [np.sqrt((cols - ring.x)**2 + (rows - ring.y)**2)
                 for ring in rings]
        # The innermost ring is masked as a filled disk in both images.
        core = radii[0] < (rings[0].r + buffer)
        newweight[core] = 0
        newflag[core] = 1
        # Outer rings are only flagged (the weight is left untouched, as
        # in the original), in an annulus of half-width `buffer`.
        for dist, ring in zip(radii[1:], rings[1:]):
            annulus = np.logical_and(dist > (ring.r - buffer),
                                     dist < (ring.r + buffer))
            newflag[annulus] = 1
    return newweight, newflag
#############################
def subtractStars(image, flag, stars, weight = None, buffer=50):
    '''
    @brief removes star reflections and background from image by iterating over stars
    @params image image to model and remove star reflections from (numpy 2d array)
    @params flag image of 0,1's where a 1 is a bad pixel or object
    @params stars list of stars, where each star is a list of Ring objects marking reflections
    @params weight optional weight image; defaults to uniform weights
    @params buffer size around each ring to ignore in modeling
    @returns image image with star reflections (and background) removed
    '''
    if weight is None:
        weight = np.ones_like(image)
    for index, star in enumerate(stars):
        # Mask every *other* star first so its rings cannot bias the
        # model fitted for this star.
        others = stars[:index] + stars[index + 1:]
        maskedweight, maskedflag = maskStars(weight, flag, others, buffer)
        model = constructReflectionModel(image, maskedflag, star,
                                         weight = maskedweight, buffer=buffer)
        image = image - model
    return image
########################################################
########################################################
### UTILITY CLASSES
########################################################
class Ring(rf.Circle):
    '''
    A circular star-reflection region: a thin wrapper over rf.Circle
    providing x/y/r attribute shortcuts, an optional ds9 'tag'
    attribute, containment testing and value equality.
    '''
    def __init__(self, x, y, r, **keywords):
        rf.Circle.__init__(self, (x,y),r,**keywords)
    def __getattr__(self, name):
        # Only invoked for attributes not found the normal way; exposes
        # the underlying Circle fields under short names.
        if name == 'x':
            return self.center[0]
        if name == 'y':
            return self.center[1]
        if name == 'r':
            return self.radius
        if name == 'tag':
            # ds9 group tag, if the region carried one; None otherwise.
            if 'tag' in self.attributes:
                return self.attributes['tag']
            else:
                return None
        return rf.Circle.__getattribute__(self, name)
    # Raw string: '\(' and '\)' in a plain literal are invalid escape
    # sequences (SyntaxWarning on modern Pythons).
    syntax = re.compile(r'circle\((.+?),(.+?),(.+?)\)')
    @classmethod
    def fromString(cls, txt):
        '''Parse a bare "circle(x,y,r)" string into a Ring.'''
        match = Ring.syntax.match(txt)
        if match is None:
            raise SyntaxError('Cannot translate into Ring: %s' % txt)
        return Ring(float(match.group(1)),
                    float(match.group(2)),
                    float(match.group(3)))
    @classmethod
    def fromCircle(cls, acircle):
        '''Promote an rf.Circle (attributes included) to a Ring.'''
        return cls(acircle.center[0], acircle.center[1], acircle.radius, **acircle.attributes)
    def __eq__(self, other):
        # Value equality on geometry only; tags are deliberately ignored
        # (the unit tests compare tagged rings equal to untagged ones).
        return self.x == other.x and \
            self.y == other.y and \
            self.r == other.r
    def __repr__(self):
        return '<Ring center=(%f,%f) radius=%f>' % (self.x, self.y, self.r)
    def contains(self, aring):
        '''True if aring lies entirely within this ring.'''
        dist = np.sqrt((self.x - aring.x)**2 + (self.y - aring.y)**2)
        return dist <= self.r - aring.r
##########################################################
### UTILITY FUNCTIONS
##########################################################
def loadImage(filename):
    '''Load the primary HDU of a FITS file; returns (data, header).'''
    hdu = pyfits.open(filename)[0]
    return hdu.data, hdu.header
##########################
def saveImage(filename, image, header = None):
    '''Write image (with optional header) as a primary HDU, overwriting
    any existing file of the same name.'''
    hdu = pyfits.PrimaryHDU(image, header=header)
    hdu.writeto(filename, overwrite=True)
############################
class RingConfusionException(Exception):
    '''Raised by groupRings when a ring matches more than one star group
    and the tags cannot break the tie.'''
def groupRings(regions):
    '''
    @brief Group individual reflection rings into stars
    @param regions ring regions (objects with .r, .tag and .contains())
    @returns list of stars -- each a list of rings ordered smallest
             radius first -- sorted by decreasing number of rings
    @throws RingConfusionException if a ring matches several stars and
            tags cannot disambiguate
    '''
    # Python-2 comparator sort: process rings smallest radius first, so
    # each new ring can only *contain* previously grouped ones.
    sortedRings = sorted(regions, lambda x,y: cmp(x.r, y.r))
    stars = []
    for ring in sortedRings:
        matches = []
        tagmatches = []
        for star in stars:
            # A ring belongs to a star when it contains the star's
            # current outermost (= largest, last) ring.
            if ring.contains(star[-1]):
                if ring.tag is None:
                    matches.append(star)
                    continue
                # Tagged ring: scan the star's rings for tag agreement.
                tagConflict = False
                for starRing in star:
                    if starRing.tag is None:
                        continue
                    elif starRing.tag != ring.tag:
                        # Different group tag vetoes this star entirely.
                        tagConflict = True
                        break
                    else:
                        # Same tag: a preferred ('tag') match.
                        tagmatches.append(star)
                        break
                if not tagConflict:
                    matches.append(star)
        nmatches = len(matches)
        ntagmatches = len(tagmatches)
        if nmatches == 0:
            # No containing star: this ring starts a new star.
            stars.append([ring])
        elif nmatches == 1:
            matches[0].append(ring)
        elif ntagmatches == 1:
            # Several geometric matches, but a unique tag match wins.
            tagmatches[0].append(ring)
        else:
            raise RingConfusionException(ring)
    # Largest star (most rings) first.
    sortedStars = sorted(stars, lambda x,y: cmp(len(x), len(y)), reverse=True)
    return sortedStars
############################################################
### TESTING
############################################################
class TestGroupRings(unittest.TestCase):
def testOneStar(self):
rings = [Ring(919.4,3182.4,182.93164),
Ring(928.46667,3173.3333,69.593566),
Ring(910.33333,3182.4,351.79447)]
stars = [[Ring(928.46667,3173.3333,69.593566),
Ring(919.4,3182.4,182.93164),
Ring(910.33333,3182.4,351.79447)]]
self.assertEquals(stars, groupRings(rings))
def testTwoStars(self):
rings = [Ring(919.4,3182.4,182.93164),
Ring(910.33333,3182.4,351.79447),
Ring(846.86667,2176,148.45513),
Ring(837.8,2221.3333,305.67992),
Ring(828.73333,2212.2667,245.79471),
Ring(828.73333,2212.2667,528.04565),
Ring(928.46667,3173.3333,69.593566),]
stars = [[Ring(846.86667,2176,148.45513),
Ring(828.73333,2212.2667,245.79471),
Ring(837.8,2221.3333,305.67992),
Ring(828.73333,2212.2667,528.04565)],
[Ring(928.46667,3173.3333,69.593566),
Ring(919.4,3182.4,182.93164),
Ring(910.33333,3182.4,351.79447)]]
self.assertEquals(stars, groupRings(rings))
def testOverlappingMinorStar(self):
rings = [Ring(883.13333,2846.9333,80.859022),
Ring(901.26667,2837.8667,300.63893),
Ring(1390.8667,2846.9333,107.91074),
Ring(1399.9333,2810.6667,303.31271),
Ring(874.06667,2874.1333,666.68692)]
stars = [[Ring(883.13333,2846.9333,80.859022),
Ring(901.26667,2837.8667,300.63893),
Ring(874.06667,2874.1333,666.68692)],
[Ring(1390.8667,2846.9333,107.91074),
Ring(1399.9333,2810.6667,303.31271)]]
self.assertEquals(stars, groupRings(rings))
def testSimpleAntiGroup(self):
rings = [Ring(6658.8327,6419.3168,114.2888, tag='{Group 3}'),
Ring(5960.9529,6413.4276,1337.695, tag='{Group 2}')]
stars = [[Ring(6658.8327,6419.3168,114.2888, tag='{Group 3}')],
[Ring(5960.9529,6413.4276,1337.695, tag='{Group 2}')]]
self.assertEquals(stars, groupRings(rings))
def testSimpleProGroup(self):
rings = [Ring(6658.8327,6419.3168,114.2888),
Ring(6002.1779,6407.5383,174.68296, tag='{Group 2}'),
Ring(5960.9529,6413.4276,1337.695, tag='{Group 2}')]
stars = [[Ring(6002.1779,6407.5383,174.68296, tag='{Group 2}'),
Ring(5960.9529,6413.4276,1337.695, tag='{Group 2}')],
[Ring(6658.8327,6419.3168,114.2888)]]
self.assertEquals(stars, groupRings(rings))
def testTagConflict(self):
    """Untagged rings whose grouping is ambiguous raise
    RingConfusionException."""
    ambiguous = [Ring(6658.8327,6419.3168,114.2888),
                 Ring(6002.1779,6407.5383,174.68296),
                 Ring(5960.9529,6413.4276,1337.695)]
    with self.assertRaises(RingConfusionException):
        groupRings(ambiguous)
def testTagConflict_Tags(self):
    """Even with identical tags, an inconsistent ring set raises
    RingConfusionException."""
    tagged = [Ring(6658.8327,6419.3168,114.2888, tag='{Group 2}'),
              Ring(6002.1779,6407.5383,174.68296, tag='{Group 2}'),
              Ring(5960.9529,6413.4276,1337.695, tag='{Group 2}')]
    with self.assertRaises(RingConfusionException):
        groupRings(tagged)
def testNonOverlappingTagConflict(self):
    """Two spatially separated sets sharing one tag are still split into
    two stars based on position."""
    regionfile = '''circle(239.85417,1271.2711,373.2043) # color=red width=2 tag={Group 3}
circle(213.98543,1246.2475,734.59645) # color=red width=2 tag={Group 3}
circle(192.45328,1225.4188,1051.7073) # color=red width=2 tag={Group 3}
circle(167.79821,1201.5693,1403.0933) # color=red width=2 tag={Group 3}
circle(-1584.7093,757.48331,373.2043) # color=red width=2 tag={Group 3}
circle(-1575.643,742.29722,734.59645) # color=red width=2 tag={Group 3}'''
    rings = [Ring.fromCircle(rf.Circle.fromStr(x)) for x in regionfile.splitlines()]
    stars = [rings[0:4], rings[4:]]
    # assertEquals is a deprecated alias (removed in Python 3.12).
    self.assertEqual(stars, groupRings(rings))
def testTripleStars_AntiGroup(self):
    """Three interleaved stars with a mix of tagged and untagged rings
    resolve into three radius-sorted groups."""
    rings = [Ring(5557.5371,6360.424,376.48289, tag='{Group 1}'),
             Ring(5978.6207,6389.8704,388.99078),
             Ring(6658.8327,6419.3168,114.2888, tag='{Group 3}'),
             Ring(6620.5524,6398.7044,369.99343),
             Ring(5525.1461,6333.9223,760.27046),
             Ring(6002.1779,6407.5383,174.68296, tag='{Group 2}'),
             Ring(5560.4817,6392.8151,138.11697),
             Ring(5560.4817,6277.9741,1067.2162, tag='{Group 1}'),
             Ring(5536.9246,6283.8634,1396.797, tag='{Group 1}'),
             Ring(5960.9529,6413.4276,1337.695, tag='{Group 2}')]
    stars = [[Ring(5560.4817,6392.8151,138.11697),
              Ring(5557.5371,6360.424,376.48289),
              Ring(5525.1461,6333.9223,760.27046),
              Ring(5560.4817,6277.9741,1067.2162),
              Ring(5536.9246,6283.8634,1396.797)],
             [Ring(6002.1779,6407.5383,174.68296),
              Ring(5978.6207,6389.8704,388.99078),
              Ring(5960.9529,6413.4276,1337.695)],
             [Ring(6658.8327,6419.3168,114.2888),
              Ring(6620.5524,6398.7044,369.99343)]]
    # assertEquals is a deprecated alias (removed in Python 3.12).
    self.assertEqual(stars, groupRings(rings))
def testTripleStars_AssociateNoTag(self):
    """An untagged ring associates with the tagged star it overlaps;
    non-overlapping rings become singleton groups."""
    rings = [Ring(6658.8327,6419.3168,114.2888, tag='{Group 3}'),
             Ring(6620.5524,6398.7044,369.99343),
             Ring(6002.1779,6407.5383,174.68296, tag='{Group 2}'),
             Ring(5560.4817,6392.8151,138.11697)]
    stars = [[Ring(6658.8327,6419.3168,114.2888, tag='{Group 3}'),
              Ring(6620.5524,6398.7044,369.99343)],
             [Ring(5560.4817,6392.8151,138.11697)],
             [Ring(6002.1779,6407.5383,174.68296, tag='{Group 2}')]]
    grouped = groupRings(rings)
    # assertEquals is a deprecated alias (removed in Python 3.12).
    self.assertEqual(stars, grouped)
def testContains(self):
    """Ring.contains is true only when one ring encloses the other's
    center, and is not symmetric."""
    ringA = Ring(221.26667,1768,148.45513)
    ringB = Ring(710.86667,1786.1333,205.53195)
    ringC = Ring(1109.8,1731.7333,69.593566)
    ringD = Ring(1091.6667,1731.7333,351.79447)
    ringE = Ring(1481,1713.6,205.68956)
    # failIf/failUnless are deprecated aliases (removed in Python 3.12).
    self.assertFalse(ringA.contains(ringB))
    self.assertFalse(ringC.contains(ringA))
    self.assertFalse(ringB.contains(ringC))
    self.assertFalse(ringC.contains(ringB))
    self.assertFalse(ringC.contains(ringE))
    self.assertTrue(ringD.contains(ringC))
    self.assertFalse(ringC.contains(ringD))
def testTag(self):
    """A tag passed to the constructor is stored on the ring."""
    ringA = Ring(221.26667,1768,148.45513, tag='{Group 1}')
    # assertEquals is a deprecated alias (removed in Python 3.12).
    self.assertEqual(ringA.tag, '{Group 1}')
def testNoTag(self):
    """A ring built without a tag defaults its tag to None."""
    ringA = Ring(221.26667,1768,148.45513)
    # assertIsNone replaces the deprecated assertEquals(..., None).
    self.assertIsNone(ringA.tag)
######################
class TestModel(unittest.TestCase):
    """Tests for constructReflectionModel: shape, gradient recovery,
    masking, and weight-file handling."""

    def testReturnType(self):
        """The model array has the same shape as the input image."""
        imagesize = (512,128)
        model = constructReflectionModel(np.ones(imagesize),
                                         np.zeros(imagesize),
                                         [Ring(0,0,20), Ring(0,0,120)],
                                         buffer=1)
        # assertEquals is a deprecated alias (removed in Python 3.12).
        self.assertEqual(model.shape, imagesize)

    def testRecoverGradient(self):
        """A radial gradient between two rings is recovered to within 1e-2
        (model is the reflection component, i.e. image minus background 2)."""
        # 'img' instead of 'input' to avoid shadowing the builtin.
        img = 2*np.ones((512, 128))
        x_star = np.array([120, 100])
        X, Y = np.meshgrid(np.arange(img.shape[1]), np.arange(img.shape[0]))
        dR = np.sqrt((X - x_star[0])**2 + (Y - x_star[1])**2)
        img[dR < 30] = 50
        select = np.logical_and(dR >= 30, dR < 100)
        img[select] = 15 + dR[select]*-.1
        rings = [Ring(x_star[0], x_star[1], 30), Ring(x_star[0], x_star[1], 100)]
        model = constructReflectionModel(img, np.zeros_like(img), rings, buffer=1)
        # Exclude the outer ring boundary where interpolation is inexact.
        validTest = np.logical_and(dR > 30, np.logical_or(dR < 99, dR > 101))
        saveImage('input.fits', img)
        saveImage('model.fits', model)
        self.assertTrue((np.abs(model[validTest] - img[validTest] + 2) < 1e-2).all())

    def testMask(self):
        """Masked (garbage) pixels must not contaminate the model."""
        img = 2*np.ones((512, 128))
        mask = np.zeros_like(img)
        img[400:415, 100:120] = 1e15
        mask[400:415, 100:120] = 1
        x_star = np.array([120, 100])
        X, Y = np.meshgrid(np.arange(img.shape[1]), np.arange(img.shape[0]))
        dR = np.sqrt((X - x_star[0])**2 + (Y - x_star[1])**2)
        img[dR < 30] = 50
        select = np.logical_and(dR >= 30, dR < 100)
        img[select] = 15 + dR[select]*-.1
        rings = [Ring(x_star[0], x_star[1], 30), Ring(x_star[0], x_star[1], 100)]
        model = constructReflectionModel(img, mask, rings, buffer=1)
        # NOTE(review): mask holds only 0/1 here, so "mask > 1" selects no
        # pixels and the assertion below is vacuous. testWeightFile uses
        # "mask == 0" for the analogous check -- confirm intent before
        # tightening this condition.
        validTest = np.logical_and(np.logical_and(dR > 30,
                                                  np.logical_or(dR < 99,
                                                                dR > 101)),
                                   mask > 1)
        self.assertTrue((np.abs(model[validTest] - img[validTest]) < 1e-2).all())

    def testWeightFile(self):
        """Pixels with non-unity weight are replaced before modeling."""
        img = 2*np.ones((512, 128))
        mask = np.zeros_like(img)
        weight = np.ones_like(img)
        img[400:415, 100:120] = 10000
        weight[400:415, 100:120] = .0003
        x_star = np.array([120, 100])
        X, Y = np.meshgrid(np.arange(img.shape[1]), np.arange(img.shape[0]))
        dR = np.sqrt((X - x_star[0])**2 + (Y - x_star[1])**2)
        img[dR < 30] = 50
        select = np.logical_and(dR >= 30, dR < 100)
        img[select] = 15 + dR[select]*-.1
        rings = [Ring(x_star[0], x_star[1], 30), Ring(x_star[0], x_star[1], 100)]
        model = constructReflectionModel(img, mask, rings, weight=weight, buffer=1)
        expected = np.copy(img)
        expected[weight != 1] = 2
        validTest = np.logical_and(np.logical_and(dR > 30,
                                                  np.logical_or(dR < 99,
                                                                dR > 101)),
                                   mask == 0)
        self.assertTrue((np.abs(model[validTest] - expected[validTest] + 2) < .1).all())
####################
class TestStarMasking(unittest.TestCase):
    """Tests for maskStars: zeroing weights and setting flags around
    star cores and reflection rings."""

    def testBasic(self):
        """The star core is zeroed in the weight map; both the core and
        the ring annulus are set in the flag map."""
        weight = np.ones((512, 128))
        flag = np.zeros_like(weight)
        x_star = np.array([120, 100])
        rings = [Ring(x_star[0], x_star[1], 30), Ring(x_star[0], x_star[1], 100)]
        X, Y = np.meshgrid(np.arange(weight.shape[1]), np.arange(weight.shape[0]))
        dR = np.sqrt((X - x_star[0])**2 + (Y - x_star[1])**2)
        insideR1 = dR < rings[0].r
        ring2 = np.logical_and(dR > rings[1].r - 2, dR < rings[1].r + 2)
        newweight, newflag = maskStars(weight, flag, [rings], buffer=2)
        self.assertTrue((newweight[insideR1] == 0).all())
        self.assertTrue((newflag[ring2] == 1).all())
        self.assertTrue((newflag[insideR1] == 1).all())

    def testNoOverwrite(self):
        """maskStars must not modify its input arrays in place."""
        weight = np.ones((512, 128))
        flag = np.zeros_like(weight)
        x_star = np.array([120, 100])
        rings = [Ring(x_star[0], x_star[1], 30), Ring(x_star[0], x_star[1], 100)]
        # Return values intentionally ignored; only the inputs matter here.
        maskStars(weight, flag, [rings], buffer=2)
        self.assertTrue((weight == 1).all())
        self.assertTrue((flag == 0).all())

    def testMultipleStars(self):
        """Masks from several stars are combined without clobbering
        pre-existing flags."""
        weight = np.ones((512, 128))
        flag = np.zeros_like(weight)
        expectedweight = np.ones_like(weight)
        expectedflag = np.zeros_like(weight)
        x_star1 = np.array([120, 100])
        X, Y = np.meshgrid(np.arange(weight.shape[1]), np.arange(weight.shape[0]))
        dR = np.sqrt((X - x_star1[0])**2 + (Y - x_star1[1])**2)
        flag[dR < 5] = 1
        expectedweight[dR < 35] = 0
        expectedflag[dR < 35] = 1
        inRing = np.logical_and(dR > 95, dR < 105)
        expectedflag[inRing] = 1
        stars = [[Ring(x_star1[0], x_star1[1], 30), Ring(x_star1[0], x_star1[1], 100)]]
        x_star2 = np.array([70, 140])
        dR = np.sqrt((X - x_star2[0])**2 + (Y - x_star2[1])**2)
        flag[dR < 5] = 1
        expectedweight[dR < 20] = 0
        expectedflag[dR < 20] = 1
        inRing = np.logical_and(dR > 25, dR < 35)
        expectedflag[inRing] = 1
        stars.append([Ring(x_star2[0], x_star2[1], 15), Ring(x_star2[0], x_star2[1], 30)])
        newweight, newflag = maskStars(weight, flag, stars, buffer=5)
        self.assertTrue((expectedweight == newweight).all())
        self.assertTrue((expectedflag == newflag).all())
#######################
class TestSubtractStars(unittest.TestCase):
    """Tests for subtractStars: removing stellar reflection halos from
    an image while respecting the flag map."""

    def testBasic(self):
        """subtractStars preserves the image shape."""
        image = np.zeros((4080,2000))
        flag = np.zeros_like(image)
        stars = [[Ring(169.62191,2275.477,381.84674),
                  Ring(260.92933,2318.7279,709.7176),
                  Ring(337.81979,2386.0071,969.41448),
                  Ring(222.4841,2361.9788,1424.1761)]]
        newimage = subtractStars(image, flag, stars, buffer=1)
        # assertEquals is a deprecated alias (removed in Python 3.12).
        self.assertEqual(newimage.shape, image.shape)

    def testRemoveSimpleReflectionsAndMask(self):
        """A single star's halo is subtracted back down to the
        background level outside the buffered ring edges."""
        image = 15*np.ones((512, 128))
        flag = np.zeros_like(image)
        x_star = np.array([120, 100])
        X, Y = np.meshgrid(np.arange(image.shape[1]), np.arange(image.shape[0]))
        dR = np.sqrt((X - x_star[0])**2 + (Y - x_star[1])**2)
        image[dR < 30] = 50
        select = np.logical_and(dR >= 30, dR < 100)
        image[select] = 15 + dR[select]*-.1
        stars = [[Ring(x_star[0], x_star[1], 30), Ring(x_star[0], x_star[1], 100)]]
        newimage = subtractStars(image, flag, stars, buffer=5)
        validTest = np.logical_and(dR > 35, np.logical_or(dR < 95, dR > 105))
        saveImage('test.fits', newimage)
        self.assertTrue((np.abs(newimage[validTest] - 15) < 1e-2).all())

    def testRemoveMultipleReflections(self):
        """Halos from two overlapping stars are both removed; only the
        expected flagged regions are excluded from the check."""
        image = 10*np.ones((512, 128))
        flag = np.zeros_like(image)
        expectedflag = np.zeros_like(image)
        x_star1 = np.array([120, 100])
        X, Y = np.meshgrid(np.arange(image.shape[1]), np.arange(image.shape[0]))
        dR = np.sqrt((X - x_star1[0])**2 + (Y - x_star1[1])**2)
        image[dR < 30] = 50
        flag[dR < 5] = 1
        expectedflag[dR < 35] = 1
        select = np.logical_and(dR >= 30, dR < 100)
        image[select] = 15 + dR[select]*-.1
        inRing = np.logical_and(dR > 95, dR < 105)
        expectedflag[inRing] = 1
        stars = [[Ring(x_star1[0], x_star1[1], 30), Ring(x_star1[0], x_star1[1], 100)]]
        x_star2 = np.array([70, 140])
        dR = np.sqrt((X - x_star2[0])**2 + (Y - x_star2[1])**2)
        image[dR < 15] = 25
        flag[dR < 5] = 1
        expectedflag[dR < 20] = 1
        select = np.logical_and(dR >= 15, dR < 30)
        image[select] = image[select] + 2 + dR[select]*-.05
        inRing = np.logical_and(dR > 25, dR < 35)
        expectedflag[inRing] = 1
        stars.append([Ring(x_star2[0], x_star2[1], 15), Ring(x_star2[0], x_star2[1], 30)])
        newimage = subtractStars(image, flag, stars, buffer=5)
        self.assertTrue((np.abs(newimage[expectedflag == 0] - 10) < 1).all())
#################################
class TestInterpretRegions(unittest.TestCase):
    """Tests for readRegionFile: parsing DS9 region text into grouped
    star ring lists."""

    def testAssociateStars(self):
        """Rings sharing tag groups in the region file yield one parsed
        star per group."""
        regiontxt = '''
# Region file format: DS9 version 4.0
# Filename: /u/ki/dapple/subaru/MACS1149+22/W-J-V/SCIENCE/coadd_MACS1149+22_all/coadd.fits
global color=green font="helvetica 10 normal" select=1 highlite=1 edit=1 move=1 delete=1 include=1 fixed=0 source
physical
circle(5478.0318,6684.3345,373.77281) # color=red width=2 tag={Group 1}
circle(5480.9764,6654.8881,736.84446) # color=red width=2 tag={Group 1}
circle(5480.9764,6628.3863,1055.499) # color=red width=2 tag={Group 1}
circle(4637.0442,5908.6345,202.03205) # color=red width=2 tag={Group 2}
circle(4652.2286,5891.1618,373.77281) # color=red width=2 tag={Group 2}
circle(4645.133,5861.7154,736.84446) # color=red width=2 tag={Group 2}
circle(5466.6878,6575.3828,1418.2524) # color=red width=2 tag={Group 1}
circle(8724.6446,6374.247,202.03205) # color=red width=2 tag={Group 4}
circle(8655.7426,6332.9289,373.77281) # color=red width=2 tag={Group 4}
circle(8631.0768,6300.9724,736.84446) # color=red width=2 tag={Group 4}
circle(2234.9358,1767.0683,179.85811) # color=red width=2 tag={Group 6}
circle(2275.2206,1834.9369,373.77281) # color=red width=2 tag={Group 6}
circle(2345.9362,1888.3218,736.84446) # color=red width=2 tag={Group 6}
circle(7611.5659,1169.7763,373.77281) # color=red width=2 tag={Group 5}
circle(7561.7997,1243.2415,736.84446) # color=red width=2 tag={Group 5}
circle(7544.2294,1322.1614,1055.499) # color=red width=2 tag={Group 5}
circle(4205.3173,9036.1446,139.52686) # color=red width=2 tag={Group 3}
circle(4225.5218,8976.0012,373.77281) # color=red width=2 tag={Group 3}
circle(7464.6798,1402.19,1418.2524) # color=red width=2 tag={Group 5}
'''
        # Expected grouping, kept here as documentation of the intent;
        # only the group count is asserted below.
        stars = [[Ring(5478.0318,6684.3345,373.77281),
                  Ring(5480.9764,6654.8881,736.84446),
                  Ring(5480.9764,6628.3863,1055.499),
                  Ring(5466.6878,6575.3828,1418.2524)],
                 [Ring(4637.0442,5908.6345,202.03205),
                  Ring(4652.2286,5891.1618,373.77281),
                  Ring(4645.133,5861.7154,736.84446)],
                 [Ring(8724.6446,6374.247,202.03205),
                  Ring(8655.7426,6332.9289,373.77281),
                  Ring(8631.0768,6300.9724,736.84446)],
                 [Ring(2234.9358,1767.0683,179.85811),
                  Ring(2275.2206,1834.9369,373.77281),
                  Ring(2345.9362,1888.3218,736.84446)],
                 [Ring(7611.5659,1169.7763,373.77281),
                  Ring(7561.7997,1243.2415,736.84446),
                  Ring(7544.2294,1322.1614,1055.499),
                  Ring(7464.6798,1402.19,1418.2524)],
                 [Ring(4205.3173,9036.1446,139.52686),
                  Ring(4225.5218,8976.0012,373.77281)]]
        parsedStars = readRegionFile(regiontxt,(10000,10000))
        # assertEquals is a deprecated alias (removed in Python 3.12).
        self.assertEqual(len(stars), len(parsedStars))

    def testFilterOffChipStars(self):
        """Stars located entirely off the chip are dropped while the
        on-chip groups survive."""
        regiontxt = '''
# Region file format: DS9 version 4.0
# Filename: /u/ki/dapple/subaru/MACS1149+22/W-J-V/SCIENCE/coadd_MACS1149+22_all/coadd.fits
global color=green font="helvetica 10 normal" select=1 highlite=1 edit=1 move=1 delete=1 include=1 fixed=0 source
physical
circle(5478.0318,6684.3345,373.77281) # color=red width=2 tag={Group 1}
circle(5480.9764,6654.8881,736.84446) # color=red width=2 tag={Group 1}
circle(5480.9764,6628.3863,1055.499) # color=red width=2 tag={Group 1}
circle(5466.6878,6575.3828,1418.2524) # color=red width=2 tag={Group 1}
circle(4652.228,5891.1618,373.77281) # color=red width=2 tag={Group 2}
circle(4645.133,5861.7154,736.84446) # color=red width=2 tag={Group 2}
circle(-5000,2000,2.03205) # color=red width=2 tag={Group 4}
circle(-5000,2000,373.77281) # color=red width=2 tag={Group 4}
circle(-5000,2000,736.84446) # color=red width=2 tag={Group 4}
'''
        parsedStars = readRegionFile(regiontxt, imageSize=(10000,10000))
        # assertEquals is a deprecated alias (removed in Python 3.12).
        self.assertEqual(len(parsedStars), 2)
        self.assertEqual(parsedStars[0][0].x, 5478.0318)
        self.assertEqual(parsedStars[1][0].x, 4652.228)
#####################################
class TestLoadImages(unittest.TestCase):
    """Tests for deriving associated weight/flag/region and output file
    names from a science image path."""

    def testFindAssociated(self):
        """Weight, flag, and region file paths are derived from the
        science image name."""
        filename = 'SCIENCE/SUPA0011008_3OCFS.fits'
        weightfile, flagfile, regionfile = findAssociatedFiles(filename)
        # assertEquals is a deprecated alias (removed in Python 3.12).
        self.assertEqual(weightfile, 'WEIGHTS/SUPA0011008_3OCFS.weight.fits')
        self.assertEqual(flagfile, 'WEIGHTS/SUPA0011008_3OCFS.flag.fits')
        self.assertEqual(regionfile, 'SUPA0011008_3.reg')

    def testFindOutFiles(self):
        """Output names append the extension letter before the suffix."""
        imagefile = 'SCIENCE/SUPA0011008_3OCFS.fits'
        ext = 'R'
        outimage, outweight, outflag = findOutfiles(imagefile, ext)
        self.assertEqual(outimage, 'SCIENCE/SUPA0011008_3OCFSR.fits')
        self.assertEqual(outweight, 'WEIGHTS/SUPA0011008_3OCFSR.weight.fits')
        self.assertEqual(outflag, 'WEIGHTS/SUPA0011008_3OCFSR.flag.fits')
#################
class TestObjectSubtraction(unittest.TestCase):
    """Tests for createObjectMask, using a temporary working directory
    of FITS fixtures created in setUp and removed in tearDown."""

    def setUp(self):
        self.shape = (200,100)
        image = np.ones(self.shape)
        weight = np.ones_like(image)
        flag = np.zeros_like(image)
        # A bright rectangular "object" on a flat background.
        image[100:105,50:62] = 25
        self.object = image == 25

        def writeImage(image, name):
            # Only write each fixture once; reuse across test methods.
            if not os.path.exists(name):
                pyfits.PrimaryHDU(image).writeto(name)

        self.workdir = 'suppressreflections_tmp'
        if not os.path.exists(self.workdir):
            os.mkdir(self.workdir)
        self.imagename = os.path.join(self.workdir, 'suppressreflections_image.fits')
        self.weightname = os.path.join(self.workdir, 'suppressreflections_weight.fits')
        self.flagname = os.path.join(self.workdir, 'suppressreflections_flag.fits')
        writeImage(image, self.imagename)
        writeImage(weight, self.weightname)
        writeImage(flag, self.flagname)

    def tearDown(self):
        if os.path.exists(self.workdir):
            # 'path' rather than 'file' to avoid shadowing the builtin.
            for path in glob.glob('%s/*' % self.workdir):
                os.remove(path)
            os.rmdir(self.workdir)

    def testCreateObjectMask(self):
        """The returned flag marks exactly the object pixels, and the
        intermediate work files are left in workdir."""
        noobjs_flag = createObjectMask(self.imagename,
                                       self.weightname,
                                       self.flagname,
                                       workdir=self.workdir)
        workfiles = glob.glob('%s/*' % self.workdir)
        # assertEquals is a deprecated alias (removed in Python 3.12).
        self.assertEqual(len(workfiles), 3)
        self.assertEqual(noobjs_flag.shape, self.shape)
        self.assertTrue((noobjs_flag[np.logical_not(self.object)] == 0).all())
        self.assertTrue((noobjs_flag[self.object] == 1).all())
#######################
def test():
    """Collect every TestCase in this module and run it verbosely."""
    cases = [TestModel, TestInterpretRegions, TestSubtractStars,
             TestStarMasking, TestLoadImages, TestObjectSubtraction,
             TestGroupRings]
    loader = unittest.TestLoader()
    suite = unittest.TestSuite()
    for case in cases:
        suite.addTest(loader.loadTestsFromTestCase(case))
    unittest.TextTestRunner(verbosity=2).run(suite)
#############################################
### COMMAND LINE
##############################################
if __name__ == '__main__':
    # "script.py test" runs the built-in unit tests; any other invocation
    # falls through to the normal command-line entry point.
    if len(sys.argv) == 2 and sys.argv[1] == 'test':
        test()
    else:
        main()  # defined elsewhere in this file (not visible in this chunk)
| |
"""Support for sensors through the SmartThings cloud API."""
from collections import namedtuple
from typing import Optional, Sequence
from pysmartthings import Attribute, Capability
from homeassistant.const import (
DEVICE_CLASS_BATTERY, DEVICE_CLASS_HUMIDITY, DEVICE_CLASS_ILLUMINANCE,
DEVICE_CLASS_TEMPERATURE, DEVICE_CLASS_TIMESTAMP, ENERGY_KILO_WATT_HOUR,
MASS_KILOGRAMS, POWER_WATT, TEMP_CELSIUS, TEMP_FAHRENHEIT)
from . import SmartThingsEntity
from .const import DATA_BROKERS, DOMAIN
# One sensor definition: which device attribute to read, the display name,
# the fallback unit when the device reports none, and the HA device class.
Map = namedtuple("map", "attribute name default_unit device_class")

# Static table mapping each SmartThings capability to the sensor entities it
# should produce. A capability may yield several sensors (one per Map entry).
CAPABILITY_TO_SENSORS = {
    Capability.activity_lighting_mode: [
        Map(Attribute.lighting_mode, "Activity Lighting Mode", None, None)],
    Capability.air_conditioner_mode: [
        Map(Attribute.air_conditioner_mode, "Air Conditioner Mode", None,
            None)],
    Capability.air_quality_sensor: [
        Map(Attribute.air_quality, "Air Quality", 'CAQI', None)],
    Capability.alarm: [
        Map(Attribute.alarm, "Alarm", None, None)],
    Capability.audio_volume: [
        Map(Attribute.volume, "Volume", "%", None)],
    Capability.battery: [
        Map(Attribute.battery, "Battery", "%", DEVICE_CLASS_BATTERY)],
    Capability.body_mass_index_measurement: [
        Map(Attribute.bmi_measurement, "Body Mass Index", "kg/m^2", None)],
    Capability.body_weight_measurement: [
        Map(Attribute.body_weight_measurement, "Body Weight", MASS_KILOGRAMS,
            None)],
    Capability.carbon_dioxide_measurement: [
        Map(Attribute.carbon_dioxide, "Carbon Dioxide Measurement", "ppm",
            None)],
    Capability.carbon_monoxide_detector: [
        Map(Attribute.carbon_monoxide, "Carbon Monoxide Detector", None,
            None)],
    Capability.carbon_monoxide_measurement: [
        Map(Attribute.carbon_monoxide_level, "Carbon Monoxide Measurement",
            "ppm", None)],
    Capability.dishwasher_operating_state: [
        Map(Attribute.machine_state, "Dishwasher Machine State", None, None),
        Map(Attribute.dishwasher_job_state, "Dishwasher Job State", None,
            None),
        Map(Attribute.completion_time, "Dishwasher Completion Time", None,
            DEVICE_CLASS_TIMESTAMP)],
    Capability.dryer_mode: [
        Map(Attribute.dryer_mode, "Dryer Mode", None, None)],
    Capability.dryer_operating_state: [
        Map(Attribute.machine_state, "Dryer Machine State", None, None),
        Map(Attribute.dryer_job_state, "Dryer Job State", None, None),
        Map(Attribute.completion_time, "Dryer Completion Time", None,
            DEVICE_CLASS_TIMESTAMP)],
    Capability.dust_sensor: [
        Map(Attribute.fine_dust_level, "Fine Dust Level", None, None),
        Map(Attribute.dust_level, "Dust Level", None, None)],
    Capability.energy_meter: [
        Map(Attribute.energy, "Energy Meter", ENERGY_KILO_WATT_HOUR, None)],
    Capability.equivalent_carbon_dioxide_measurement: [
        Map(Attribute.equivalent_carbon_dioxide_measurement,
            'Equivalent Carbon Dioxide Measurement', 'ppm', None)],
    Capability.formaldehyde_measurement: [
        Map(Attribute.formaldehyde_level, 'Formaldehyde Measurement', 'ppm',
            None)],
    Capability.illuminance_measurement: [
        Map(Attribute.illuminance, "Illuminance", 'lux',
            DEVICE_CLASS_ILLUMINANCE)],
    Capability.infrared_level: [
        Map(Attribute.infrared_level, "Infrared Level", '%', None)],
    Capability.media_input_source: [
        Map(Attribute.input_source, "Media Input Source", None, None)],
    Capability.media_playback_repeat: [
        Map(Attribute.playback_repeat_mode, "Media Playback Repeat", None,
            None)],
    Capability.media_playback_shuffle: [
        Map(Attribute.playback_shuffle, "Media Playback Shuffle", None, None)],
    Capability.media_playback: [
        Map(Attribute.playback_status, "Media Playback Status", None, None)],
    Capability.odor_sensor: [
        Map(Attribute.odor_level, "Odor Sensor", None, None)],
    Capability.oven_mode: [
        Map(Attribute.oven_mode, "Oven Mode", None, None)],
    Capability.oven_operating_state: [
        Map(Attribute.machine_state, "Oven Machine State", None, None),
        Map(Attribute.oven_job_state, "Oven Job State", None, None),
        Map(Attribute.completion_time, "Oven Completion Time", None, None)],
    Capability.oven_setpoint: [
        Map(Attribute.oven_setpoint, "Oven Set Point", None, None)],
    Capability.power_meter: [
        Map(Attribute.power, "Power Meter", POWER_WATT, None)],
    Capability.power_source: [
        Map(Attribute.power_source, "Power Source", None, None)],
    Capability.refrigeration_setpoint: [
        Map(Attribute.refrigeration_setpoint, "Refrigeration Setpoint", None,
            DEVICE_CLASS_TEMPERATURE)],
    Capability.relative_humidity_measurement: [
        Map(Attribute.humidity, "Relative Humidity Measurement", '%',
            DEVICE_CLASS_HUMIDITY)],
    Capability.robot_cleaner_cleaning_mode: [
        Map(Attribute.robot_cleaner_cleaning_mode,
            "Robot Cleaner Cleaning Mode", None, None)],
    Capability.robot_cleaner_movement: [
        Map(Attribute.robot_cleaner_movement, "Robot Cleaner Movement", None,
            None)],
    Capability.robot_cleaner_turbo_mode: [
        Map(Attribute.robot_cleaner_turbo_mode, "Robot Cleaner Turbo Mode",
            None, None)],
    Capability.signal_strength: [
        Map(Attribute.lqi, "LQI Signal Strength", None, None),
        Map(Attribute.rssi, "RSSI Signal Strength", None, None)],
    Capability.smoke_detector: [
        Map(Attribute.smoke, "Smoke Detector", None, None)],
    Capability.temperature_measurement: [
        Map(Attribute.temperature, "Temperature Measurement", None,
            DEVICE_CLASS_TEMPERATURE)],
    Capability.thermostat_cooling_setpoint: [
        Map(Attribute.cooling_setpoint, "Thermostat Cooling Setpoint", None,
            DEVICE_CLASS_TEMPERATURE)],
    Capability.thermostat_fan_mode: [
        Map(Attribute.thermostat_fan_mode, "Thermostat Fan Mode", None, None)],
    Capability.thermostat_heating_setpoint: [
        Map(Attribute.heating_setpoint, "Thermostat Heating Setpoint", None,
            DEVICE_CLASS_TEMPERATURE)],
    Capability.thermostat_mode: [
        Map(Attribute.thermostat_mode, "Thermostat Mode", None, None)],
    Capability.thermostat_operating_state: [
        Map(Attribute.thermostat_operating_state, "Thermostat Operating State",
            None, None)],
    Capability.thermostat_setpoint: [
        Map(Attribute.thermostat_setpoint, "Thermostat Setpoint", None,
            DEVICE_CLASS_TEMPERATURE)],
    # three_axis is handled specially in async_setup_entry (one entity per
    # axis), hence no Map entries here.
    Capability.three_axis: [],
    Capability.tv_channel: [
        Map(Attribute.tv_channel, "Tv Channel", None, None)],
    Capability.tvoc_measurement: [
        Map(Attribute.tvoc_level, "Tvoc Measurement", 'ppm', None)],
    Capability.ultraviolet_index: [
        Map(Attribute.ultraviolet_index, "Ultraviolet Index", None, None)],
    Capability.voltage_measurement: [
        Map(Attribute.voltage, "Voltage Measurement", 'V', None)],
    Capability.washer_mode: [
        Map(Attribute.washer_mode, "Washer Mode", None, None)],
    Capability.washer_operating_state: [
        Map(Attribute.machine_state, "Washer Machine State", None, None),
        Map(Attribute.washer_job_state, "Washer Job State", None, None),
        Map(Attribute.completion_time, "Washer Completion Time", None,
            DEVICE_CLASS_TIMESTAMP)]
}

# Translate the unit strings reported by SmartThings into HA constants.
UNITS = {
    'C': TEMP_CELSIUS,
    'F': TEMP_FAHRENHEIT
}

# Per-axis entity names for the three_axis capability, indexed 0..2.
THREE_AXIS_NAMES = ['X Coordinate', 'Y Coordinate', 'Z Coordinate']
async def async_setup_platform(
        hass, config, async_add_entities, discovery_info=None):
    """Platform uses config entry setup."""
    # Intentionally a no-op: entities are created in async_setup_entry.
    pass
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Add sensors for a config entry."""
    broker = hass.data[DOMAIN][DATA_BROKERS][config_entry.entry_id]
    sensors = []
    for device in broker.devices.values():
        for capability in broker.get_assigned(device.device_id, 'sensor'):
            if capability == Capability.three_axis:
                # three_axis reports a vector, so expose one entity per axis.
                sensors.extend(
                    [SmartThingsThreeAxisSensor(device, index)
                     for index in range(len(THREE_AXIS_NAMES))])
            else:
                # Every other capability maps to one sensor per Map entry.
                maps = CAPABILITY_TO_SENSORS[capability]
                sensors.extend([
                    SmartThingsSensor(
                        device, m.attribute, m.name, m.default_unit,
                        m.device_class)
                    for m in maps])
    async_add_entities(sensors)
def get_capabilities(capabilities: Sequence[str]) -> Optional[Sequence[str]]:
    """Return all capabilities supported if minimum required are present."""
    # Every capability with a sensor mapping that the device reports.
    return [supported for supported in CAPABILITY_TO_SENSORS
            if supported in capabilities]
class SmartThingsSensor(SmartThingsEntity):
    """Define a SmartThings Sensor."""

    def __init__(self, device, attribute: str, name: str,
                 default_unit: str, device_class: str):
        """Init the class."""
        super().__init__(device)
        self._attribute = attribute
        self._name = name
        self._device_class = device_class
        self._default_unit = default_unit

    @property
    def name(self) -> str:
        """Return the name of the sensor."""
        return '{} {}'.format(self._device.label, self._name)

    @property
    def unique_id(self) -> str:
        """Return a unique ID."""
        return '{}.{}'.format(self._device.device_id, self._attribute)

    @property
    def state(self):
        """Return the state of the sensor."""
        status = self._device.status
        return status.attributes[self._attribute].value

    @property
    def device_class(self):
        """Return the device class of the sensor."""
        return self._device_class

    @property
    def unit_of_measurement(self):
        """Return the unit this state is expressed in."""
        unit = self._device.status.attributes[self._attribute].unit
        if not unit:
            # No unit reported by the device; fall back to the mapping's
            # default.
            return self._default_unit
        return UNITS.get(unit, unit)
class SmartThingsThreeAxisSensor(SmartThingsEntity):
    """Define a SmartThings Three Axis Sensor."""

    def __init__(self, device, index):
        """Init the class."""
        super().__init__(device)
        self._index = index

    @property
    def name(self) -> str:
        """Return the name of the sensor."""
        axis_name = THREE_AXIS_NAMES[self._index]
        return '{} {}'.format(self._device.label, axis_name)

    @property
    def unique_id(self) -> str:
        """Return a unique ID."""
        axis_name = THREE_AXIS_NAMES[self._index]
        return '{}.{}'.format(self._device.device_id, axis_name)

    @property
    def state(self):
        """Return the state of the sensor."""
        values = self._device.status.attributes[Attribute.three_axis].value
        try:
            return values[self._index]
        except (TypeError, IndexError):
            # Value missing or not a proper three-element vector.
            return None
| |
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""URL endpoint to add new graph data to the datastore."""
import datetime
import json
import logging
from google.appengine.api import datastore_errors
from google.appengine.ext import ndb
from dashboard import add_point
from dashboard import find_anomalies
from dashboard import graph_revisions
from dashboard import units_to_direction
from dashboard.common import datastore_hooks
from dashboard.common import request_handler
from dashboard.common import stored_object
from dashboard.common import utils
from dashboard.models import anomaly
from dashboard.models import graph_data
# stored_object key under which the list of external (non-internal-only)
# bot names is kept; see _BotInternalOnly.
BOT_WHITELIST_KEY = 'bot_whitelist'
class AddPointQueueHandler(request_handler.RequestHandler):
    """Request handler to process points and add them to the datastore.

    This request handler is intended to be used only by requests using the
    task queue; it shouldn't be directly from outside.
    """

    def get(self):
        """A get request is the same as a post request for this endpoint."""
        self.post()

    def post(self):
        """Adds a set of points from the post data.

        Request parameters:
          data: JSON encoding of a list of dictionaries. Each dictionary
              represents one point to add. For each dict, one Row entity
              will be added, and any required TestMetadata or Master or
              Bot entities will be created.
        """
        datastore_hooks.SetPrivilegedRequest()
        data = json.loads(self.request.get('data'))
        _PrewarmGets(data)
        bot_whitelist = stored_object.Get(BOT_WHITELIST_KEY)
        all_put_futures = []
        added_rows = []
        monitored_test_keys = []
        for row_dict in data:
            try:
                new_row, parent_test, put_futures = _AddRow(
                    row_dict, bot_whitelist)
                added_rows.append(new_row)
                is_monitored = parent_test.sheriff and parent_test.has_rows
                if is_monitored:
                    monitored_test_keys.append(parent_test.key)
                all_put_futures.extend(put_futures)
            except add_point.BadRequestError as e:
                # Invalid point: skip just this one, keep the rest of the
                # batch. str(e) replaces the deprecated e.message attribute
                # (removed in Python 3).
                logging.error('Could not add %s, it was invalid.', str(e))
            except datastore_errors.BadRequestError as e:
                # Datastore-level failure: abandon the whole batch.
                logging.error('Datastore request failed: %s.', str(e))
                return
        ndb.Future.wait_all(all_put_futures)
        tests_keys = [k for k in monitored_test_keys if not _IsRefBuild(k)]
        # Updating of the cached graph revisions should happen after put
        # because it requires the new row to have a timestamp, which happens
        # upon put.
        futures = [
            graph_revisions.AddRowsToCacheAsync(added_rows),
            find_anomalies.ProcessTestsAsync(tests_keys)]
        ndb.Future.wait_all(futures)
def _PrewarmGets(data):
    """Prepares the cache so that fetching is faster later.

    The add_point request handler does a LOT of gets, and it's possible for
    each to take seconds. NDB does automatic in-context caching:
    https://developers.google.com/appengine/docs/python/ndb/cache#incontext
    so one async get() up front caches every entity we'll need throughout
    the request.

    Args:
      data: The request json.
    """
    # Collect the Master, Bot, and ancestor TestMetadata keys for every
    # point, de-duplicated, then kick off a single async multi-get.
    keys = set()
    for point in data:
        keys.add(ndb.Key('Master', point['master']))
        keys.add(ndb.Key('Master', point['master'], 'Bot', point['bot']))
        path = '%s/%s' % (point['master'], point['bot'])
        for component in point['test'].split('/'):
            if not component:
                break
            path += '/%s' % component
            keys.add(ndb.Key('TestMetadata', path))
    ndb.get_multi_async(list(keys))
def _AddRow(row_dict, bot_whitelist):
    """Adds a Row entity to the datastore.

    There are three main things that are needed in order to make a new entity;
    the ID, the parent key, and all of the properties. Making these three
    things, and validating the related input fields, are delegated to
    sub-functions.

    Args:
      row_dict: A dictionary obtained from the JSON that was received.
      bot_whitelist: A list of whitelisted bots names.

    Returns:
      A triple: The new row, the parent test, and a list of entity put futures.

    Raises:
      add_point.BadRequestError: The input dict was invalid.
      RuntimeError: The required parent entities couldn't be created.
    """
    # Parent test must exist (or be created) before the Row can be keyed.
    parent_test = _GetParentTest(row_dict, bot_whitelist)
    test_container_key = utils.GetTestContainerKey(parent_test.key)
    columns = add_point.GetAndValidateRowProperties(row_dict)
    # Rows inherit the parent test's visibility.
    columns['internal_only'] = parent_test.internal_only
    row_id = add_point.GetAndValidateRowId(row_dict)
    # Update the last-added revision record for this test.
    master, bot, test = row_dict['master'], row_dict['bot'], row_dict['test']
    test_path = '%s/%s/%s' % (master, bot, test)
    last_added_revision_entity = graph_data.LastAddedRevision(
        id=test_path, revision=row_id)
    entity_put_futures = []
    entity_put_futures.append(last_added_revision_entity.put_async())
    # If the row ID isn't the revision, that means that the data is Chrome OS
    # data, and we want the default revision to be Chrome version.
    if row_id != row_dict.get('revision'):
        columns['a_default_rev'] = 'r_chrome_version'
    # Create the entity and add it asynchronously; the caller is responsible
    # for waiting on the returned futures.
    new_row = graph_data.Row(id=row_id, parent=test_container_key, **columns)
    entity_put_futures.append(new_row.put_async())
    return new_row, parent_test, entity_put_futures
def _GetParentTest(row_dict, bot_whitelist):
    """Gets the parent test for a Row based on an input dictionary.

    Args:
      row_dict: A dictionary from the data parameter.
      bot_whitelist: A list of whitelisted bot names.

    Returns:
      A TestMetadata entity.

    Raises:
      RuntimeError: Something went wrong when trying to get the parent test.
    """
    bot_name = row_dict.get('bot')
    direction = _ImprovementDirection(row_dict.get('higher_is_better'))
    return _GetOrCreateAncestors(
        row_dict.get('master'),
        bot_name,
        row_dict.get('test').strip('/'),
        units=row_dict.get('units'),
        improvement_direction=direction,
        internal_only=_BotInternalOnly(bot_name, bot_whitelist),
        benchmark_description=row_dict.get('benchmark_description'))
def _ImprovementDirection(higher_is_better):
"""Returns an improvement direction (constant from alerts_data) or None."""
if higher_is_better is None:
return None
return anomaly.UP if higher_is_better else anomaly.DOWN
def _BotInternalOnly(bot_name, bot_whitelist):
"""Checks whether a given bot name is internal-only.
If a bot name is internal only, then new data for that bot should be marked
as internal-only.
"""
if not bot_whitelist:
logging.warning(
'No bot whitelist available. All data will be internal-only. If this '
'is not intended, please add a bot whitelist using /edit_site_config.')
return True
return bot_name not in bot_whitelist
def _GetOrCreateAncestors(
    master_name, bot_name, test_name, units=None,
    improvement_direction=None, internal_only=True, benchmark_description=''):
  """Gets or creates all parent Master, Bot, TestMetadata entities for a Row.

  Walks the slash-separated test path and creates each missing ancestor
  TestMetadata in order, so that the returned leaf test has a full chain of
  parents in the datastore.
  """
  master_entity = _GetOrCreateMaster(master_name)
  _GetOrCreateBot(bot_name, master_entity.key, internal_only)
  # Add all ancestor tests to the datastore in order.
  ancestor_test_parts = test_name.split('/')
  test_path = '%s/%s' % (master_name, bot_name)
  suite = None
  for index, ancestor_test_name in enumerate(ancestor_test_parts):
    # Certain properties should only be updated if the TestMetadata is for a
    # leaf test.
    is_leaf_test = (index == len(ancestor_test_parts) - 1)
    test_properties = {
        'units': units if is_leaf_test else None,
        'improvement_direction': (improvement_direction
                                  if is_leaf_test else None),
        'internal_only': internal_only,
    }
    ancestor_test = _GetOrCreateTest(
        ancestor_test_name, test_path, test_properties)
    if index == 0:
      # The first path component is the suite-level test.
      suite = ancestor_test
    test_path = ancestor_test.test_path
  # NOTE(review): the suite entity is mutated here but never put(); the new
  # description appears to be lost unless something else persists it — confirm.
  if benchmark_description and suite.description != benchmark_description:
    suite.description = benchmark_description
  return ancestor_test
def _GetOrCreateMaster(name):
  """Returns the Master entity with the given id, creating it if absent."""
  master = graph_data.Master.get_by_id(name)
  if master is None:
    master = graph_data.Master(id=name)
    master.put()
  return master
def _GetOrCreateBot(name, parent_key, internal_only):
  """Returns the Bot under the given Master, creating or updating it.

  An existing bot is re-put only when its internal_only flag changed.
  """
  bot = graph_data.Bot.get_by_id(name, parent=parent_key)
  if bot is None:
    logging.info('Adding bot %s/%s', parent_key.id(), name)
    bot = graph_data.Bot(
        id=name, parent=parent_key, internal_only=internal_only)
    bot.put()
    return bot
  if bot.internal_only != internal_only:
    bot.internal_only = internal_only
    bot.put()
  return bot
def _GetOrCreateTest(name, parent_test_path, properties):
  """Either gets an entity if it already exists, or creates one.

  If the entity already exists but the properties are different than the ones
  specified, then the properties will be updated first. This implies that a
  new point is being added for an existing TestMetadata, so if the TestMetadata
  has been previously marked as deprecated or associated with a stoppage alert,
  then it can be updated and marked as non-deprecated.

  If the entity doesn't yet exist, a new one will be created with the given
  properties.

  Args:
    name: The string ID of the Test to get or create.
    parent_test_path: The test_path of the parent entity.
    properties: A dictionary of properties that should be set.

  Returns:
    An entity (which has already been put).

  Raises:
    datastore_errors.BadRequestError: Something went wrong getting the entity.
  """
  test_path = '%s/%s' % (parent_test_path, name)
  existing = graph_data.TestMetadata.get_by_id(test_path)
  if not existing:
    # Add improvement direction if this is a new test.
    # NOTE(review): this overwrites any caller-supplied improvement_direction
    # whenever a 'units' key is present (even with a None value) — confirm
    # that is intended.
    if 'units' in properties:
      units = properties['units']
      direction = units_to_direction.GetImprovementDirection(units)
      properties['improvement_direction'] = direction
    new_entity = graph_data.TestMetadata(id=test_path, **properties)
    new_entity.put()
    # TODO(sullivan): Consider putting back Test entity in a scoped down
    # form so we can check if it exists here.
    return new_entity
  # Flag indicating whether we want to re-put the entity before returning.
  properties_changed = False
  # New data for a deprecated test un-deprecates it.
  if existing.deprecated:
    existing.deprecated = False
    properties_changed = True
  # New data also marks any associated stoppage alert as recovered.
  if existing.stoppage_alert:
    alert = existing.stoppage_alert.get()
    if alert:
      alert.recovered = True
      alert.last_row_timestamp = datetime.datetime.now()
      alert.put()
    else:
      logging.warning('Stoppage alert %s not found.', existing.stoppage_alert)
    existing.stoppage_alert = None
    properties_changed = True
  # Special case to update improvement direction from units for TestMetadata
  # entities when units are being updated. If an improvement direction is
  # explicitly provided in the properties, then we can skip this check since it
  # will get overwritten below. Additionally, by skipping we avoid
  # touching the entity and setting off an expensive put() operation.
  if properties.get('improvement_direction') is None:
    units = properties.get('units')
    if units:
      direction = units_to_direction.GetImprovementDirection(units)
      if direction != existing.improvement_direction:
        properties['improvement_direction'] = direction
  # Go through the list of general properties and update if necessary.
  # None values never overwrite existing ones (see leaf/non-leaf properties
  # built by _GetOrCreateAncestors).
  for prop, value in properties.items():
    if (hasattr(existing, prop) and value is not None and
        getattr(existing, prop) != value):
      setattr(existing, prop, value)
      properties_changed = True
  if properties_changed:
    existing.put()
  return existing
def _IsRefBuild(test_key):
"""Checks whether a TestMetadata is for a reference build test run."""
test_parts = test_key.id().split('/')
return test_parts[-1] == 'ref' or test_parts[-1].endswith('_ref')
| |
#---------------------------------------------------------------------------
# Copyright 2013 The Open Source Electronic Health Record Agent
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#---------------------------------------------------------------------------
"""
Utilitity Class to access VistA Menus System
"""
class VistAMenuUtil(object):
  """Drives a VistA roll-and-scroll session through common menu trees.

  Each goto*Menu method navigates from the OS/ready prompt (or a parent
  menu) down to the named menu; the matching exit*Menu method backs out
  again. Navigation is done by exact send/expect exchanges over the
  client's terminal connection, so the prompt strings below must match the
  VistA menu text verbatim.
  """
  def __init__(self, duz):
    # duz: the VistA user id (DUZ) set before entering menus via ^XUP / Q^DI.
    self._duz = duz
    pass
  """
  If not specified,
  in goto***Menu function call, vistAClient should be in ready state.
  after exit***Menu function call, vistAClient should also be in ready state.
  """
  """
  EVE System Menu
  """
  def gotoSystemMenu(self, vistAClient):
    # Enter the menu system via D ^XUP and select the EVE (Systems Manager)
    # menu; "CHOOSE 1-" handles the multiple-match prompt for "EVE".
    connection = vistAClient.getConnection()
    vistAClient.waitForPrompt()
    connection.send("S DUZ=%s D ^XUP\r" % self._duz)
    connection.expect("Select OPTION NAME: ")
    connection.send("EVE\r")
    connection.expect("CHOOSE 1-")
    connection.send("1\r")
    connection.expect("Select Systems Manager Menu ")
  def exitSystemMenu(self, vistAClient):
    # Back out of EVE and confirm the halt prompt to return to ready state.
    connection = vistAClient.getConnection()
    connection.expect("Select Systems Manager Menu ")
    connection.send("\r")
    connection.expect("Do you really want to halt\?")
    connection.send("\r")
    vistAClient.waitForPrompt()
    connection.send("\r")
  """ Programmer Options """
  def gotoProgrammerMenu(self, vistAClient):
    connection = vistAClient.getConnection()
    self.gotoSystemMenu(vistAClient)
    connection.send("Programmer Options\r")
    connection.expect("Select Programmer Options ")
  def exitProgrammerMenu(self, vistAClient):
    connection = vistAClient.getConnection()
    connection.expect("Select Programmer Options ")
    connection.send("\r")
    self.exitSystemMenu(vistAClient)
  """ KIDS Menu SubSection """
  def gotoKidsMainMenu(self, vistAClient):
    connection = vistAClient.getConnection()
    self.gotoProgrammerMenu(vistAClient)
    connection.send("KIDS\r")
    connection.expect("Select Kernel Installation \& Distribution System ")
  def exitKidsMainMenu(self, vistAClient):
    connection = vistAClient.getConnection()
    connection.expect("Select Kernel Installation \& Distribution System")
    connection.send("\r")
    self.exitProgrammerMenu(vistAClient)
  def gotoKidsUtilMenu(self, vistAClient):
    connection = vistAClient.getConnection()
    self.gotoKidsMainMenu(vistAClient)
    connection.send("Utilities\r")
    connection.expect("Select Utilities ")
  def exitKidsUtilMenu(self, vistAClient):
    # NOTE(review): unlike the sibling exit methods, this one sends "\r"
    # without first expect()ing the "Select Utilities " prompt — confirm
    # whether that is intentional or a missing expect call.
    connection = vistAClient.getConnection()
    connection.send("\r")
    self.exitKidsMainMenu(vistAClient)
  """ Taskman Menu SubSection """
  def gotoTaskmanMainMenu(self, vistAClient):
    connection = vistAClient.getConnection()
    self.gotoSystemMenu(vistAClient)
    connection.send("Taskman Management\r")
    connection.expect("Select Taskman Management ")
  def exitTaskmanMainMenu(self, vistAClient):
    connection = vistAClient.getConnection()
    connection.expect("Select Taskman Management ")
    connection.send("\r")
    self.exitSystemMenu(vistAClient)
  def gotoTaskmanMgrUtilMenu(self, vistAClient):
    connection = vistAClient.getConnection()
    self.gotoTaskmanMainMenu(vistAClient)
    connection.send("Taskman Management Utilities\r")
    connection.expect("Select Taskman Management Utilities ")
  def exitTaskmanMgrUtilMenu(self, vistAClient):
    connection = vistAClient.getConnection()
    connection.expect("Select Taskman Management Utilities ")
    connection.send("\r")
    self.exitTaskmanMainMenu(vistAClient)
  def gotoTaskmanEditParamMenu(self, vistAClient):
    connection = vistAClient.getConnection()
    self.gotoTaskmanMgrUtilMenu(vistAClient)
    connection.send("Edit Taskman Parameters\r")
    connection.expect("Select Edit Taskman Parameters ")
  def exitTaskmanEditParamMenu(self, vistAClient):
    connection = vistAClient.getConnection()
    connection.expect("Select Edit Taskman Parameters ")
    connection.send("\r")
    self.exitTaskmanMgrUtilMenu(vistAClient)
  """ HL7 Menu SubSection """
  def gotoHL7MainMenu(self, vistAClient):
    connection = vistAClient.getConnection()
    self.gotoSystemMenu(vistAClient)
    connection.send("HL7 Main Menu\r")
    connection.expect("Select HL7 Main Menu ")
  def exitHL7MainMenu(self, vistAClient):
    connection = vistAClient.getConnection()
    connection.expect("Select HL7 Main Menu ")
    connection.send("\r")
    self.exitSystemMenu(vistAClient)
  def gotoHL7FilerLinkMgrMenu(self, vistAClient):
    connection = vistAClient.getConnection()
    self.gotoHL7MainMenu(vistAClient)
    connection.send("Filer and Link Management Options\r")
    connection.expect("Select Filer and Link Management Options ")
  def exitHL7FilerLinkMgrMenu(self, vistAClient):
    connection = vistAClient.getConnection()
    connection.expect("Select Filer and Link Management Options ")
    connection.send("\r")
    self.exitHL7MainMenu(vistAClient)
  """ Mailman Menu Sub-Section """
  def gotoMailmanMasterMenu(self, vistAClient):
    connection = vistAClient.getConnection()
    self.gotoSystemMenu(vistAClient)
    connection.send("Mailman Master Menu\r")
    connection.expect("Select MailMan Master Menu ")
  def exitMailmanMasterMenu(self, vistAClient):
    connection = vistAClient.getConnection()
    connection.expect("Select MailMan Master Menu ")
    connection.send("\r")
    self.exitSystemMenu(vistAClient)
  def gotoMailmanManageMenu(self, vistAClient):
    connection = vistAClient.getConnection()
    self.gotoMailmanMasterMenu(vistAClient)
    connection.send("Manage Mailman\r")
    connection.expect("Select Manage Mailman ")
  def exitMailmanManageMenu(self, vistAClient):
    connection = vistAClient.getConnection()
    connection.expect("Select Manage Mailman ")
    connection.send("\r")
    self.exitMailmanMasterMenu(vistAClient)
  def gotoMailmanLocalDeliveryMgrMenu(self, vistAClient):
    connection = vistAClient.getConnection()
    self.gotoMailmanManageMenu(vistAClient)
    connection.send("Local Delivery Management\r")
    connection.expect("Select Local Delivery Management ")
  def exitMailmanLocalDeliveryMgrMenu(self, vistAClient):
    connection = vistAClient.getConnection()
    connection.expect("Select Local Delivery Management ")
    connection.send("\r")
    self.exitMailmanManageMenu(vistAClient)
  """
  FileMan Menu Section
  """
  def gotoFileManMenu(self, vistAClient):
    # FileMan is entered directly via D Q^DI rather than through EVE.
    connection = vistAClient.getConnection()
    vistAClient.waitForPrompt()
    connection.send("S DUZ=%s D Q^DI\r" % self._duz)
    connection.expect("Select OPTION:")
  def exitFileManMenu(self, vistAClient, waitOption=True):
    # waitOption=False skips the expect for callers already past the prompt.
    connection = vistAClient.getConnection()
    if waitOption:
      connection.expect("Select OPTION: ")
    connection.send("\r")
    vistAClient.waitForPrompt()
    connection.send("\r")
  def gotoFileManEditEnterEntryMenu(self, vistAClient):
    connection = vistAClient.getConnection()
    self.gotoFileManMenu(vistAClient)
    connection.send("1\r" )# enter or edit entry
    connection.expect("OUTPUT FROM WHAT FILE:")
  def gotoFileManPrintFileEntryMenu(self, vistAClient):
    connection = vistAClient.getConnection()
    self.gotoFileManMenu(vistAClient)
    connection.send("2\r" ) # print file entry
    connection.expect("OUTPUT FROM WHAT FILE:")
  def gotoFileManSearchFileEntryMenu(self, vistAClient):
    connection = vistAClient.getConnection()
    self.gotoFileManMenu(vistAClient)
    connection.send("3\r") # search file entry
    connection.expect("OUTPUT FROM WHAT FILE:")
  def gotoFileManInquireFileEntryMenu(self, vistAClient):
    connection = vistAClient.getConnection()
    self.gotoFileManMenu(vistAClient)
    connection.send("5\r" ) # inquiry file entry
    connection.expect("OUTPUT FROM WHAT FILE:")
| |
# -*- coding: utf-8 -*-
import rs.sqldb as sqldb
import csv
import slughifi
import simplejson as json
import os
from os.path import join as osjoin
import shutil
import re
with open( 'trans.json', 'rb' ) as trans_file:
content = trans_file.read()
translation = json.loads( content )
def trans( key ):
if key not in translation:
print 'WARNING: key %s not in translation' % key
return translation.get( key, '???' )
def get_collection_data( post_data ):
    '''Get collection data from POST and return it in a dict.

    parents[0] is the id of the directly chosen parent (or None); when a new
    collection tree is being created, the remaining entries describe the
    ancestors by name/description.
    '''
    chosen_parent = post_data.get( 'all_colls' )
    parents = [ int( chosen_parent ) if chosen_parent else None ]
    if post_data.get('type') == 'new':
        level = 0
        while post_data.get( 'ancestor-name-%d' % level ):
            parents.append({
                'name'       : post_data.get( 'ancestor-name-%d' % level ),
                'description': post_data.get( 'ancestor-desc-%d' % level ),
                'label'      : None
            })
            level += 1
    return {
        'name'   : post_data.get( 'name', '' ),
        'label'  : post_data.get( 'label', '' ),
        'visible': post_data.get( 'vis', '' ),
        'parents': parents
    }
def collection_data_validated( data ):
    '''Check if collection with such a name and parent does not collide with
    the other potential siblings.'''
    tree = sqldb.get_db_tree()
    parent_id = data['parents'][0]
    sibling_names = [ node['name'] for node in tree if node['parent'] == parent_id ]
    if len( data['parents'] ) == 1:
        candidate = data['name']
    else:
        candidate = data['parents'][1]['name']
    return candidate.encode('utf-8') not in sibling_names
def hierarchy_validated( hierarchy, labels ):
    '''Check indexes of hierarchy fields.

    Checks that the first hierarchy level has a column chosen, dropping a
    trailing empty level if the user left the last field blank (this mutates
    the passed-in list). Index -1 in 'aux_index' means that the auxiliary
    column was not chosen; 'index' == -1 on the first level is invalid.
    '''
    if hierarchy[0]['index'] == -1:
        return False
    # If user left last field empty, remove it.
    if len( hierarchy ) > 1 and hierarchy[-1]['index'] == -1:
        del hierarchy[-1]
    # Count how many times each column index is used in the hierarchy.
    counter = [ 0 for _ in labels ]
    for level in hierarchy:
        counter[ level['index'] ] -= 1
        aux_ind = level['aux_index']
        # Bug fix: -1 means "no auxiliary column"; the old code still ran
        # counter[-1] -= 1 here, wrongly charging the LAST label instead of
        # skipping the missing auxiliary column.
        if aux_ind != -1:
            try:
                counter[ aux_ind ] -= 1
            except IndexError:
                # Index is beyond the possible range; ignore it.
                pass
    # NOTE(review): despite the original "no index repeated" comment, this
    # condition is true whenever ANY index was used; preserved as-is since
    # callers may rely on it, but it likely should test for counts < -1.
    bad_indexes = [ x for x in counter if x < 0 ]
    return len( bad_indexes ) > 0
def save_upl_file( upl_file ):
    '''Save content of upl_file in a temporary file and return its name.

    The uploaded file is rewound afterwards so it can be read again.
    '''
    tmp_name = 'tmp.csv'
    with open( tmp_name, 'w' ) as tmp_file:
        for chunk in upl_file.chunks():
            tmp_file.write( chunk )
    upl_file.seek( 0 )
    return tmp_name
def get_header_labels( upl_file ):
    '''Get labels from the header of the uploaded file.

    Reads the first CSV row, rewinds the file, and returns the header
    fields decoded from UTF-8.
    '''
    reader = csv.reader( upl_file, quotechar='"', delimiter=';' )
    # next() builtin instead of the Python-2-only reader.next() method.
    header = next( reader )
    upl_file.seek( 0 )
    utf_header = [ field.decode('utf-8') for field in header ]
    return utf_header
def guess_types( file_name, hierarchy ):
    '''Try to guess types in the data file, but omit hierarchy fields.
    Return those types in the list.'''
    upl_file = open( file_name, 'rb' )
    try:
        reader = csv.reader( upl_file, quotechar='"', delimiter=';' )
        # next() builtin instead of the Python-2-only reader.next() method.
        next( reader )  # Skip the header row.
        first_line = next( reader )
    finally:
        # Close even if the file has fewer than two rows.
        upl_file.close()
    types = []
    # Get types for all fields of the first data row.
    for field in first_line:
        if is_int( field ):
            types.append( get_int_type_info( field ) )
        elif is_float( field ):
            types.append( get_float_type_info( field ) )
        else:
            types.append( get_string_type_info( field ) )
    # Remove hierarchy fields from the types list (indexes are sorted in
    # descending order, so deleting does not shift pending positions).
    hierarchy_indexes = get_hierarchy_indexes( hierarchy )
    for i in hierarchy_indexes:
        del types[i]
    return types
def get_columns_descr( hierarchy, labels, types ):
    '''Get non hierarchy columns description.'''
    non_hier_labels = labels[:]
    # Drop labels used by hierarchy columns; indexes come sorted descending,
    # so deletions do not shift the remaining positions.
    for ind in get_hierarchy_indexes( hierarchy ):
        del non_hier_labels[ ind ]
    return [
        {
            'label' : non_hier_labels[ i ],
            'type'  : type_info['type'],
            'format': type_info['format']
        }
        for i, type_info in enumerate( types )
    ]
def get_hierarchy_indexes( hierarchy ):
    '''Get from each hierarchy field indexes of hierarchy column and auxiliary
    column (if it exists). Return them in reversed order.'''
    indexes = []
    for level in hierarchy:
        for ind in ( level['index'], level['aux_index'] ):
            # -1 marks a column that was not chosen.
            if ind != -1:
                indexes.append( ind )
    return sorted( indexes, reverse=True )
def columns_validated( columns, hierarchy, labels ):
    '''Check if all non hierarchy columns are described in columns description
    and for each column check its attributes: type, basic and processable.'''
    if len( columns ) + len( get_hierarchy_indexes( hierarchy ) ) != len( labels ):
        return False
    for col in columns:
        if col['type'] not in ['string', 'number']:
            return False
        # Bug fix: this used `and`, so a column was only rejected when BOTH
        # flags were invalid; each flag must be a proper boolean on its own
        # (get_columns_errors reports them independently too).
        if (col['basic'] not in [True, False]) or (col['processable'] not in [True, False]):
            return False
    return True
def get_columns_errors( columns ):
    '''Return a list of human-readable error messages, one per invalid column
    (1-based numbering), for columns with bad type/basic/processable values.'''
    errors = []
    for (i, col) in enumerate( columns, 1 ):
        error = []
        if col['type'] not in ['string', 'number']:
            error.append( '%s: %s' % (trans('py_wrong_type'), col['type']) )
        if col['basic'] not in [True, False]:
            error.append( '%s: %s' % (trans('py_wrong_basic'), col['basic']) )
        if col['processable'] not in [True, False]:
            # Bug fix: the key was written as 'processable ' (trailing space),
            # which raised KeyError whenever this branch was taken.
            error.append( '%s: %s' % (trans('py_wrong_proc'), col['processable']) )
        if error != []:
            error_msg = ', '.join( error )
            errors.append( '%s %d: %s' % (trans('py_column'), i, error_msg) )
    return errors
def label_to_key( label ):
    # Slugify the label into a db-safe identifier; truncated to 63 chars
    # because of the POSTGRES max column name length.
    return slughifi.slughifi(label, True).replace('-', '_')[:63]
def get_int_type_info( value ):
    '''Type description used for integer-valued columns.'''
    return { 'type': 'number', 'format': '# ##0' }
def get_float_type_info( value ):
    '''Type description used for float-valued columns.'''
    return { 'type': 'number', 'format': '# ##0.00' }
def get_string_type_info( value ):
    '''Type description used for string-valued columns.'''
    return { 'type': 'string', 'format': '@' }
def is_int( value ):
    '''Return True if value can be parsed as an integer.'''
    try:
        int( value )
        return True
    except ValueError:
        return False
def is_float( value ):
    '''Return True if value can be parsed as a float after removing any
    whitespace (used as a thousands separator in the data files).'''
    try:
        # Raw string for the regex: '\s' in a plain literal is an invalid
        # escape and only works by CPython's lenient fallback.
        parsed_value = re.sub( r'\s', '', value )
        float( parsed_value )
    except ValueError:
        return False
    return True
def is_string( value ):
    '''Return True if value can be converted to str.'''
    try:
        str( value )
        return True
    except ValueError:
        return False
def create_desc_file( coll_data, hier, cols, user, fname):
    '''Create file describing collection with description of hierarchy, columns,
    collection's label and name, parent collections and uploader's login.'''
    # Fill key and index fields in columns.
    described_columns = add_columns_indexes( add_key( cols ), hier )
    # Expand the direct parent's id into fields describing all antecedants.
    full_parents = id_to_path( coll_data['parents'][0] ) + coll_data['parents'][1:]
    description = {
        'name'       : coll_data['name'],
        'description': None,
        'label'      : coll_data['label'],
        'columns'    : described_columns,
        'hierarchy'  : hier,
        'parents'    : full_parents,
        'user'       : user
    }
    with open( fname, 'wb' ) as desc_file:
        desc_file.write( json.dumps( description, sort_keys=True, indent=4, encoding='utf-8' ) )
def id_to_path( par_id ):
    '''Change parent id of a node in dbtree to name, description and label
    of all antecedants. Direct parent is the last element in the list.'''
    if par_id is None:
        return []
    db_tree = sqldb.get_db_tree()
    # dict: id -> tree node
    id_dict = dict( zip([ n['id'] for n in db_tree ], db_tree) )
    # Move to next parents to get their info until the top parent is reached.
    path = []
    parent_id = int( par_id )
    while True:
        parent = id_dict[ parent_id ]
        path.append({
            'name': parent['name'],
            'description': parent['description'],
            'label': None
        })
        # A root node has no usable parent id: int() fails and ends the walk.
        # Narrowed from a bare except, which also swallowed KeyboardInterrupt
        # and real programming errors.
        try:
            parent_id = int( parent['parent'] )
        except (TypeError, ValueError, KeyError):
            break
    path.reverse()
    return path
def add_key( columns ):
    '''Return the columns with a slugified 'key' added to each one.

    NOTE(review): the list copy is shallow, so the column dicts themselves
    are updated in place and callers see 'key' on the originals too.
    '''
    result = columns[:]
    for column in result:
        column['key'] = label_to_key( column['label'] )
    return result
def add_labels( hierarchy, labels ):
    '''Add labels to hierarchy fields. Each field gets 'label', which is label
    of hierarchy column, an 'aux' flag, and 'aux_label' if the field has an
    auxiliary column.'''
    annotated = hierarchy[:]
    # dict: label index -> label (keeps KeyError semantics for bad indexes).
    index_to_label = dict( enumerate( labels ) )
    for level in annotated:
        level['label'] = index_to_label[ level['index'] ]
        has_aux = level['aux_index'] != -1
        level['aux'] = has_aux
        if has_aux:
            level['aux_label'] = index_to_label[ level['aux_index'] ]
    return annotated
def add_columns_indexes( columns, hierarchy ):
    '''Returns copy of columns with their indexes in data file added.

    NOTE(review): the list copy is shallow — the column dicts are mutated
    in place.
    '''
    columns_copy = columns[:]
    hierarchy_indexes = get_hierarchy_indexes( hierarchy )
    both_size = len( hierarchy_indexes ) + len( columns )
    # Explicit list: `del` on a bare range() result only works on Python 2,
    # where range returns a list.
    columns_indexes = list( range( both_size ) )
    # Leave only non hierarchy columns' indexes (hierarchy indexes come
    # sorted descending, so deletions do not shift pending positions).
    for ind in hierarchy_indexes:
        del columns_indexes[ind]
    for (i, ind) in enumerate(columns_indexes):
        columns_copy[i]['index'] = ind
    return columns_copy
def move_src_file(filename, new_name):
'''Move file with data to directory with data files.'''
new_filename = new_name + '.csv'
curr_path = os.getcwd()
new_path = osjoin( curr_path, 'site_media', 'csv', new_filename )
print 'Copy file %s to %s' % (new_filename, new_path)
shutil.move( filename, new_path )
def remove_files( files ):
'''Remove temporary files used to upload data into db.'''
for f in files:
os.remove( f )
| |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-short-docstring-punctuation
"""## Sparse Tensor Representation
Tensorflow supports a `SparseTensor` representation for data that is sparse
in multiple dimensions. Contrast this representation with `IndexedSlices`,
which is efficient for representing tensors that are sparse in their first
dimension, and dense along all other dimensions.
@@SparseTensor
@@SparseTensorValue
## Conversion
@@sparse_to_dense
@@sparse_tensor_to_dense
@@sparse_to_indicator
@@sparse_merge
## Manipulation
@@sparse_concat
@@sparse_reorder
@@sparse_split
@@sparse_retain
@@sparse_fill_empty_rows
## Math Operations
@@sparse_add
@@sparse_tensor_dense_matmul
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_sparse_ops
from tensorflow.python.ops import math_ops
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_sparse_ops import *
# pylint: enable=wildcard-import
# pylint: disable=protected-access
def sparse_concat(concat_dim, sp_inputs, name=None):
  """Concatenates a list of `SparseTensor` along the specified dimension.

  Concatenation is with respect to the dense versions of the inputs. Every
  input must be a `SparseTensor` whose elements are ordered along increasing
  dimension number, and all shapes must match except along `concat_dim`,
  where the output size is the sum of the input sizes. Output elements are
  resorted to preserve canonical ordering, which requires an internal sort:
  the op runs in `O(M log M)` time for `M` total non-empty values.

  For example, if `concat_dim = 1`, concatenating a `[2, 3]` input holding
  `{[0, 2]: "a", [1, 0]: "b", [1, 1]: "c"}` with a `[2, 4]` input holding
  `{[0, 1]: "d", [0, 2]: "e"}` yields a `[2, 7]` output holding
  `{[0, 2]: "a", [0, 4]: "d", [0, 5]: "e", [1, 0]: "b", [1, 1]: "c"}`.

  Args:
    concat_dim: Dimension to concatenate along.
    sp_inputs: List of `SparseTensor` to concatenate.
    name: A name prefix for the returned tensors (optional).

  Returns:
    A `SparseTensor` with the concatenated output.

  Raises:
    TypeError: If `sp_inputs` is not a list of `SparseTensor`.
  """
  if not isinstance(sp_inputs, list):
    raise TypeError("Inputs must be a list")
  if not all(isinstance(sp_input, ops.SparseTensor) for sp_input in sp_inputs):
    raise TypeError("All inputs must be SparseTensors")
  # Degenerate case of one tensor: nothing to concatenate.
  if len(sp_inputs) == 1:
    return sp_inputs[0]
  all_indices = [tensor.indices for tensor in sp_inputs]
  all_values = [tensor.values for tensor in sp_inputs]
  all_shapes = [tensor.shape for tensor in sp_inputs]
  concat_ind, concat_val, concat_shape = gen_sparse_ops._sparse_concat(
      all_indices, all_values, all_shapes, concat_dim, name=name)
  return ops.SparseTensor(concat_ind, concat_val, concat_shape)
def sparse_add(sp_a, sp_b, thresh=0):
  """Adds two `SparseTensor` objects to produce another `SparseTensor`.

  Both inputs must have their indices in standard lexicographic order; run
  `SparseReorder` first if that is not the case.

  By default, a location whose values sum to zero is still present in the
  output index with an explicit zero stored in its value slot. Passing a
  positive `thresh` instead drops every output entry whose magnitude is
  strictly smaller than `thresh` (with `thresh == 0.0`, the default,
  everything is kept). E.g. for a logical sum densifying to
  `[[0, 2], [.1, 0], [6, -.2]]`: `thresh == 0` keeps all four entries,
  `thresh == 0.11` drops only `.1`, and `thresh == 0.21` drops both `.1`
  and `-.2`.

  Args:
    sp_a: The first input `SparseTensor`.
    sp_b: The second input `SparseTensor`.
    thresh: A 0-D `Tensor`; the magnitude threshold that determines if an
      output value/index pair takes space. Its dtype should match that of
      the values if they are real; if the latter are complex64/complex128,
      then the dtype should be float32/float64, correspondingly.

  Returns:
    A `SparseTensor` with the same shape, representing the sum.

  Raises:
    TypeError: If either `sp_a` or `sp_b` is not a `SparseTensor`.
  """
  for operand in (sp_a, sp_b):
    if not isinstance(operand, ops.SparseTensor):
      raise TypeError("All inputs must be SparseTensors")
  # The threshold compares against magnitudes, so it uses the real dtype
  # even for complex values.
  thresh = ops.convert_to_tensor(
      thresh, dtype=sp_a.values.dtype.real_dtype, name="thresh")
  sum_ind, sum_val, sum_shape = gen_sparse_ops._sparse_add(
      sp_a.indices, sp_a.values, sp_a.shape,
      sp_b.indices, sp_b.values, sp_b.shape,
      thresh)
  return ops.SparseTensor(sum_ind, sum_val, sum_shape)
@ops.RegisterShape("SparseAdd")
def _SparseAddShape(op): # pylint: disable=invalid-name
input_shape_shape = op.inputs[2].get_shape()
dim = input_shape_shape.num_elements()
return [
tensor_shape.TensorShape([None, dim]),
tensor_shape.unknown_shape(1),
input_shape_shape
]
@ops.RegisterShape("SparseAddGrad")
def _SparseAddGradShape(op): # pylint: disable=invalid-name
# shapes for (a_val_grad, b_val_grad)
a_nnz = op.inputs[1].get_shape()[0]
b_nnz = op.inputs[2].get_shape()[0]
return [tensor_shape.TensorShape([a_nnz]), tensor_shape.TensorShape([b_nnz])]
@ops.RegisterShape("SparseConcat")
def _SparseConcatShape(op):
"""Shape function for SparseConcat op."""
num_inputs = int(op.get_attr("N"))
# TF flattens and concatenates all list inputs, so reconstruct the lists here.
ind_shapes = [ind.get_shape().with_rank(2) for ind in op.inputs[0:num_inputs]]
val_shapes = [val.get_shape().with_rank(1)
for val in op.inputs[num_inputs:2 * num_inputs]]
shape_shapes = [shape.get_shape().with_rank(1)
for shape in op.inputs[2 * num_inputs:]]
output_ind_rows = tensor_shape.Dimension(0)
output_ind_cols = tensor_shape.Dimension(None)
output_val_elems = tensor_shape.Dimension(0)
output_shape_shape = tensor_shape.TensorShape(None)
for i in xrange(num_inputs):
num_elems_i = ind_shapes[i][0].merge_with(val_shapes[i][0])
output_ind_rows += num_elems_i
output_ind_cols = output_ind_cols.merge_with(ind_shapes[i][1])
output_val_elems += num_elems_i
output_shape_shape = output_shape_shape.merge_with(shape_shapes[i])
output_ind_shape = tensor_shape.matrix(output_ind_rows, output_ind_cols)
output_val_shape = tensor_shape.vector(output_val_elems)
return [output_ind_shape, output_val_shape, output_shape_shape]
def sparse_reorder(sp_input, name=None):
  """Reorders a `SparseTensor` into the canonical, row-major ordering.

  By convention all sparse ops preserve canonical ordering along increasing
  dimension number; ordering can only be violated by manual manipulation of
  the indices and values. Reordering does not affect the shape. E.g. a
  `[4, 5]` input with entries `{[0, 3]: b, [0, 1]: a, [3, 1]: d, [2, 0]: c}`
  becomes `{[0, 1]: a, [0, 3]: b, [2, 0]: c, [3, 1]: d}`.

  Args:
    sp_input: The input `SparseTensor`.
    name: A name prefix for the returned tensors (optional)

  Returns:
    A `SparseTensor` with the same shape and non-empty values, but in
    canonical ordering.

  Raises:
    TypeError: If `sp_input` is not a `SparseTensor`.
  """
  if not isinstance(sp_input, ops.SparseTensor):
    raise TypeError("Input must be a SparseTensor")
  sorted_ind, sorted_val = gen_sparse_ops._sparse_reorder(
      sp_input.indices, sp_input.values, sp_input.shape, name=name)
  return ops.SparseTensor(
      sorted_ind, sorted_val, array_ops.identity(sp_input.shape))
@ops.RegisterShape("SparseReorder")
def _SparseReorderShape(op):
"""Shape function for SparseReorder op."""
input_indices_shape = op.inputs[0].get_shape().with_rank(2)
input_values_shape = op.inputs[1].get_shape().with_rank(1)
unused_shape_shape = op.inputs[2].get_shape().with_rank(1)
return [input_indices_shape, input_values_shape]
def sparse_split(split_dim, num_split, sp_input, name=None):
  """Split a `SparseTensor` into `num_split` tensors along `split_dim`.

  If `sp_input.shape[split_dim]` is not an integer multiple of `num_split`,
  each slice starting from 0:`shape[split_dim] % num_split` gets one extra
  dimension. For example, if `split_dim = 1` and `num_split = 2` and the
  input is:

      input_tensor = shape = [2, 7]
      [    a   d e  ]
      [b c          ]

  Graphically the output tensors are:

      output_tensor[0] =
      [    a ]
      [b c   ]

      output_tensor[1] =
      [ d e  ]
      [      ]

  Args:
    split_dim: A 0-D `int32` `Tensor`. The dimension along which to split.
    num_split: A Python integer. The number of ways to split.
    sp_input: The `SparseTensor` to split.
    name: A name for the operation (optional).

  Returns:
    `num_split` `SparseTensor` objects resulting from splitting `value`.

  Raises:
    TypeError: If `sp_input` is not a `SparseTensor`.
  """
  if not isinstance(sp_input, ops.SparseTensor):
    raise TypeError("Input must be a SparseTensor")

  output_inds, output_vals, output_shapes = gen_sparse_ops._sparse_split(
      split_dim,
      sp_input.indices,
      sp_input.values,
      sp_input.shape,
      num_split,
      name=name)
  # The kernel returns three parallel lists; zip them back into one
  # SparseTensor per slice.
  return [ops.SparseTensor(output_inds[i], output_vals[i], output_shapes[i])
          for i in range(num_split)]
# pylint: disable=invalid-name
@ops.RegisterShape("SparseSplit")
def _SparseSplitShape(op):
  """Shape function for SparseSplit op."""
  num_split = int(op.get_attr("num_split"))
  shape_shape = op.inputs[3].get_shape()
  # Rank of the sparse tensor = number of elements in its dense-shape vector
  # (None if not statically known).
  dim = shape_shape.num_elements()
  indices_shape = tensor_shape.TensorShape([None, dim])
  values_shape = tensor_shape.unknown_shape(1)
  # Outputs are num_split indices matrices, then num_split values vectors,
  # then num_split shape vectors.
  return ([indices_shape] * num_split +
          [values_shape] * num_split +
          [shape_shape] * num_split)
# pylint: enable=invalid-name
@ops.RegisterShape("SparseToDense")
def _SparseToDenseShape(op):
  """Shape function for SparseToDense op."""
  output_shape = tensor_util.constant_value(op.inputs[1])
  if output_shape is None:
    # Shape not known statically; at best we can infer the output rank from
    # the length of the shape vector.
    num_dims = op.inputs[1].get_shape().with_rank(1)[0].value
    return [tensor_shape.unknown_shape(ndims=num_dims)]
  if np.ndim(output_shape) > 1:
    raise ValueError("Input shape should be a vector")
  return [tensor_shape.TensorShape(output_shape)]
def sparse_to_dense(sparse_indices,
                    output_shape,
                    sparse_values,
                    default_value=0,
                    validate_indices=True,
                    name=None):
  """Converts a sparse representation into a dense tensor.

  Builds an array `dense` with shape `output_shape` such that

  ```python
  # If sparse_indices is scalar
  dense[i] = (i == sparse_indices ? sparse_values : default_value)

  # If sparse_indices is a vector, then for each i
  dense[sparse_indices[i]] = sparse_values[i]

  # If sparse_indices is an n by d matrix, then for each i in [0, n)
  dense[sparse_indices[i][0], ..., sparse_indices[i][d-1]] = sparse_values[i]
  ```

  All other values in `dense` are set to `default_value`. If `sparse_values`
  is a scalar, all sparse indices are set to this single value.

  Indices should be sorted in lexicographic order, and indices must not
  contain any repeats. If `validate_indices` is True, these properties
  are checked during execution.

  Args:
    sparse_indices: A 0-D, 1-D, or 2-D `Tensor` of type `int32` or `int64`.
      `sparse_indices[i]` contains the complete index where `sparse_values[i]`
      will be placed.
    output_shape: A 1-D `Tensor` of the same type as `sparse_indices`. Shape
      of the dense output tensor.
    sparse_values: A 0-D or 1-D `Tensor`. Values corresponding to each row of
      `sparse_indices`, or a scalar value to be used for all sparse indices.
    default_value: A 0-D `Tensor` of the same type as `sparse_values`. Value
      to set for indices not specified in `sparse_indices`. Defaults to zero.
    validate_indices: A boolean value. If True, indices are checked to make
      sure they are sorted in lexicographic order and that there are no
      repeats.
    name: A name for the operation (optional).

  Returns:
    Dense `Tensor` of shape `output_shape`. Has the same type as
    `sparse_values`.
  """
  # Thin wrapper: forward everything to the generated kernel binding.
  return gen_sparse_ops._sparse_to_dense(
      sparse_indices, output_shape, sparse_values,
      default_value=default_value,
      validate_indices=validate_indices,
      name=name)
def sparse_tensor_to_dense(sp_input,
                           default_value=0,
                           validate_indices=True,
                           name=None):
  """Converts a `SparseTensor` into a dense tensor.

  This op is a convenience wrapper around `sparse_to_dense` for
  `SparseTensor`s.

  For example, if `sp_input` has shape `[3, 5]` and non-empty string values:

      [0, 1]: a
      [0, 3]: b
      [2, 0]: c

  and `default_value` is `x`, then the output will be a dense `[3, 5]`
  string tensor with values:

      [[x a x b x]
       [x x x x x]
       [c x x x x]]

  Indices must be without repeats. This is only tested if
  `validate_indices` is True.

  Args:
    sp_input: The input `SparseTensor`.
    default_value: Scalar value to set for indices not specified in
      `sp_input`. Defaults to zero.
    validate_indices: A boolean value. If `True`, indices are checked to make
      sure they are sorted in lexicographic order and that there are no
      repeats.
    name: A name prefix for the returned tensors (optional).

  Returns:
    A dense tensor with shape `sp_input.shape` and values specified by
    the non-empty values in `sp_input`. Indices not in `sp_input` are
    assigned `default_value`.

  Raises:
    TypeError: If `sp_input` is not a `SparseTensor`.
  """
  if not isinstance(sp_input, ops.SparseTensor):
    raise TypeError("Input must be a SparseTensor")

  # Unpack the SparseTensor's components and delegate.
  return sparse_to_dense(sp_input.indices, sp_input.shape, sp_input.values,
                         default_value=default_value,
                         validate_indices=validate_indices,
                         name=name)
def sparse_to_indicator(sp_input, vocab_size, name=None):
  """Converts a `SparseTensor` of ids into a dense bool indicator tensor.

  The last dimension of `sp_input.indices` is discarded and replaced with
  the values of `sp_input`. If `sp_input.shape = [D0, D1, ..., Dn, K]`, then
  `output.shape = [D0, D1, ..., Dn, vocab_size]`, where

      output[d_0, d_1, ..., d_n, sp_input[d_0, d_1, ..., d_n, k]] = True

  and False elsewhere in `output`.

  For example, if `sp_input.shape = [2, 3, 4]` with non-empty values:

      [0, 0, 0]: 0
      [0, 1, 0]: 10
      [1, 0, 3]: 103
      [1, 1, 2]: 150
      [1, 1, 3]: 149
      [1, 1, 4]: 150
      [1, 2, 1]: 121

  and `vocab_size = 200`, then the output will be a `[2, 3, 200]` dense bool
  tensor with False everywhere except at positions

      (0, 0, 0), (0, 1, 10), (1, 0, 103), (1, 1, 149), (1, 1, 150),
      (1, 2, 121).

  Note that repeats are allowed in the input SparseTensor.
  This op is useful for converting `SparseTensor`s into dense formats for
  compatibility with ops that expect dense tensors.

  The input `SparseTensor` must be in row-major order.

  Args:
    sp_input: A `SparseTensor` with `values` property of type `int32` or
      `int64`.
    vocab_size: A scalar int64 Tensor (or Python int) containing the new size
      of the last dimension, `all(0 <= sp_input.values < vocab_size)`.
    name: A name prefix for the returned tensors (optional)

  Returns:
    A dense bool indicator tensor representing the indices with specified
    value.

  Raises:
    TypeError: If `sp_input` is not a `SparseTensor`.
  """
  if not isinstance(sp_input, ops.SparseTensor):
    raise TypeError("Input must be a SparseTensor")

  with ops.op_scope([sp_input], name, "SparseToIndicator") as name:
    # Build a SparseTensor with the same indices as sp_input but with every
    # value replaced by True.
    num_entries = array_ops.shape(sp_input.indices)[0]
    true_values = array_ops.fill(array_ops.expand_dims(num_entries, 0), True)
    sp_trues = ops.SparseTensor(sp_input.indices, true_values, sp_input.shape)

    # Move the ids into the last index dimension, sized vocab_size.
    sp_indicator = sparse_merge(sp_input, sp_trues, vocab_size, name)

    # validate_indices may be False because we allow duplicates in
    # new_indices: repeated indices are allowed when creating an indicator
    # matrix.
    return sparse_tensor_to_dense(sp_indicator,
                                  default_value=False,
                                  validate_indices=False,
                                  name=name)
def sparse_merge(sp_ids, sp_values, vocab_size, name=None):
  """Combines a batch of feature ids and values into a single `SparseTensor`.

  The most common use case for this function occurs when feature ids and
  their corresponding values are stored in `Example` protos on disk.
  `parse_example` will return a batch of ids and a batch of values, and this
  function joins them into a single logical `SparseTensor` for use in
  functions such as `sparse_tensor_dense_matmul`, `sparse_to_dense`, etc.

  The `SparseTensor` returned by this function has the following properties:

    - `indices` is equivalent to `sp_ids.indices` with the last
      dimension discarded and replaced with `sp_ids.values`.
    - `values` is simply `sp_values.values`.
    - If `sp_ids.shape = [D0, D1, ..., Dn, K]`, then
      `output.shape = [D0, D1, ..., Dn, vocab_size]`.

  For example, consider the following feature vectors:

      vector1 = [-3, 0, 0, 0, 0, 0]
      vector2 = [ 0, 1, 0, 4, 1, 0]
      vector3 = [ 5, 0, 0, 9, 0, 0]

  These might be stored sparsely in the following Example protos by storing
  only the feature ids (column number if the vectors are treated as a matrix)
  of the non-zero elements and the corresponding values:

      examples = [Example(features={
                      "ids": Feature(int64_list=Int64List(value=[0])),
                      "values": Feature(float_list=FloatList(value=[-3]))}),
                  Example(features={
                      "ids": Feature(int64_list=Int64List(value=[1, 4, 3])),
                      "values": Feature(float_list=FloatList(value=[1, 1, 4]))}),
                  Example(features={
                      "ids": Feature(int64_list=Int64List(value=[0, 3])),
                      "values": Feature(float_list=FloatList(value=[5, 9]))})]

  The result of calling parse_example on these examples will produce a
  dictionary with entries for "ids" and "values". Passing those two objects
  to this function will produce a `SparseTensor` that sparsely represents
  all three instances. Namely, the `indices` property will contain
  the coordinates of the non-zero entries in the feature matrix (the first
  dimension is the row number in the matrix, i.e., the index within the batch,
  and the second dimension is the column number, i.e., the feature id);
  `values` will contain the actual values. `shape` will be the shape of the
  original matrix, i.e., (3, 7). For our example above, the output will be
  equal to:

      SparseTensor(indices=[[0, 0], [1, 1], [1, 3], [1, 4], [2, 0], [2, 3]],
                   values=[-3, 1, 4, 1, 5, 9],
                   shape=[3, 7])

  Args:
    sp_ids: A `SparseTensor` with `values` property of type `int32`
      or `int64`.
    sp_values: A `SparseTensor` of any type.
    vocab_size: A scalar `int64` Tensor (or Python int) containing the new
      size of the last dimension, `all(0 <= sp_ids.values < vocab_size)`.
    name: A name prefix for the returned tensors (optional)

  Returns:
    A `SparseTensor` compactly representing a batch of feature ids and values,
    useful for passing to functions that expect such a `SparseTensor`.

  Raises:
    TypeError: If `sp_ids` or `sp_values` are not a `SparseTensor`.
  """
  if not isinstance(sp_ids, ops.SparseTensor):
    raise TypeError("sp_ids must be a SparseTensor")
  if not isinstance(sp_values, ops.SparseTensor):
    raise TypeError("sp_values must be a SparseTensor")

  with ops.op_scope([sp_ids, sp_values], name, "SparseMerge"):
    rank = array_ops.shape(sp_ids.indices)[1]
    ids = sp_ids.values
    if ids.dtype != dtypes.int64:
      ids = math_ops.cast(ids, dtypes.int64)

    # Slice off the last dimension of indices, then tack on the ids as the
    # new last dimension.
    leading_columns = array_ops.slice(
        sp_ids.indices, [0, 0], array_ops.pack([-1, rank - 1]))
    merged_indices = array_ops.concat(
        1, [leading_columns, array_ops.reshape(ids, [-1, 1])])

    # Keep every leading dimension of sp_ids.shape and replace the last one
    # with vocab_size.
    merged_shape = array_ops.concat(
        0,
        [array_ops.slice(sp_ids.shape, [0],
                         array_ops.expand_dims(rank - 1, 0)),
         math_ops.cast(array_ops.pack([vocab_size]), dtypes.int64)])

    return sparse_reorder(
        ops.SparseTensor(merged_indices, sp_values.values, merged_shape))
def sparse_retain(sp_input, to_retain):
  """Retains specified non-empty values within a `SparseTensor`.

  For example, if `sp_input` has shape `[4, 5]` and 4 non-empty string values:

      [0, 1]: a
      [0, 3]: b
      [2, 0]: c
      [3, 1]: d

  and `to_retain = [True, False, False, True]`, then the output will
  be a `SparseTensor` of shape `[4, 5]` with 2 non-empty values:

      [0, 1]: a
      [3, 1]: d

  Args:
    sp_input: The input `SparseTensor` with `N` non-empty elements.
    to_retain: A bool vector of length `N` with `M` true values.

  Returns:
    A `SparseTensor` with the same shape as the input and `M` non-empty
    elements corresponding to the true positions in `to_retain`.

  Raises:
    TypeError: If `sp_input` is not a `SparseTensor`.
  """
  if not isinstance(sp_input, ops.SparseTensor):
    raise TypeError("Input must be a SparseTensor")

  to_retain = ops.convert_to_tensor(to_retain)

  # Static check (when shapes are known at graph construction time):
  # to_retain must be a vector with one entry per non-empty value.
  retain_shape = to_retain.get_shape()
  retain_shape.assert_has_rank(1)
  sp_input.values.get_shape()[0].merge_with(retain_shape[0])

  # Positions of the True entries select which rows to keep.
  keep = array_ops.reshape(array_ops.where(to_retain), [-1])
  return ops.SparseTensor(array_ops.gather(sp_input.indices, keep),
                          array_ops.gather(sp_input.values, keep),
                          array_ops.identity(sp_input.shape))
def sparse_fill_empty_rows(sp_input, default_value, name=None):
  """Fills empty rows in the input 2-D `SparseTensor` with a default value.

  This op adds entries with the specified `default_value` at index
  `[row, 0]` for any row in the input that does not already have a value.

  For example, suppose `sp_input` has shape `[5, 6]` and non-empty values:

      [0, 1]: a
      [0, 3]: b
      [2, 0]: c
      [3, 1]: d

  Rows 1 and 4 are empty, so the output will be of shape `[5, 6]` with
  values:

      [0, 1]: a
      [0, 3]: b
      [1, 0]: default_value
      [2, 0]: c
      [3, 1]: d
      [4, 0]: default_value

  Note that the input may have empty columns at the end, with no effect on
  this op.

  The output `SparseTensor` will be in row-major order and will have the
  same shape as the input.

  This op also returns an indicator vector such that

      empty_row_indicator[i] = True iff row i was an empty row.

  Args:
    sp_input: A `SparseTensor` with shape `[N, M]`.
    default_value: The value to fill for empty rows, with the same type as
      `sp_input.`
    name: A name prefix for the returned tensors (optional)

  Returns:
    sp_ordered_output: A `SparseTensor` with shape `[N, M]`, and with all
      empty rows filled in with `default_value`.
    empty_row_indicator: A bool vector of length `N` indicating whether each
      input row was empty.

  Raises:
    TypeError: If `sp_input` is not a `SparseTensor`.
  """
  if not isinstance(sp_input, ops.SparseTensor):
    raise TypeError("Input must be a SparseTensor")
  with ops.op_scope([sp_input], name, "SparseFillEmptyRows"):
    default_value = ops.convert_to_tensor(default_value,
                                          dtype=sp_input.values.dtype)
    # Rows that appear in no index are the empty ones: diff the full range
    # [0, N) against the row coordinates present in the input.
    num_rows = math_ops.cast(sp_input.shape[0], dtypes.int32)
    all_row_indices = math_ops.cast(math_ops.range(num_rows), dtypes.int64)
    empty_row_indices, _ = array_ops.list_diff(all_row_indices,
                                               sp_input.indices[:, 0])
    # Dense bool vector of length N: True exactly at the empty rows.
    empty_row_indicator = sparse_to_dense(
        empty_row_indices, array_ops.expand_dims(sp_input.shape[0], -1), True,
        False)
    # For each empty row r, synthesize one entry at [r, 0] holding
    # default_value.
    empty_row_indices_as_column = array_ops.reshape(empty_row_indices, [-1, 1])
    additional_indices = array_ops.concat(
        1, [empty_row_indices_as_column,
            array_ops.zeros_like(empty_row_indices_as_column)])
    additional_values = array_ops.fill(
        array_ops.shape(empty_row_indices), default_value)
    # Append the synthesized entries after the originals; this breaks
    # row-major order, so reorder before returning.
    all_indices_unordered = array_ops.concat(0, [sp_input.indices,
                                                 additional_indices])
    all_values_unordered = array_ops.concat(0, [sp_input.values,
                                                additional_values])
    sp_unordered_output = ops.SparseTensor(all_indices_unordered,
                                           all_values_unordered, sp_input.shape)
    sp_ordered_output = sparse_reorder(sp_unordered_output)
    return sp_ordered_output, empty_row_indicator
def serialize_sparse(sp_input, name=None):
  """Serialize a `SparseTensor` into a string 3-vector (1-D `Tensor`) object.

  Args:
    sp_input: The input `SparseTensor`.
    name: A name prefix for the returned tensors (optional).

  Returns:
    A string 3-vector (1D `Tensor`), with each column representing the
    serialized `SparseTensor`'s indices, values, and shape (respectively).

  Raises:
    TypeError: If `sp_input` is not a `SparseTensor`.
  """
  if not isinstance(sp_input, ops.SparseTensor):
    raise TypeError("Input must be a SparseTensor.")

  return gen_sparse_ops._serialize_sparse(
      sp_input.indices, sp_input.values, sp_input.shape, name=name)
@ops.RegisterShape("SerializeSparse")
def _SerializeSparseShape(op):  # pylint: disable=invalid-name
  """Shape function for SerializeSparse op."""
  # Rank-check the inputs: indices [N, R], values [N], shape [R].
  for tensor, rank in zip(op.inputs[:3], (2, 1, 1)):
    tensor.get_shape().with_rank(rank)
  # Output is one string 3-vector: (indices, values, shape).
  return [tensor_shape.vector(3)]
def serialize_many_sparse(sp_input, name=None):
  """Serialize an `N`-minibatch `SparseTensor` into an `[N, 3]` string `Tensor`.

  The `SparseTensor` must have rank `R` greater than 1, and the first
  dimension is treated as the minibatch dimension. Elements of the
  `SparseTensor` must be sorted in increasing order of this first dimension.
  The serialized `SparseTensor` objects going into each row of the output
  `Tensor` will have rank `R-1`.

  The minibatch size `N` is extracted from `sparse_shape[0]`.

  Args:
    sp_input: The input rank `R` `SparseTensor`.
    name: A name prefix for the returned tensors (optional).

  Returns:
    A string matrix (2-D `Tensor`) with `N` rows and `3` columns.
    Each column represents serialized `SparseTensor`'s indices, values, and
    shape (respectively).

  Raises:
    TypeError: If `sp_input` is not a `SparseTensor`.
  """
  if not isinstance(sp_input, ops.SparseTensor):
    raise TypeError("Input must be a SparseTensor.")

  return gen_sparse_ops._serialize_many_sparse(
      sp_input.indices, sp_input.values, sp_input.shape, name=name)
@ops.RegisterShape("SerializeManySparse")
def _SerializeManySparseShape(op):  # pylint: disable=invalid-name
  """Shape function for SerializeManySparse op."""
  # Fix: the docstring previously said "SerializeSparse op" (copy-paste from
  # the shape function above); this registration is for SerializeManySparse.
  # Rank-check the inputs: indices [N, R], values [N], shape [R].
  op.inputs[0].get_shape().with_rank(2)
  op.inputs[1].get_shape().with_rank(1)
  op.inputs[2].get_shape().with_rank(1)
  # One serialized (indices, values, shape) string triple per minibatch row.
  return [tensor_shape.matrix(None, 3)]
def deserialize_many_sparse(serialized_sparse, dtype, rank=None, name=None):
  """Deserialize and concatenate `SparseTensors` from a serialized minibatch.

  The input `serialized_sparse` must be a string matrix of shape `[N x 3]`
  where `N` is the minibatch size and the rows correspond to packed outputs
  of `serialize_sparse`. The ranks of the original `SparseTensor` objects
  must all match. When the final `SparseTensor` is created, it has rank one
  higher than the ranks of the incoming `SparseTensor` objects (they have
  been concatenated along a new row dimension).

  The output `SparseTensor` object's shape values for all dimensions but the
  first are the max across the input `SparseTensor` objects' shape values
  for the corresponding dimensions. Its first shape value is `N`, the
  minibatch size.

  The input `SparseTensor` objects' indices are assumed ordered in
  standard lexicographic order. If this is not the case, after this
  step run `sparse_reorder` to restore index ordering.

  For example, if the serialized input is a `[2, 3]` matrix representing two
  original `SparseTensor` objects:

      index = [ 0]
              [10]
              [20]
      values = [1, 2, 3]
      shape = [50]

  and

      index = [ 2]
              [10]
      values = [4, 5]
      shape = [30]

  then the final deserialized `SparseTensor` will be:

      index = [0  0]
              [0 10]
              [0 20]
              [1  2]
              [1 10]
      values = [1, 2, 3, 4, 5]
      shape = [2 50]

  Args:
    serialized_sparse: 2-D `Tensor` of type `string` of shape `[N, 3]`.
      The serialized and packed `SparseTensor' objects.
    dtype: The `dtype` of the serialized `SparseTensor` objects.
    rank: (optional) Python int, the rank of the `SparseTensor` objects.
    name: A name prefix for the returned tensors (optional)

  Returns:
    A `SparseTensor` representing the deserialized `SparseTensor`s,
    concatenated along the `SparseTensor`s' first dimension.

    All of the serialized `SparseTensor`s must have had the same rank and
    type.
  """
  ind, val, shape = gen_sparse_ops._deserialize_many_sparse(
      serialized_sparse, dtype, name=name)

  # Feed static rank information back into the graph, if available.
  ind.set_shape([None, rank])
  shape.set_shape([rank])

  return ops.SparseTensor(ind, val, shape)
@ops.RegisterShape("DeserializeManySparse")
def _DeserializeSparseShape(op):  # pylint: disable=invalid-name
  """Shape function for DeserializeManySparse op."""
  # Input must be an [N, 3] string matrix of packed (indices, values, shape).
  op.inputs[0].get_shape().with_rank(2).merge_with(
      tensor_shape.TensorShape([None, 3]))
  # Neither N nor the original rank is statically known here.
  return [tensor_shape.matrix(None, None),
          tensor_shape.vector(None),
          tensor_shape.vector(None)]
def sparse_tensor_dense_matmul(sp_a, b, adjoint_a=False, adjoint_b=False,
                               name=None):
  # pylint: disable=line-too-long
  """Multiply SparseTensor (of rank 2) "A" by dense matrix "B".

  No validity checking is performed on the indices of A. However, the
  following input format is recommended for optimal behavior:

  if adjoint_a == false:
    A should be sorted in lexicographically increasing order. Use
    sparse_reorder if you're not sure.
  if adjoint_a == true:
    A should be sorted in order of increasing dimension 1 (i.e., "column
    major" order instead of "row major" order).

  Deciding when to use sparse_tensor_dense_matmul vs. matmul(sp_a=True):

  There are a number of questions to ask in the decision process, including:

  * Will the SparseTensor A fit in memory if densified?
  * Is the column count of the product large (>> 1)?
  * Is the density of A larger than approximately 15%?

  If the answer to several of these questions is yes, consider
  converting the SparseTensor to a dense one and using tf.matmul with
  sp_a=True.

  This operation tends to perform well when A is more sparse, if the column
  size of the product is small (e.g. matrix-vector multiplication), if
  sp_a.shape takes on large values.

  Below is a rough speed comparison between sparse_tensor_dense_matmul,
  labelled 'sparse', and matmul(sp_a=True), labelled 'dense'. For purposes of
  the comparison, the time spent converting from a SparseTensor to a dense
  Tensor is not included, so it is overly conservative with respect to
  the time ratio.

  Benchmark system:
  CPU: Intel Ivybridge with HyperThreading (6 cores) dL1:32KB dL2:256KB dL3:12MB
  GPU: NVidia Tesla k40c

  Compiled with:
  -c opt --config=cuda --copt=-mavx

  ```tensorflow/python/sparse_tensor_dense_matmul_op_test --benchmarks
  A sparse [m, k] with % nonzero values between 1% and 80%
  B dense [k, n]

  % nnz  n    gpu   m      k      dt(dense)    dt(sparse)   dt(sparse)/dt(dense)
  0.01   1    True  100    100    0.000221166  0.00010154   0.459112
  0.01   1    True  100    1000   0.00033858   0.000109275  0.322745
  0.01   1    True  1000   100    0.000310557  9.85661e-05  0.317385
  0.01   1    True  1000   1000   0.0008721    0.000100875  0.115669
  0.01   1    False 100    100    0.000208085  0.000107603  0.51711
  0.01   1    False 100    1000   0.000327112  9.51118e-05  0.290762
  0.01   1    False 1000   100    0.000308222  0.00010345   0.335635
  0.01   1    False 1000   1000   0.000865721  0.000101397  0.117124
  0.01   10   True  100    100    0.000218522  0.000105537  0.482958
  0.01   10   True  100    1000   0.000340882  0.000111641  0.327506
  0.01   10   True  1000   100    0.000315472  0.000117376  0.372064
  0.01   10   True  1000   1000   0.000905493  0.000123263  0.136128
  0.01   10   False 100    100    0.000221529  9.82571e-05  0.44354
  0.01   10   False 100    1000   0.000330552  0.000112615  0.340687
  0.01   10   False 1000   100    0.000341277  0.000114097  0.334324
  0.01   10   False 1000   1000   0.000819944  0.000120982  0.147549
  0.01   25   True  100    100    0.000207806  0.000105977  0.509981
  0.01   25   True  100    1000   0.000322879  0.00012921   0.400181
  0.01   25   True  1000   100    0.00038262   0.000141583  0.370035
  0.01   25   True  1000   1000   0.000865438  0.000202083  0.233504
  0.01   25   False 100    100    0.000209401  0.000104696  0.499979
  0.01   25   False 100    1000   0.000321161  0.000130737  0.407076
  0.01   25   False 1000   100    0.000377012  0.000136801  0.362856
  0.01   25   False 1000   1000   0.000861125  0.00020272   0.235413
  0.2    1    True  100    100    0.000206952  9.69219e-05  0.46833
  0.2    1    True  100    1000   0.000348674  0.000147475  0.422959
  0.2    1    True  1000   100    0.000336908  0.00010122   0.300439
  0.2    1    True  1000   1000   0.001022     0.000203274  0.198898
  0.2    1    False 100    100    0.000207532  9.5412e-05   0.459746
  0.2    1    False 100    1000   0.000356127  0.000146824  0.41228
  0.2    1    False 1000   100    0.000322664  0.000100918  0.312764
  0.2    1    False 1000   1000   0.000998987  0.000203442  0.203648
  0.2    10   True  100    100    0.000211692  0.000109903  0.519165
  0.2    10   True  100    1000   0.000372819  0.000164321  0.440753
  0.2    10   True  1000   100    0.000338651  0.000144806  0.427596
  0.2    10   True  1000   1000   0.00108312   0.000758876  0.70064
  0.2    10   False 100    100    0.000215727  0.000110502  0.512231
  0.2    10   False 100    1000   0.000375419  0.0001613    0.429653
  0.2    10   False 1000   100    0.000336999  0.000145628  0.432132
  0.2    10   False 1000   1000   0.00110502   0.000762043  0.689618
  0.2    25   True  100    100    0.000218705  0.000129913  0.594009
  0.2    25   True  100    1000   0.000394794  0.00029428   0.745402
  0.2    25   True  1000   100    0.000404483  0.0002693    0.665788
  0.2    25   True  1000   1000   0.0012002    0.00194494   1.62052
  0.2    25   False 100    100    0.000221494  0.0001306    0.589632
  0.2    25   False 100    1000   0.000396436  0.000297204  0.74969
  0.2    25   False 1000   100    0.000409346  0.000270068  0.659754
  0.2    25   False 1000   1000   0.00121051   0.00193737   1.60046
  0.5    1    True  100    100    0.000214981  9.82111e-05  0.456836
  0.5    1    True  100    1000   0.000415328  0.000223073  0.537101
  0.5    1    True  1000   100    0.000358324  0.00011269   0.314492
  0.5    1    True  1000   1000   0.00137612   0.000437401  0.317851
  0.5    1    False 100    100    0.000224196  0.000101423  0.452386
  0.5    1    False 100    1000   0.000400987  0.000223286  0.556841
  0.5    1    False 1000   100    0.000368825  0.00011224   0.304318
  0.5    1    False 1000   1000   0.00136036   0.000429369  0.31563
  0.5    10   True  100    100    0.000222125  0.000112308  0.505608
  0.5    10   True  100    1000   0.000461088  0.00032357   0.701753
  0.5    10   True  1000   100    0.000394624  0.000225497  0.571422
  0.5    10   True  1000   1000   0.00158027   0.00190898   1.20801
  0.5    10   False 100    100    0.000232083  0.000114978  0.495418
  0.5    10   False 100    1000   0.000454574  0.000324632  0.714146
  0.5    10   False 1000   100    0.000379097  0.000227768  0.600817
  0.5    10   False 1000   1000   0.00160292   0.00190168   1.18638
  0.5    25   True  100    100    0.00023429   0.000151703  0.647501
  0.5    25   True  100    1000   0.000497462  0.000598873  1.20386
  0.5    25   True  1000   100    0.000460778  0.000557038  1.20891
  0.5    25   True  1000   1000   0.00170036   0.00467336   2.74845
  0.5    25   False 100    100    0.000228981  0.000155334  0.678371
  0.5    25   False 100    1000   0.000496139  0.000620789  1.25124
  0.5    25   False 1000   100    0.00045473   0.000551528  1.21287
  0.5    25   False 1000   1000   0.00171793   0.00467152   2.71927
  0.8    1    True  100    100    0.000222037  0.000105301  0.47425
  0.8    1    True  100    1000   0.000410804  0.000329327  0.801664
  0.8    1    True  1000   100    0.000349735  0.000131225  0.375212
  0.8    1    True  1000   1000   0.00139219   0.000677065  0.48633
  0.8    1    False 100    100    0.000214079  0.000107486  0.502085
  0.8    1    False 100    1000   0.000413746  0.000323244  0.781261
  0.8    1    False 1000   100    0.000348983  0.000131983  0.378193
  0.8    1    False 1000   1000   0.00136296   0.000685325  0.50282
  0.8    10   True  100    100    0.000229159  0.00011825   0.516017
  0.8    10   True  100    1000   0.000498845  0.000532618  1.0677
  0.8    10   True  1000   100    0.000383126  0.00029935   0.781336
  0.8    10   True  1000   1000   0.00162866   0.00307312   1.88689
  0.8    10   False 100    100    0.000230783  0.000124958  0.541452
  0.8    10   False 100    1000   0.000493393  0.000550654  1.11606
  0.8    10   False 1000   100    0.000377167  0.000298581  0.791642
  0.8    10   False 1000   1000   0.00165795   0.00305103   1.84024
  0.8    25   True  100    100    0.000233496  0.000175241  0.75051
  0.8    25   True  100    1000   0.00055654   0.00102658   1.84458
  0.8    25   True  1000   100    0.000463814  0.000783267  1.68875
  0.8    25   True  1000   1000   0.00186905   0.00755344   4.04132
  0.8    25   False 100    100    0.000240243  0.000175047  0.728625
  0.8    25   False 100    1000   0.000578102  0.00104499   1.80763
  0.8    25   False 1000   100    0.000485113  0.000776849  1.60138
  0.8    25   False 1000   1000   0.00211448   0.00752736   3.55992
  ```

  Args:
    sp_a: SparseTensor A, of rank 2.
    b: A dense Matrix with the same dtype as sp_a.
    adjoint_a: Use the adjoint of A in the matrix multiply. If A is complex,
      this is transpose(conj(A)). Otherwise it's transpose(A).
    adjoint_b: Use the adjoint of B in the matrix multiply. If B is complex,
      this is transpose(conj(B)). Otherwise it's transpose(B).
    name: A name prefix for the returned tensors (optional)

  Returns:
    A dense matrix (pseudo-code in dense np.matrix notation):
      A = A.H if adjoint_a else A
      B = B.H if adjoint_b else B
      return A*B
  """
  # pylint: enable=line-too-long
  if not isinstance(sp_a, ops.SparseTensor):
    raise TypeError("sp_a must be a SparseTensor")
  with ops.op_scope(
      [sp_a.indices, sp_a.values, b], name, "SparseTensorDenseMatMul") as name:
    b = ops.convert_to_tensor(b, name="b")
    # Fix: forward the scope name to the op. Previously `name` was captured
    # from op_scope but never passed down, so a caller-supplied name prefix
    # was silently ignored.
    return gen_sparse_ops._sparse_tensor_dense_mat_mul(
        a_indices=sp_a.indices,
        a_values=sp_a.values,
        a_shape=sp_a.shape,
        b=b,
        adjoint_a=adjoint_a,
        adjoint_b=adjoint_b,
        name=name)
@ops.RegisterShape("SparseTensorDenseMatMul")
def _SparseTensorDenseMatMulShape(op):  # pylint: disable=invalid-name
  """Shape function for SparseTensorDenseMatMul op."""
  adjoint_b = op.get_attr("adjoint_b")
  # Validate the sparse operand: indices [nnz, 2], values [nnz], shape [2].
  op.inputs[0].get_shape().assert_has_rank(2)  # a_indices
  op.inputs[1].get_shape().assert_has_rank(1)  # a_values
  op.inputs[2].get_shape().merge_with(tensor_shape.vector(2))  # a_shape
  b_shape = op.inputs[3].get_shape().with_rank(2)
  # The product's column count comes from B (or B's row count if adjoint).
  if adjoint_b:
    result_cols = b_shape[0]
  else:
    result_cols = b_shape[1]
  return [tensor_shape.matrix(None, result_cols)]
| |
import os
import uuid
from osipkd.tools import row2dict, xls_reader
from datetime import datetime
from sqlalchemy import not_, func
from sqlalchemy.sql.expression import and_
from ziggurat_foundations.models import groupfinder
from pyramid.view import (
view_config,
)
from pyramid.httpexceptions import (
HTTPFound,
)
import colander
from deform import (
Form,
widget,
ValidationFailure,
)
from osipkd.models import (
DBSession,
Group
)
from osipkd.models.pemda import Unit as UnitModel, Urusan as UrusanModel, UserUnit as UserUnitModel
from datatables import ColumnDT, DataTables
from osipkd.views.base_view import BaseViews
# Session flash-message texts (Indonesian: "add unit failed" / "edit unit
# failed") used when creating or updating a Unit does not succeed.
SESS_ADD_FAILED = 'Tambah unit gagal'
SESS_EDIT_FAILED = 'Edit unit gagal'
class AddSchema(colander.Schema):
    """Colander/deform schema for creating a Unit (SKPD)."""
    # NOTE(review): this query executes once, at import time, when the class
    # body runs -- the Urusan choice list is frozen until the process
    # restarts, and importing this module requires a configured DBSession.
    # Confirm that is intentional.
    choices = DBSession.query(UrusanModel.id,
                UrusanModel.nama).order_by(UrusanModel.nama).all()
    # Unit code, limited to 18 characters.
    kode = colander.SchemaNode(
        colander.String(),
        validator=colander.Length(max=18))
    # Optional reference to an Urusan row, rendered as a select box built
    # from the import-time `choices` above; dropped from cstruct if absent.
    urusan_id = colander.SchemaNode(
        colander.Integer(),
        widget = widget.SelectWidget(values=choices),
        missing=colander.drop,)
    # Unit name (required).
    nama = colander.SchemaNode(
        colander.String())
    # Optional category label.
    kategori = colander.SchemaNode(
        colander.String(),
        missing=colander.drop,)
    # Optional short/abbreviated name.
    singkat = colander.SchemaNode(
        colander.String(),
        missing=colander.drop,)
    # Whether the unit is disabled.
    disabled = colander.SchemaNode(
        colander.Boolean())
class EditSchema(AddSchema):
    """AddSchema plus a hidden, read-only ``id`` field identifying the row
    being edited."""
    id = colander.SchemaNode(colander.String(),
                             missing=colander.drop,
                             widget=widget.HiddenWidget(readonly=True))
class view_unit(BaseViews):
    """CRUD and JSON-grid views for Unit (SKPD) records."""

    ########
    # List #
    ########
    @view_config(route_name='unit', renderer='templates/unit/list.pt',
                 permission='read')
    def view_list(self):
        """Render the unit list page; rows are loaded via the 'grid' action."""
        return dict(a={})

    ##########
    # Action #
    ##########
    @view_config(route_name='unit-act', renderer='json',
                 permission='view')
    def gaji_unit_act(self):
        """JSON dispatcher: grid data, active-unit switch, autocomplete, import."""
        ses = self.request.session
        req = self.request
        params = req.params
        url_dict = req.matchdict
        if url_dict['act'] == 'grid':
            # DataTables server-side grid of units visible to this user.
            columns = []
            columns.append(ColumnDT('id'))
            columns.append(ColumnDT('kode'))
            columns.append(ColumnDT('nama'))
            columns.append(ColumnDT('disabled'))
            groups = groupfinder(req.user, req)
            ids = []
            if req.user.id == 1 or 'group:admin' in groups:
                # Administrators see every unit.
                query = UnitModel.query()
            else:
                # Non-admins only see granted units; a sub_unit grant expands
                # to every unit whose kode starts with the granted prefix.
                units = DBSession.query(UserUnitModel.unit_id,
                        UserUnitModel.sub_unit, UnitModel.kode
                        ).join(UnitModel).filter(UnitModel.id == UserUnitModel.unit_id,
                               UserUnitModel.user_id == req.user.id).all()
                for unit in units:
                    if unit.sub_unit:
                        rows = DBSession.query(UnitModel.id).filter(
                            UnitModel.kode.ilike('%s%%' % unit.kode)).all()
                    else:
                        rows = DBSession.query(UnitModel.id).filter(
                            UnitModel.kode == unit.kode).all()
                    ids.extend(rows)
                query = DBSession.query(UnitModel).filter((UnitModel.id).in_(ids))
            rowTable = DataTables(req, UnitModel, query, columns)
            return rowTable.output_result()
        elif url_dict['act'] == 'changeid':
            # Switch the session's active unit, enforcing access rights.
            ids = UserUnitModel.unit_granted(req.user.id, params['unit_id'])
            if req.user.id > 1 and 'g:admin' not in groupfinder(req.user, req)\
               and not ids:
                return {'success':False, 'msg':'Anda tidak boleh mengubah ke unit yang bukan hak akses anda'}
            row = UnitModel.get_by_id('unit_id' in params and params['unit_id'] or 0)
            if row:
                ses['unit_id'] = row.id
                ses['unit_kd'] = row.kode
                ses['unit_nm'] = row.nama
            return {'success':True, 'msg':'Sukses ubah SKPD'}
        elif url_dict['act'] == 'headofnama':
            # Autocomplete: match units by name.
            term = 'term' in params and params['term'] or ''
            rows = DBSession.query(UnitModel.id, UnitModel.kode, UnitModel.nama
                   ).filter(
                     UnitModel.nama.ilike('%%%s%%' % term)).all()
            return [dict(id=k[0], value=k[2], kode=k[1], nama=k[2])
                    for k in rows]
        elif url_dict['act'] == 'headofkode':
            # Autocomplete: match units by code.
            term = 'term' in params and params['term'] or ''
            rows = DBSession.query(UnitModel.id, UnitModel.kode, UnitModel.nama
                   ).filter(
                     UnitModel.kode.ilike('%%%s%%' % term)).all()
            return [dict(id=k[0], value=k[1], kode=k[1], nama=k[2])
                    for k in rows]
        elif url_dict['act'] == 'import':
            # One-off migration of legacy users and their unit assignments.
            rows = DBSession.execute("""SELECT a.kode, a.nama, a.passwd, b.unit_id
                 FROM admin.users2 a
                 INNER JOIN admin.user_units2 b
                      ON a.id = b.id""").all()
            for kode, nama, passwd, unit_id in rows:
                # NOTE(review): `Users` is not imported in this module, so
                # this branch raises NameError at runtime — import the
                # intended model (presumably osipkd.models.Users); confirm.
                user = Users()
                user.user_name = nama
                user.user_password = passwd
                user.email = ''.join([nama, '@tangerangkab.org'])
                user.status = 1
                DBSession.add(user)
                DBSession.flush()
                if user.id:
                    user_unit = UserUnitModel()
                    user_unit.user_id = user.id
                    user_unit.unit_id = unit_id
                    user_unit.status = 1
                    DBSession.add(user_unit)
                    DBSession.flush()

    #######
    # Add #
    #######
    def form_validator(self, form, value):
        """Cross-field validation hook; loads the row being edited, if any."""
        if 'id' in form.request.matchdict:
            uid = form.request.matchdict['id']
            q = DBSession.query(UnitModel).filter_by(id=uid)
            unit = q.first()
        else:
            # NOTE(review): `unit` is never used afterwards — the
            # validation rules appear unfinished.
            unit = None

    def get_form(self, class_form, row=None):
        """Build a deform Form for the given schema, optionally pre-filled."""
        schema = class_form(validator=self.form_validator)
        schema = schema.bind()
        schema.request = self.request
        if row:
            schema.deserialize(row)
        return Form(schema, buttons=('simpan','batal'))

    def save(self, values, user, row=None):
        """Create or update a unit row from form values and flush it."""
        if not row:
            row = UnitModel()
            row.created = datetime.now()
            row.create_uid = user.id
        row.from_dict(values)
        row.updated = datetime.now()
        row.update_uid = user.id
        row.disabled = 'disabled' in values and values['disabled'] and 1 or 0
        DBSession.add(row)
        DBSession.flush()
        return row

    def save_request(self, values, row=None):
        """Persist form values for the row referenced by the URL, then flash."""
        if 'id' in self.request.matchdict:
            values['id'] = self.request.matchdict['id']
        row = self.save(values, self.request.user, row)
        self.request.session.flash('unit sudah disimpan.')

    def route_list(self):
        """Redirect back to the unit list page."""
        return HTTPFound(location=self.request.route_url('unit'))

    def session_failed(self, session_name):
        """Pop and return a previously stashed failed-form rendering."""
        r = dict(form=self.session[session_name])
        del self.session[session_name]
        return r

    @view_config(route_name='unit-add', renderer='templates/unit/add.pt',
                 permission='add')
    def view_unit_add(self):
        """Add-unit form: GET renders, POST validates and saves."""
        req = self.request
        form = self.get_form(AddSchema)
        if req.POST:
            if 'simpan' in req.POST:
                controls = req.POST.items()
                try:
                    form.validate(controls)
                except ValidationFailure as e:
                    # Stash the rendered error form and redirect back.
                    req.session[SESS_ADD_FAILED] = e.render()
                    return HTTPFound(location=req.route_url('unit-add'))
                self.save_request(dict(controls))
            return self.route_list()
        elif SESS_ADD_FAILED in req.session:
            return self.session_failed(SESS_ADD_FAILED)
        return dict(form=form.render())

    ########
    # Edit #
    ########
    def query_id(self):
        """Query for the unit row referenced by the URL's ``id``."""
        return DBSession.query(UnitModel).filter_by(id=self.request.matchdict['id'])

    def id_not_found(self):
        """Flash a not-found error and redirect to the list.

        BUG FIX: previously referenced the bare names ``request`` and
        ``route_list`` (NameError at runtime).
        """
        msg = 'unit ID %s Tidak Ditemukan.' % self.request.matchdict['id']
        self.request.session.flash(msg, 'error')
        return self.route_list()

    @view_config(route_name='unit-edit', renderer='templates/unit/edit.pt',
                 permission='edit')
    def view_unit_edit(self):
        """Edit-unit form for an existing row."""
        request = self.request
        row = self.query_id().first()
        if not row:
            # BUG FIX: was `id_not_found(request)` — unbound and wrong arity.
            return self.id_not_found()
        form = self.get_form(EditSchema)
        if request.POST:
            if 'simpan' in request.POST:
                controls = request.POST.items()
                try:
                    form.validate(controls)
                except ValidationFailure as e:
                    request.session[SESS_EDIT_FAILED] = e.render()
                    return HTTPFound(location=request.route_url('unit-edit',
                                                                id=row.id))
                self.save_request(dict(controls), row)
            return self.route_list()
        elif SESS_EDIT_FAILED in request.session:
            return self.session_failed(SESS_EDIT_FAILED)
        # Pre-fill the form; colander.null renders fields as empty.
        values = row.to_dict()
        for value in values:
            if not values[value]:
                values[value] = colander.null
        return dict(form=form.render(appstruct=values))

    ##########
    # Delete #
    ##########
    @view_config(route_name='unit-delete', renderer='templates/unit/delete.pt',
                 permission='delete')
    def view_unit_delete(self):
        """Confirm and perform deletion of a unit row."""
        request = self.request
        q = self.query_id()
        row = q.first()
        if not row:
            # BUG FIX: was `self.id_not_found(request)` — wrong arity.
            return self.id_not_found()
        form = Form(colander.Schema(), buttons=('hapus','batal'))
        if request.POST:
            if 'hapus' in request.POST:
                msg = 'unit ID %d %s sudah dihapus.' % (row.id, row.nama)
                try:
                    q.delete()
                    DBSession.flush()
                except Exception:
                    # Best effort: FK violations etc. fall through to a
                    # failure message instead of a 500.
                    msg = 'unit ID %d %s tidak dapat dihapus.' % (row.id, row.nama)
                request.session.flash(msg)
            return self.route_list()
        return dict(row=row,
                    form=form.render())
| |
import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
    """Return a 3x3 convolution with 1-pixel padding and no bias."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
class BasicBlock(nn.Module):
    """Basic residual block: two 3x3 convs with BN, plus a skip connection."""

    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        # First conv carries the (optional) spatial stride.
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        # Projection for the skip path when shape changes, identity otherwise.
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        """Residual forward: BN(conv(...)) + shortcut, then ReLU."""
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out = out + shortcut
        return self.relu(out)
class Bottleneck(nn.Module):
    """Bottleneck residual block: 1x1 reduce, 3x3, then 1x1 expand (x4)."""

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        # The middle 3x3 conv carries the spatial stride.
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        """Residual forward through the three convs, then ReLU."""
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out = out + shortcut
        return self.relu(out)
class ResNet(nn.Module):
    """Modified ResNet trunk.

    NOTE(review): relative to the reference torchvision implementation,
    layer4 keeps stride=1, a 20-channel 3x3 conv head (`conv2`) follows
    layer4, and `self.fc` is constructed but never applied in forward()
    (the flattened avgpool output is returned directly). This looks like
    a deliberate task-specific customization — confirm against the
    training code before changing.
    """

    def __init__(self, block, layers, num_classes=1000):
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=1)
        self.conv2 = nn.Conv2d(512, 20, kernel_size=3, stride=1, padding=1)
        self.avgpool = nn.AvgPool2d(7)
        self.fc = nn.Linear(512 * block.expansion, num_classes)
        self._reset_parameters()

    def _reset_parameters(self):
        # He-style init for convs; unit gain / zero shift for batch norms.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                fan_out = (module.kernel_size[0] * module.kernel_size[1]
                           * module.out_channels)
                module.weight.data.normal_(0, math.sqrt(2. / fan_out))
            elif isinstance(module, nn.BatchNorm2d):
                module.weight.data.fill_(1)
                module.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack `blocks` residual blocks, projecting the skip if needed."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        stage = [block(self.inplanes, planes, stride, downsample)]
        self.inplanes = planes * block.expansion
        stage.extend(block(self.inplanes, planes) for _ in range(1, blocks))
        return nn.Sequential(*stage)

    def forward(self, x):
        """Return flattened conv features (the fc layer is skipped)."""
        x = self.maxpool(self.relu(self.bn1(self.conv1(x))))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            x = stage(x)
        x = self.avgpool(self.conv2(x))
        return x.view(x.size(0), -1)
def resnet18(pretrained=False, **kwargs):
    """Build a ResNet-18.

    Args:
        pretrained (bool): load ImageNet weights when True
    """
    net = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
    if pretrained:
        state = model_zoo.load_url(model_urls['resnet18'])
        net.load_state_dict(state)
    return net
def resnet34(pretrained=False, **kwargs):
    """Build a ResNet-34.

    Args:
        pretrained (bool): load ImageNet weights when True
    """
    net = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
    if pretrained:
        state = model_zoo.load_url(model_urls['resnet34'])
        net.load_state_dict(state)
    return net
def resnet50(pretrained=False, **kwargs):
    """Build a ResNet-50.

    Args:
        pretrained (bool): load ImageNet weights when True
    """
    net = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
    if pretrained:
        state = model_zoo.load_url(model_urls['resnet50'])
        net.load_state_dict(state)
    return net
def resnet101(pretrained=False, **kwargs):
    """Build a ResNet-101.

    Args:
        pretrained (bool): load ImageNet weights when True
    """
    net = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
    if pretrained:
        state = model_zoo.load_url(model_urls['resnet101'])
        net.load_state_dict(state)
    return net
def resnet152(pretrained=False, **kwargs):
    """Build a ResNet-152.

    Args:
        pretrained (bool): load ImageNet weights when True
    """
    net = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
    if pretrained:
        state = model_zoo.load_url(model_urls['resnet152'])
        net.load_state_dict(state)
    return net
| |
"""Code shared between the API classes."""
class Remote(object):
    """Base class for Nvim remote objects (buffer/window/tabpage).

    Subclasses wrap msgpack-rpc calls for one object type; this base
    provides equality and hashing based on the remote object handle
    (`code_data`).
    """

    def __eq__(self, other):
        """Two remotes are equal when their `code_data` handles match."""
        if not hasattr(other, 'code_data'):
            return False
        return self.code_data == other.code_data

    def __hash__(self):
        """Hash delegates to the remote handle."""
        return hash(self.code_data)
class RemoteMap(object):
    """Dict-like view over a string->object map stored in Nvim.

    Counterpart to `RemoteSequence`; provides a dict API over the
    map-like structures in Nvim (variables, options, ...). Writes are
    only available when a setter method was supplied.
    """

    def __init__(self, session, get_method, set_method, self_obj=None):
        """Bind getter (and optional setter) RPC methods via the session."""
        self._get = _wrap(session, get_method, self_obj)
        self._set = _wrap(session, set_method, self_obj) if set_method else None

    def __getitem__(self, key):
        """Fetch the value stored under `key`."""
        return self._get(key)

    def __setitem__(self, key, value):
        """Store `value` under `key` (requires a setter)."""
        if self._set is None:
            raise TypeError('This dict is read-only')
        self._set(key, value)

    def __delitem__(self, key):
        """Delete `key` by associating None with it (requires a setter)."""
        if self._set is None:
            raise TypeError('This dict is read-only')
        return self._set(key, None)

    def __contains__(self, key):
        """True when fetching `key` does not raise."""
        try:
            self._get(key)
        except Exception:
            return False
        return True

    def get(self, key, default=None):
        """Fetch `key`, falling back to `default` on any error."""
        try:
            return self._get(key)
        except Exception:
            return default
class RemoteSequence(object):
    """List-like view over a sequence of objects stored in Nvim.

    Wraps an RPC method that returns the whole sequence (lines, buffers,
    windows, tabpages); every operation fetches a fresh snapshot and
    works on it locally (indexing, iteration, membership, len). An
    optional `self_obj` is passed as the first request argument.
    """

    def __init__(self, session, method, self_obj=None):
        """Bind the fetch RPC method via the session."""
        self._fetch = _wrap(session, method, self_obj)

    def __len__(self):
        """Length of a fresh snapshot."""
        return len(self._fetch())

    def __getitem__(self, idx):
        """Index or slice a fresh snapshot (only start/stop of a slice)."""
        items = self._fetch()
        if isinstance(idx, slice):
            return items[idx.start:idx.stop]
        return items[idx]

    def __iter__(self):
        """Yield items from a snapshot of the sequence."""
        for element in self._fetch():
            yield element

    def __contains__(self, item):
        """Membership test against a fresh snapshot."""
        return item in self._fetch()
def _identity(obj, session, method, kind):
return obj
class SessionHook(object):
"""Pair of functions to filter objects coming/going from/to Nvim.
Filter functions receive the following arguments:
- obj: The object to process
- session: The current session object
- method: The method name
- kind: Kind of filter, can be one of:
- 'request' for requests coming from Nvim
- 'notification' for notifications coming from Nvim
- 'out-request' for requests going to Nvim
Whatever is returned from the function is used as a replacement for `obj`.
This class also provides a `compose` method for composing hooks.
"""
def __init__(self, from_nvim=_identity, to_nvim=_identity):
"""Initialize a SessionHook with from/to filters."""
self.from_nvim = from_nvim
self.to_nvim = to_nvim
def compose(self, other):
"""Compose two SessionHook instances.
This works by composing the individual from/to filters and creating
a new SessionHook instance with the composed filters.
"""
def comp(f1, f2):
if f1 is _identity:
return f2
if f2 is _identity:
return f1
return lambda o, s, m, k: f1(f2(o, s, m, k), s, m, k)
return SessionHook(comp(other.from_nvim, self.from_nvim),
comp(other.to_nvim, self.to_nvim))
class DecodeHook(SessionHook):
    """SessionHook that decodes byte strings arriving from Nvim.

    Useful on python3, where strings are unicode by default and msgpack
    delivers raw bytes.
    """

    def __init__(self, encoding='utf-8', encoding_errors='strict'):
        """Store codec settings and register the from-Nvim filter."""
        self.encoding = encoding
        self.encoding_errors = encoding_errors
        super(DecodeHook, self).__init__(from_nvim=self._decode_if_bytes)

    def _decode_if_bytes(self, obj, session, method, kind):
        # Only raw byte strings are touched; everything else passes through.
        if not isinstance(obj, bytes):
            return obj
        return obj.decode(self.encoding, errors=self.encoding_errors)

    def walk(self, obj):
        """Recursively decode bytes found anywhere inside `obj`.

        Uses the encoding and error policy given to the constructor.
        """
        return walk(self._decode_if_bytes, obj, None, None, None)
class SessionFilter(object):
    """Session wrapper that pushes all traffic through a SessionHook.

    Drop-in replacement for a session object: every message going to or
    coming from Nvim is transformed by the hook's filters.
    """

    def __init__(self, session, hook):
        """Wrap `session` (Session or SessionFilter) with `hook`.

        Nested SessionFilters are flattened: the inner filter's hook is
        composed with `hook` and the underlying raw session is reused.
        """
        if isinstance(session, SessionFilter):
            hook = session._hook.compose(hook)
            session = session._session
        self._hook = hook
        self._session = session
        # Both filters are applied through `walk`, so containers are
        # transformed recursively.
        self._in = hook.from_nvim
        self._out = hook.to_nvim

    def threadsafe_call(self, fn, *args, **kwargs):
        """Wrapper for Session.threadsafe_call."""
        self._session.threadsafe_call(fn, *args, **kwargs)

    def next_message(self):
        """Wrapper for Session.next_message; filters the incoming message."""
        msg = self._session.next_message()
        if not msg:
            return None
        return walk(self._in, msg, self, msg[1], msg[0])

    def request(self, name, *args, **kwargs):
        """Wrapper for Session.request; filters arguments and result."""
        outgoing = walk(self._out, args, self, name, 'out-request')
        response = self._session.request(name, *outgoing, **kwargs)
        # NOTE: the response is filtered with kind 'out-request' as well,
        # mirroring the historical behavior.
        return walk(self._in, response, self, name, 'out-request')

    def run(self, request_cb, notification_cb, setup_cb=None):
        """Wrapper for Session.run; filters names, args and results."""
        def wrapped_request(name, args):
            result = request_cb(self._in(name, self, name, 'request'),
                                walk(self._in, args, self, name, 'request'))
            return walk(self._out, result, self, name, 'request')

        def wrapped_notification(name, args):
            notification_cb(self._in(name, self, name, 'notification'),
                            walk(self._in, args, self, name, 'notification'))

        self._session.run(wrapped_request, wrapped_notification, setup_cb)

    def stop(self):
        """Wrapper for Session.stop."""
        self._session.stop()
def walk(fn, obj, *args):
    """Recursively apply ``fn(node, *args)`` over an object graph.

    Lists and tuples are rebuilt as lists, dicts are rebuilt with both
    keys and values transformed; every other object is passed to `fn`.
    """
    node_type = type(obj)
    if node_type is list or node_type is tuple:
        return [walk(fn, element, *args) for element in obj]
    if node_type is dict:
        return {walk(fn, key, *args): walk(fn, value, *args)
                for key, value in obj.items()}
    return fn(obj, *args)
def _wrap(session, method, self_obj):
if self_obj is not None:
return lambda *args: session.request(method, self_obj, *args)
else:
return lambda *args: session.request(method, *args)
| |
# --------------------------------------------------------
# Scene Graph Generation by Iterative Message Passing
# Licensed under The MIT License [see LICENSE for details]
# Written by Danfei Xu
# --------------------------------------------------------
import tensorflow as tf
from networks.network import Network
import losses
from fast_rcnn.config import cfg
import net_utils as utils
"""
A TensorFlow implementation of the scene graph generation models introduced in
"Scene Graph Generation by Iterative Message Passing" by Xu et al.
"""
class basenet(Network):
    """Base class for the scene-graph networks: a VGG16 trunk plus shared
    prediction heads and loss builders.

    `data` is the input blob dict produced by the data layer; only the
    keys read below are required by this base class.
    """
    def __init__(self, data):
        self.inputs = []
        self.data = data
        self.ims = data['ims']
        self.rois = data['rois']
        # Subclasses that refine predictions over several iterations set
        # this to True (see dual_graph_vrd).
        self.iterable = False
        # Dropout keep probability, fed at train/test time.
        self.keep_prob = tf.placeholder(tf.float32)
        self.layers = {}

    def _vgg16(self):
        """Build the full VGG16 trunk (conv + fc) over ims/rois."""
        self.layers = dict({'ims': self.ims, 'rois': self.rois})
        self._vgg_conv()
        self._vgg_fc()

    def _vgg_conv(self):
        """VGG16 convolutional trunk; gradients are stopped at conv_out."""
        (self.feed('ims')
             .conv(3, 3, 64, 1, 1, name='conv1_1')
             .conv(3, 3, 64, 1, 1, name='conv1_2')
             .max_pool(2, 2, 2, 2, name='pool1')
             .conv(3, 3, 128, 1, 1, name='conv2_1')
             .conv(3, 3, 128, 1, 1, name='conv2_2')
             .max_pool(2, 2, 2, 2, name='pool2')
             .conv(3, 3, 256, 1, 1, name='conv3_1')
             .conv(3, 3, 256, 1, 1, name='conv3_2')
             .conv(3, 3, 256, 1, 1, name='conv3_3')
             .max_pool(2, 2, 2, 2, name='pool3')
             .conv(3, 3, 512, 1, 1, name='conv4_1')
             .conv(3, 3, 512, 1, 1, name='conv4_2')
             .conv(3, 3, 512, 1, 1, name='conv4_3')
             .max_pool(2, 2, 2, 2, name='pool4')
             .conv(3, 3, 512, 1, 1, name='conv5_1')
             .conv(3, 3, 512, 1, 1, name='conv5_2')
             .conv(3, 3, 512, 1, 1, name='conv5_3')
             .stop_gradient(name='conv_out'))

    def _vgg_fc(self):
        """ROI-pool object regions and run the VGG fc6/fc7 head."""
        (self.feed('conv_out', 'rois')
             .roi_pool(7, 7, 1.0/16, name='pool5')
             .fc(4096, name='fc6')
             .dropout(self.keep_prob, name='drop6')
             .fc(4096, name='fc7')
             .dropout(self.keep_prob, name='vgg_out'))

    def _union_rel_vgg_fc(self):
        """ROI-pool relation (union-box) regions through a parallel fc head."""
        (self.feed('conv_out', 'rel_rois')
             .roi_pool(7, 7, 1.0/16, name='rel_pool5')
             .fc(4096, name='rel_fc6')
             .dropout(self.keep_prob, name='rel_drop6')
             .fc(4096, name='rel_fc7')
             .dropout(self.keep_prob, name='rel_vgg_out'))

    # predictions
    def _cls_pred(self, input_layer, layer_suffix='', reuse=False, new_var=False):
        """Object-class head: scores, softmax probs and argmax prediction.

        `new_var` creates a fresh variable per suffix instead of sharing
        the 'cls_score' weights across iterations.
        """
        layer_name = 'cls_score'+layer_suffix if new_var else 'cls_score'
        (self.feed(input_layer)
             .fc(self.data['num_classes'], relu=False, name=layer_name,
                 reuse=reuse)
             .softmax(name='cls_prob'+layer_suffix)
             .argmax(1, name='cls_pred'+layer_suffix))

    def _bbox_pred(self, input_layer, layer_suffix='', reuse=False, new_var=False):
        """Bounding-box regression head (4 deltas per class)."""
        layer_name = 'bbox_pred'+layer_suffix if new_var else 'bbox_pred'
        (self.feed(input_layer)
             .fc(self.data['num_classes']*4, relu=False, name=layer_name,
                 reuse=reuse))

    def _rel_pred(self, input_layer, layer_suffix='', reuse=False, new_var=False):
        """Relation (predicate) head: scores, probs and argmax prediction."""
        layer_name = 'rel_score'+layer_suffix if new_var else 'rel_score'
        (self.feed(input_layer)
             .fc(self.data['num_predicates'], relu=False, name=layer_name,
                 reuse=reuse)
             .softmax(name='rel_prob'+layer_suffix)
             .argmax(1, name='rel_pred'+layer_suffix))

    # Losses
    def _sg_losses(self, ops=None, suffix=''):
        """Scene-graph losses: Faster-RCNN losses plus the relation loss.

        BUG FIX: the default used to be a shared mutable dict (``ops={}``),
        so losses accumulated across calls; a fresh dict is now created
        per call.
        """
        if ops is None:
            ops = {}
        ops = self._frc_losses(ops, suffix)
        rel_score = self.get_output('rel_score'+suffix)
        ops['loss_rel'+suffix] = losses.sparse_softmax(rel_score, self.data['predicates'],
                                                       name='rel_loss'+suffix, ignore_bg=True)
        return ops

    def _frc_losses(self, ops=None, suffix=''):
        """Faster-RCNN losses: classification plus (optional) bbox L1.

        BUG FIX: same shared-mutable-default issue as ``_sg_losses``.
        """
        if ops is None:
            ops = {}
        # classification loss
        cls_score = self.get_output('cls_score'+suffix)
        ops['loss_cls'+suffix] = losses.sparse_softmax(cls_score, self.data['labels'], name='cls_loss'+suffix)
        # bounding box regression L1 loss
        if cfg.TRAIN.BBOX_REG:
            bbox_pred = self.get_output('bbox_pred'+suffix)
            ops['loss_box'+suffix] = losses.l1_loss(bbox_pred, self.data['bbox_targets'], 'reg_loss'+suffix,
                                                    self.data['bbox_inside_weights'])
        else:
            print('NO BBOX REGRESSION!!!!!')
        return ops

    def cls_pred_output(self, iters=None):
        """Class-probability outputs, per requested iteration if iterable."""
        if iters is not None:
            op = {}
            for i in iters:
                # Intermediate iterations use suffixed layer names; the
                # final iteration writes to the unsuffixed layer.
                if self.iterable and i != self.n_iter - 1:
                    op[i] = self.get_output('cls_prob_iter%i' % i)
                else:
                    op[i] = self.get_output('cls_prob')
        else:
            op = self.get_output('cls_prob')
        return op

    def bbox_pred_output(self, iters=None):
        """Bbox-regression outputs (bbox head is not iterated)."""
        if iters is not None:
            op = {}
            for i in iters:
                op[i] = self.get_output('bbox_pred')
        else:
            op = self.get_output('bbox_pred')
        return op

    def rel_pred_output(self, iters=None):
        """Relation-probability outputs, per requested iteration if iterable."""
        if iters is not None:
            op = {}
            for i in iters:
                if self.iterable and i != self.n_iter - 1:
                    op[i] = self.get_output('rel_prob_iter%i' % i)
                else:
                    op[i] = self.get_output('rel_prob')
        else:
            op = self.get_output('rel_prob')
        return op
class dual_graph_vrd(basenet):
    """Dual vertex/edge message-passing network.

    Object (vertex) and relationship (edge) states are refined over
    `n_iter` rounds by exchanging messages through GRU updates; the
    message pooling scheme (max / mean / attention) is chosen by the
    subclass via _compute_edge_context / _compute_vert_context.
    """
    def __init__(self, data):
        basenet.__init__(self, data)
        # Per-batch counts of object ROIs and relation ROIs.
        self.num_roi = data['num_roi']
        self.num_rel = data['num_rel']
        self.rel_rois = data['rel_rois']
        # This model refines predictions iteratively (see basenet).
        self.iterable = True
        # Index tensors describing the vertex<->edge connectivity used by
        # the pooling/scatter helpers below.
        self.edge_mask_inds = data['rel_mask_inds']
        self.edge_segment_inds = data['rel_segment_inds']
        self.edge_pair_mask_inds = data['rel_pair_mask_inds']
        self.edge_pair_segment_inds = data['rel_pair_segment_inds']
        # number of refine iterations
        self.n_iter = data['n_iter']
        # Subject/object vertex indices per relation (gathered and split
        # in two below — presumably shape (num_rel, 2); confirm upstream).
        self.relations = data['relations']
        self.vert_state_dim = 512
        self.edge_state_dim = 512
    def setup(self):
        """Build trunk, fc heads, RNN cells, then the message-passing loop."""
        self.layers = dict({'ims': self.ims, 'rois': self.rois, 'rel_rois': self.rel_rois})
        self._vgg_conv()
        self._vgg_fc()
        self._union_rel_vgg_fc()
        self._cells()
        self._iterate()
    def _cells(self):
        """
        construct RNN cells and states
        """
        # intiialize lstms
        self.vert_rnn = tf.nn.rnn_cell.GRUCell(self.vert_state_dim, activation=tf.tanh)
        self.edge_rnn = tf.nn.rnn_cell.GRUCell(self.edge_state_dim, activation=tf.tanh)
        # lstm states
        self.vert_state = self.vert_rnn.zero_state(self.num_roi, tf.float32)
        self.edge_state = self.edge_rnn.zero_state(self.num_rel, tf.float32)
    def _iterate(self):
        # Unary (per-node) inputs projected from the VGG fc features.
        # NOTE(review): vert_unary uses relu=False while edge_unary uses
        # relu=True — asymmetry looks deliberate but confirm.
        (self.feed('vgg_out')
             .fc(self.vert_state_dim, relu=False, name='vert_unary'))
        (self.feed('rel_vgg_out')
             .fc(self.edge_state_dim, relu=True, name='edge_unary'))
        vert_unary = self.get_output('vert_unary')
        edge_unary = self.get_output('edge_unary')
        # Initial RNN step creates the variables (reuse=False)...
        vert_factor = self._vert_rnn_forward(vert_unary, reuse=False)
        edge_factor = self._edge_rnn_forward(edge_unary, reuse=False)
        for i in xrange(self.n_iter):
            # ...subsequent steps share them; prediction heads are also
            # shared after the first iteration.
            reuse = i > 0
            # compute edge states
            edge_ctx = self._compute_edge_context(vert_factor, edge_factor, reuse=reuse)
            edge_factor = self._edge_rnn_forward(edge_ctx, reuse=True)
            # compute vert states
            vert_ctx = self._compute_vert_context(edge_factor, vert_factor, reuse=reuse)
            vert_factor = self._vert_rnn_forward(vert_ctx, reuse=True)
            vert_in = vert_factor
            edge_in = edge_factor
            # Attach prediction heads for this iteration.
            self._update_inference(vert_in, edge_in, i)
    def _compute_edge_context_hard(self, vert_factor, reduction_mode='max'):
        """
        max or average message pooling
        """
        # Gather each relation's endpoint vertex states, reduce over the
        # pair axis.
        if reduction_mode=='max':
            return tf.reduce_max(tf.gather(vert_factor, self.relations), [1])
        elif reduction_mode=='mean':
            return tf.reduce_mean(tf.gather(vert_factor, self.relations), [1])
    def _compute_vert_context_hard(self, edge_factor, vert_factor, reduction_mode='max'):
        """
        max or average message pooling
        """
        # Scatter incident edge states back onto vertices and reduce.
        edge_factor_gathered = utils.pad_and_gather(edge_factor, self.edge_mask_inds, None)
        vert_ctx = utils.padded_segment_reduce(edge_factor_gathered, self.edge_segment_inds,
                                               vert_factor.get_shape()[0], reduction_mode)
        return vert_ctx
    def _compute_edge_context_soft(self, vert_factor, edge_factor, reuse=False):
        """
        attention-based edge message pooling
        """
        # Subject/object vertex states for every relation.
        vert_pairs = utils.gather_vec_pairs(vert_factor, self.relations)
        sub_vert, obj_vert = tf.split(split_dim=1, num_split=2, value=vert_pairs)
        sub_vert_w_input = tf.concat(concat_dim=1, values=[sub_vert, edge_factor])
        obj_vert_w_input = tf.concat(concat_dim=1, values=[obj_vert, edge_factor])
        # compute compatibility scores
        (self.feed(sub_vert_w_input)
             .fc(1, relu=False, reuse=reuse, name='sub_vert_w_fc')
             .sigmoid(name='sub_vert_score'))
        (self.feed(obj_vert_w_input)
             .fc(1, relu=False, reuse=reuse, name='obj_vert_w_fc')
             .sigmoid(name='obj_vert_score'))
        sub_vert_w = self.get_output('sub_vert_score')
        obj_vert_w = self.get_output('obj_vert_score')
        # Attention-weighted sum of the two endpoint messages.
        weighted_sub = tf.mul(sub_vert, sub_vert_w)
        weighted_obj = tf.mul(obj_vert, obj_vert_w)
        return weighted_sub + weighted_obj
    def _compute_vert_context_soft(self, edge_factor, vert_factor, reuse=False):
        """
        attention-based vertex(node) message pooling
        """
        # Outgoing/incoming edge states for every (vertex, edge) pair.
        out_edge = utils.pad_and_gather(edge_factor, self.edge_pair_mask_inds[:,0])
        in_edge = utils.pad_and_gather(edge_factor, self.edge_pair_mask_inds[:,1])
        # gather correspounding vert factors
        vert_factor_gathered = tf.gather(vert_factor, self.edge_pair_segment_inds)
        # concat outgoing edges and ingoing edges with gathered vert_factors
        out_edge_w_input = tf.concat(concat_dim=1, values=[out_edge, vert_factor_gathered])
        in_edge_w_input = tf.concat(concat_dim=1, values=[in_edge, vert_factor_gathered])
        # compute compatibility scores
        (self.feed(out_edge_w_input)
             .fc(1, relu=False, reuse=reuse, name='out_edge_w_fc')
             .sigmoid(name='out_edge_score'))
        (self.feed(in_edge_w_input)
             .fc(1, relu=False, reuse=reuse, name='in_edge_w_fc')
             .sigmoid(name='in_edge_score'))
        out_edge_w = self.get_output('out_edge_score')
        in_edge_w = self.get_output('in_edge_score')
        # weight the edge factors with computed weigths
        out_edge_weighted = tf.mul(out_edge, out_edge_w)
        in_edge_weighted = tf.mul(in_edge, in_edge_w)
        edge_sum = out_edge_weighted + in_edge_weighted
        # Sum weighted incident-edge messages per vertex.
        vert_ctx = tf.segment_sum(edge_sum, self.edge_pair_segment_inds)
        return vert_ctx
    def _vert_rnn_forward(self, vert_in, reuse=False):
        """One GRU step over all vertex states (variables shared via scope)."""
        with tf.variable_scope('vert_rnn'):
            if reuse: tf.get_variable_scope().reuse_variables()
            (vert_out, self.vert_state) = self.vert_rnn(vert_in, self.vert_state)
        return vert_out
    def _edge_rnn_forward(self, edge_in, reuse=False):
        """One GRU step over all edge states (variables shared via scope)."""
        with tf.variable_scope('edge_rnn'):
            if reuse: tf.get_variable_scope().reuse_variables()
            (edge_out, self.edge_state) = self.edge_rnn(edge_in, self.edge_state)
        return edge_out
    def _update_inference(self, vert_factor, edge_factor, iter_i):
        # make predictions
        reuse = iter_i > 0  # reuse variables
        # Intermediate iterations get suffixed output names; the last one
        # writes to the canonical (unsuffixed) layers.
        iter_suffix = '_iter%i' % iter_i if iter_i < self.n_iter - 1 else ''
        self._cls_pred(vert_factor, layer_suffix=iter_suffix, reuse=reuse)
        self._bbox_pred(vert_factor, layer_suffix=iter_suffix, reuse=reuse)
        self._rel_pred(edge_factor, layer_suffix=iter_suffix, reuse=reuse)
    def losses(self):
        """Scene-graph losses for the final-iteration outputs."""
        return self._sg_losses()
class vrd(basenet):
    """Baseline model: the visual relation detection module of Lu et al.

    No message passing — predictions are made directly from the VGG
    features of object and union-box regions.
    """

    def __init__(self, data):
        basenet.__init__(self, data)
        self.rel_rois = data['rel_rois']

    def setup(self):
        """Build the VGG trunk plus classification/bbox/relation heads."""
        self.layers = {'ims': self.ims, 'rois': self.rois,
                       'rel_rois': self.rel_rois}
        self._vgg_conv()
        self._vgg_fc()
        self._union_rel_vgg_fc()
        self._cls_pred('vgg_out')
        self._bbox_pred('vgg_out')
        self._rel_pred('rel_vgg_out')

    def losses(self):
        """Scene-graph losses (classification + bbox + relation)."""
        return self._sg_losses()
class dual_graph_vrd_maxpool(dual_graph_vrd):
    """Baseline: attention-free context pooling by elementwise max."""

    def _compute_edge_context(self, vert_factor, edge_factor, reuse):
        return self._compute_edge_context_hard(vert_factor,
                                               reduction_mode='max')

    def _compute_vert_context(self, edge_factor, vert_factor, reuse):
        return self._compute_vert_context_hard(edge_factor, vert_factor,
                                               reduction_mode='max')
class dual_graph_vrd_avgpool(dual_graph_vrd):
    """Baseline: attention-free context pooling by elementwise mean."""

    def _compute_edge_context(self, vert_factor, edge_factor, reuse):
        return self._compute_edge_context_hard(vert_factor,
                                               reduction_mode='mean')

    def _compute_vert_context(self, edge_factor, vert_factor, reuse):
        return self._compute_vert_context_hard(edge_factor, vert_factor,
                                               reduction_mode='mean')
class dual_graph_vrd_final(dual_graph_vrd):
    """Final model: context pooling via learned attention weights."""

    def _compute_edge_context(self, vert_factor, edge_factor, reuse):
        return self._compute_edge_context_soft(vert_factor, edge_factor,
                                               reuse)

    def _compute_vert_context(self, edge_factor, vert_factor, reuse):
        return self._compute_vert_context_soft(edge_factor, vert_factor,
                                               reuse)
| |
#
# frontend.py
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import silvereye
from constants import *
from product import *
from flags import flags
import isys
import iutil
import os
import re
import shutil
import types
import urlgrabber
from kickstart import AnacondaKSScript
import gettext
_ = lambda x: gettext.ldgettext("anaconda", x)
class ImageProgress(object):
    """Callback state threaded through imageProgress(): a progress window,
    an optional status widget, and a buffer of subprocess output seen so far."""

    def __init__(self, progressWindow, status):
        # Buffer of output that has not yet been fully parsed.
        self.data = ''
        self.progressWindow = progressWindow
        self.status = status
def imageProgress(data, callback_data=None):
    """Progress callback fed chunks of ami_creator/yum output.

    Parses 'Installing: <pkg> ... [ <n>/<total>]' lines out of the
    accumulated output and updates the progress window (integer percent)
    and the optional status label held by callback_data.

    :param data: the newly-read chunk of subprocess output
    :param callback_data: an ImageProgress instance (progressWindow, status,
                          data buffer); nothing happens when it is falsy.
    """
    if not callback_data:
        return

    # Raw string (the original '\\s'/'\\S' escapes are deprecated invalid
    # escapes in modern Python) and compiled once for both match attempts.
    install_re = re.compile(r'.*Installing:\s+(\S+)\s+.*\[\s*(\d+)/(\d+)\].*')

    callback_data.data += data
    lines = callback_data.data.split('\n')
    # Only the last complete line and the trailing partial line are ever
    # inspected, so trim the buffer to those two.  Matching behavior is
    # unchanged while the buffer can no longer grow without bound over a
    # long install.
    callback_data.data = '\n'.join(lines[-2:])

    m = install_re.match(lines[-1])
    if not m:
        if len(lines) == 1:
            # No complete line seen yet.
            return
        m = install_re.match(lines[-2])
        if not m:
            # TODO: Report other progress than just package installs
            return

    (pkg, cur, tot) = m.groups()[0:3]
    # Floor division keeps the historical Python 2 integer-percent behavior
    # under Python 3 as well ('/' would produce a float there).
    callback_data.progressWindow.set(100 * int(cur) // int(tot))
    if callback_data.status is not None:
        callback_data.status.set_text('Installing %s (%s of %s)' % (pkg, cur, tot))
class InstallClass(silvereye.InstallClass):
    """Anaconda install class for a Eucalyptus front-end machine.

    Extends the base silvereye install class with the front-end package set,
    firewall ports, the 'frontend' UI step and post-install actions
    (helper-script installation and creation of an initial CentOS 6 EMI).
    """
    # name has underscore used for mnemonics, strip if you dont need it
    id = "silvereyefrontendparent"
    name = N_("Silvereye Eucalyptus Front End Installer Parent")
    # NOTE(review): the adjacent string literals below concatenate without
    # separating spaces (yielding "...Box'install. ..."); likely missing
    # trailing spaces, left untouched here since it is a user-visible string.
    _description = N_("The default installation of %s is a 'Cloud in a Box'"
                      "install. You can optionally select a different set of"
                      "software now.")
    _descriptionFields = (productName,)
    sortPriority = 10999
    hidden = 1
    bootloaderTimeoutDefault = 5
    bootloaderExtraArgs = ["crashkernel=auto"]

    tasks = [(N_("Eucalyptus Front-end Only"),
              ["core", "eucalyptus-cloud-controller",
               "eucalyptus-storage-controller", "eucalyptus-walrus",
               "eucalyptus-cluster-controller"]),
             ]

    def setGroupSelection(self, anaconda):
        """Select the package groups and individual packages for a front-end."""
        silvereye.InstallClass.setGroupSelection(self, anaconda)
        # Base groups plus a graphical desktop for the admin console/browser.
        map(lambda x: anaconda.backend.selectGroup(x),
            ["core", "eucalyptus-cloud-controller",
             'X Window System', 'Desktop', 'Fonts'])
        anaconda.backend.selectPackage("eucalyptus-sc")
        anaconda.backend.selectPackage("eucalyptus-walrus")
        anaconda.backend.selectPackage("eucalyptus-cc")
        anaconda.backend.selectPackage("eucalyptus-load-balancer-image")
        anaconda.backend.selectPackage("unzip")
        anaconda.backend.selectPackage("livecd-tools")
        anaconda.backend.selectPackage("firefox")
        anaconda.backend.selectPackage("nagios")
        # Sigh, I guess we can't have nice things like wireless NICs
        # https://eucalyptus.atlassian.net/browse/INST-124
        anaconda.backend.deselectPackage("NetworkManager")
        anaconda.backend.deselectPackage("NetworkManager-gnome")
        # For 3.2 and later
        anaconda.backend.selectPackage("eucalyptus-console")

    def setInstallData(self, anaconda):
        """Open the front-end firewall ports and optionally fetch a
        eucalyptus.conf referenced by the `eucaconf` boot argument."""
        silvereye.InstallClass.setInstallData(self, anaconda)
        # DNS (53), DHCP (67), iSCSI (3260) plus the Eucalyptus service ports.
        anaconda.id.firewall.portlist.extend([ '53:tcp',
                                               '53:udp',
                                               '67:udp',
                                               '3260:tcp',
                                               '8443:tcp',
                                               '8772:tcp',
                                               '8773:tcp',
                                               '8774:tcp',
                                               '8888:tcp'])
        if flags.cmdline.has_key("eucaconf"):
            try:
                # Download the referenced config to the well-known path that
                # setSteps() checks for.
                f = urlgrabber.urlopen(flags.cmdline["eucaconf"])
                eucaconf = open('/tmp/eucalyptus.conf', 'w')
                eucaconf.write(f.read())
                f.close()
                eucaconf.close()
            except urlgrabber.grabber.URLGrabError as e:
                if anaconda.intf:
                    rc = anaconda.intf.messageWindow(_("Warning! eucalyptus.conf download failed"),
                                                     _("The following error was encountered while"
                                                       " downloading the eucalyptus.conf file:\n\n%s" % e),
                                                     type="custom", custom_icon="warning",
                                                     custom_buttons=[_("_Exit"), _("_Install anyway")])
                    if not rc:
                        # Button index 0 is "_Exit".
                        sys.exit(0)
                else:
                    # No interface to ask the user: abort the install.
                    sys.exit(0)
        else:
            pass

    def setSteps(self, anaconda):
        """Register the 'frontend' UI step (GUI or text variant) and enable it
        unless a kickstart already supplied /tmp/eucalyptus.conf."""
        silvereye.InstallClass.setSteps(self, anaconda)
        if anaconda.id.displayMode == 'g':
            from gui import stepToClass
            stepToClass["frontend"] = ("frontend_gui", "FrontendInstallWindow")
        else:
            from text import stepToClasses
            stepToClasses["frontend"] = ("frontend_text", "FrontendInstallWindow")
        if not anaconda.isKickstart or not os.path.exists('/tmp/eucalyptus.conf'):
            # skip=0 marks the step as not-skipped, i.e. it will be shown.
            anaconda.dispatch.skipStep("frontend", skip = 0)

    def postAction(self, anaconda):
        """Post-install: copy helper scripts into the installed system and
        build an initial CentOS 6 EMI with ami_creator inside the chroot."""
        silvereye.InstallClass.postAction(self, anaconda)

        # XXX: use proper constants for path names
        def copy_script(src, dest, mode=0770):
            # Copy a helper from the install media into the target root.
            shutil.copyfile('/tmp/updates/scripts/%s' % src,
                            '%s%s' % (anaconda.rootPath, dest))
            os.chmod('%s%s' % (anaconda.rootPath, dest), mode)

        # NOTE(review): copy_file appears unused within this method.
        def copy_file(src, dest):
            copy_script(src, dest, mode=0644)

        copy_script('eucalyptus-frontend-config.sh',
                    '/usr/local/sbin/eucalyptus-frontend-config')
        copy_script('eucalyptus-teardown',
                    '/usr/local/sbin/eucalyptus-teardown')
        copy_script('install-unpacked-image.py',
                    '/usr/local/sbin/install-unpacked-image.py')
        copy_script('eucalyptus-setup.init',
                    '/etc/init.d/eucalyptus-setup')
        copy_script('register_cloud_start',
                    '/usr/local/sbin/register_cloud_start', mode=0755)

        os.mkdir('%s/tmp/img' % anaconda.rootPath)
        # EKI (kernel image for the EMI)
        shutil.copyfile('/tmp/updates/scripts/vmlinuz-kexec',
                        '%s/tmp/img/vmlinuz-kexec' % anaconda.rootPath)
        # ERI (ramdisk image for the EMI)
        shutil.copyfile('/tmp/updates/scripts/initramfs-kexec',
                        '%s/tmp/img/initramfs-kexec' % anaconda.rootPath)

        # Image kickstart: reuse this install's enabled repos and drop any
        # 'repo ' lines baked into the template file.
        newks = open('%s/tmp/ks-centos6.cfg' % anaconda.rootPath, 'w')
        ayum = anaconda.backend.ayum
        for repo in ayum.repos.listEnabled():
            newks.write('repo --name=%s --baseurl=%s\n' % (repo.name, repo.baseurl[0]))
        for line in open('/tmp/updates/ks-centos6.cfg', 'r').readlines():
            if line.startswith('repo '):
                continue
            newks.write(line)
        newks.close()

        # Image creation script
        copy_script('ami_creator.py', '/tmp/ami_creator.py')

        # XXX clean this up
        bindmount = False
        if ayum._baseRepoURL and ayum._baseRepoURL.startswith("file://"):
            # Make the file-based repo visible inside the chroot so
            # ami_creator can reach it; undone after the run below.
            os.mkdir('/mnt/sysimage/mnt/source')
            isys.mount('/mnt/source', '/mnt/sysimage/mnt/source', bindMount=True)
            bindmount = True

        # eucalyptus.conf fragment from config screen
        w = anaconda.intf.progressWindow(_("Creating EMI"),
                                         _("Creating an initial CentOS 6 EMI."), 100)
        shutil.copyfile('/tmp/eucalyptus.conf',
                        '%s/etc/eucalyptus/eucalyptus.conf.anaconda' % anaconda.rootPath)
        copy_script('eucalyptus-firstboot-final.py',
                    '/usr/share/firstboot/modules/eucalyptus-firstboot-final.py')

        postscriptlines ="""
/usr/sbin/euca_conf --upgrade-conf /etc/eucalyptus/eucalyptus.conf.anaconda
chkconfig dnsmasq off
chkconfig eucalyptus-cloud off
chkconfig eucalyptus-setup on
"""
        postscript = AnacondaKSScript(postscriptlines,
                                      inChroot=True,
                                      logfile='/root/frontend-ks-post.log',
                                      type=KS_SCRIPT_POST)
        postscript.run(anaconda.rootPath, flags.serial, anaconda.intf)

        # TODO: Add status line for text mode
        pkgstatus = None
        if anaconda.id.displayMode == 'g':
            import gtk
            pkgstatus = gtk.Label("Preparing to install...")
            w.window.child.add(pkgstatus)
            pkgstatus.show()

        messages = '/root/ami-creation.log'
        rc = iutil.execWithCallback('/bin/sh' , ['-c', 'cd /tmp/img; /tmp/ami_creator.py -c /tmp/ks-centos6.cfg'],
                                    stdin = messages, stdout = messages, stderr = messages,
                                    root = '/mnt/sysimage', callback=imageProgress,
                                    callback_data=ImageProgress(w, pkgstatus))
        if bindmount:
            isys.umount('/mnt/sysimage/mnt/source')
        w.pop()

    def __init__(self):
        silvereye.InstallClass.__init__(self)
| |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
import os
import pkgutil
import plistlib
import subprocess
from collections import namedtuple
from contextlib import contextmanager
from six import string_types
from pants.base.revision import Revision
from pants.java.util import execute_java
from pants.option.custom_types import dict_option
from pants.subsystem.subsystem import Subsystem
from pants.util.contextutil import temporary_dir
from pants.util.memo import memoized_property
from pants.util.osutil import OS_ALIASES, normalize_os_name
logger = logging.getLogger(__name__)
class Distribution(object):
  """Represents a java distribution - either a JRE or a JDK installed on the local system.

  In particular provides access to the distribution's binaries; ie: java while ensuring basic
  constraints are met. For example a minimum version can be specified if you know you need to
  compile source code or run bytecode that exercise features only available in that version forward.
  """

  class Error(Exception):
    """Indicates an invalid java distribution."""

  @staticmethod
  def _parse_java_version(name, version):
    """Normalize a version constraint to a Revision (or None).

    :param string name: the parameter name, used only in the error message
    :param version: a version string, a Revision, or None
    :returns: the parsed Revision, or None when no version was given
    :raises: ValueError if `version` is neither a string nor a Revision
    """
    # Java version strings have been well defined since release 1.3.1 as defined here:
    # http://www.oracle.com/technetwork/java/javase/versioning-naming-139433.html
    # These version strings comply with semver except that the traditional pre-release semver
    # slot (the 4th) can be delimited by an _ in the case of update releases of the jdk.
    # We accommodate that difference here using lenient parsing.
    # We also accommodate specification versions, which just have major and minor
    # components; eg: `1.8`. These are useful when specifying constraints a distribution must
    # satisfy; eg: to pick any 1.8 java distribution: '1.8' <= version <= '1.8.99'
    if isinstance(version, string_types):
      version = Revision.lenient(version)
    if version and not isinstance(version, Revision):
      raise ValueError('{} must be a string or a Revision object, given: {}'.format(name, version))
    return version

  @staticmethod
  def _is_executable(path):
    # A usable binary must be a regular file with the execute bit set.
    return os.path.isfile(path) and os.access(path, os.X_OK)

  def __init__(self, home_path=None, bin_path=None, minimum_version=None, maximum_version=None,
               jdk=False):
    """Creates a distribution wrapping the given `home_path` or `bin_path`.

    Only one of `home_path` or `bin_path` should be supplied.

    :param string home_path: the path to the java distribution's home dir
    :param string bin_path: the path to the java distribution's bin dir
    :param minimum_version: a modified semantic version string or else a Revision object
    :param maximum_version: a modified semantic version string or else a Revision object
    :param bool jdk: ``True`` to require the distribution be a JDK vs a JRE
    :raises: ValueError on an invalid path or when not exactly one path is supplied
    """
    if home_path and not os.path.isdir(home_path):
      raise ValueError('The specified java home path is invalid: {}'.format(home_path))
    if bin_path and not os.path.isdir(bin_path):
      raise ValueError('The specified binary path is invalid: {}'.format(bin_path))
    if not bool(home_path) ^ bool(bin_path):
      raise ValueError('Exactly one of home path or bin path should be supplied, given: '
                       'home_path={} bin_path={}'.format(home_path, bin_path))
    self._home = home_path
    # The '/usr/bin' fallback is unreachable given the xor check above; with
    # only a home dir we look in its bin/ subdir.
    self._bin_path = bin_path or (os.path.join(home_path, 'bin') if home_path else '/usr/bin')
    self._minimum_version = self._parse_java_version("minimum_version", minimum_version)
    self._maximum_version = self._parse_java_version("maximum_version", maximum_version)
    self._jdk = jdk
    # Lazily-populated caches; see validate() and the properties below.
    self._is_jdk = False
    self._system_properties = None
    self._version = None
    self._validated_binaries = {}

  @property
  def jdk(self):
    # validate() both checks constraints and discovers whether javac exists.
    self.validate()
    return self._is_jdk

  @property
  def system_properties(self):
    """Returns a dict containing the system properties of this java distribution."""
    # Returns a copy so callers cannot mutate the cached properties.
    return dict(self._get_system_properties(self.java))

  @property
  def version(self):
    """Returns the distribution version.

    Raises Distribution.Error if this distribution is not valid according to the configured
    constraints.
    """
    return self._get_version(self.java)

  def find_libs(self, names):
    """Looks for jars in the distribution lib folder(s).

    If the distribution is a JDK, both the `lib` and `jre/lib` dirs will be scanned.
    The endorsed and extension dirs are not checked.

    :param list names: jar file names
    :return: list of paths to requested libraries
    :raises: `Distribution.Error` if any of the jars could not be found.
    """
    def collect_existing_libs():
      def lib_paths():
        yield os.path.join(self.home, 'lib')
        if self.jdk:
          yield os.path.join(self.home, 'jre', 'lib')

      for name in names:
        for path in lib_paths():
          lib_path = os.path.join(path, name)
          if os.path.exists(lib_path):
            yield lib_path
            break
        else:
          # for/else: no candidate dir contained this jar.
          raise Distribution.Error('Failed to locate {} library'.format(name))

    return list(collect_existing_libs())

  @property
  def home(self):
    """Returns the distribution JAVA_HOME."""
    if not self._home:
      home = self._get_system_properties(self.java)['java.home']
      # The `jre/bin/java` executable in a JDK distribution will report `java.home` as the jre dir,
      # so we check for this and re-locate to the containing jdk dir when present.
      if os.path.basename(home) == 'jre':
        jdk_dir = os.path.dirname(home)
        if self._is_executable(os.path.join(jdk_dir, 'bin', 'javac')):
          home = jdk_dir
      self._home = home
    return self._home

  @property
  def real_home(self):
    """Real path to the distribution java.home (resolving links)."""
    return os.path.realpath(self.home)

  @property
  def java(self):
    """Returns the path to this distribution's java command.

    If this distribution has no valid java command raises Distribution.Error.
    """
    return self.binary('java')

  def binary(self, name):
    """Returns the path to the command of the given name for this distribution.

    For example: ::

        >>> d = Distribution()
        >>> jar = d.binary('jar')
        >>> jar
        '/usr/bin/jar'
        >>>

    If this distribution has no valid command of the given name raises Distribution.Error.
    If this distribution is a JDK checks both `bin` and `jre/bin` for the binary.
    """
    if not isinstance(name, string_types):
      raise ValueError('name must be a binary name, given {} of type {}'.format(name, type(name)))
    self.validate()
    return self._validated_executable(name)

  def validate(self):
    """Validates this distribution against its configured constraints.

    Raises Distribution.Error if this distribution is not valid according to the configured
    constraints.
    """
    # Any cached binary means a prior validate() already succeeded.
    if self._validated_binaries:
      return

    with self._valid_executable('java') as java:
      if self._minimum_version:
        version = self._get_version(java)
        if version < self._minimum_version:
          raise self.Error('The java distribution at {} is too old; expecting at least {} and'
                           ' got {}'.format(java, self._minimum_version, version))
      if self._maximum_version:
        version = self._get_version(java)
        if version > self._maximum_version:
          raise self.Error('The java distribution at {} is too new; expecting no older than'
                           ' {} and got {}'.format(java, self._maximum_version, version))

    # We might be a JDK discovered by the embedded jre `java` executable.
    # If so reset the bin path to the true JDK home dir for full access to all binaries.
    self._bin_path = os.path.join(self.home, 'bin')
    try:
      self._validated_executable('javac')  # Calling purely for the check and cache side effects
      self._is_jdk = True
    except self.Error:
      # Only a JRE was found; that is fatal only when a JDK was required.
      if self._jdk:
        raise

  def execute_java(self, *args, **kwargs):
    """Runs a java command using this distribution; see pants.java.util.execute_java."""
    return execute_java(*args, distribution=self, **kwargs)

  def _get_version(self, java):
    # Cached parse of the 'java.version' system property.
    if not self._version:
      self._version = self._parse_java_version('java.version',
                                               self._get_system_properties(java)['java.version'])
    return self._version

  def _get_system_properties(self, java):
    """Runs the bundled SystemProperties class with `java` and caches the parsed key=value output."""
    if not self._system_properties:
      with temporary_dir() as classpath:
        with open(os.path.join(classpath, 'SystemProperties.class'), 'w+') as fp:
          # NOTE(review): class-file bytes written through a text-mode file;
          # this assumes Python 2 str semantics — confirm before porting to
          # Python 3.
          fp.write(pkgutil.get_data(__name__, 'SystemProperties.class'))
        cmd = [java, '-cp', classpath, 'SystemProperties']
        process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout, stderr = process.communicate()
        if process.returncode != 0:
          raise self.Error('Failed to determine java system properties for {} with {} - exit code'
                           ' {}: {}'.format(java, ' '.join(cmd), process.returncode, stderr))

        # Output is one 'key=value' pair per line.
        props = {}
        for line in stdout.split(os.linesep):
          key, _, val = line.partition('=')
          props[key] = val
        self._system_properties = props

    return self._system_properties

  def _validate_executable(self, name):
    # Search the bin dirs in order; a JDK additionally exposes jre/bin tools.
    def bin_paths():
      yield self._bin_path
      if self._is_jdk:
        yield os.path.join(self.home, 'jre', 'bin')

    for bin_path in bin_paths():
      exe = os.path.join(bin_path, name)
      if self._is_executable(exe):
        return exe
    raise self.Error('Failed to locate the {} executable, {} does not appear to be a'
                     ' valid {} distribution'.format(name, self, 'JDK' if self._jdk else 'JRE'))

  def _validated_executable(self, name):
    # Memoized lookup; the cache doubles as validate()'s already-valid marker.
    exe = self._validated_binaries.get(name)
    if not exe:
      exe = self._validate_executable(name)
      self._validated_binaries[name] = exe
    return exe

  @contextmanager
  def _valid_executable(self, name):
    exe = self._validate_executable(name)
    yield exe
    # Only reached (and cached) when the caller's with-body did not raise.
    self._validated_binaries[name] = exe

  def __repr__(self):
    return ('Distribution({!r}, minimum_version={!r}, maximum_version={!r} jdk={!r})'.format(
        self._bin_path, self._minimum_version, self._maximum_version, self._jdk))
class DistributionLocator(Subsystem):
  """Subsystem that knows how to look up a java Distribution."""

  class Error(Distribution.Error):
    """Error locating a java distribution."""

  class _Location(namedtuple('Location', ['home_path', 'bin_path'])):
    """Represents the location of a java distribution."""

    @classmethod
    def from_home(cls, home):
      """Creates a location given the JAVA_HOME directory.

      :param string home: The path of the JAVA_HOME directory.
      :returns: The java distribution location.
      """
      return cls(home_path=home, bin_path=None)

    @classmethod
    def from_bin(cls, bin_path):
      """Creates a location given the `java` executable parent directory.

      :param string bin_path: The parent path of the `java` executable.
      :returns: The java distribution location.
      """
      return cls(home_path=None, bin_path=bin_path)

  options_scope = 'jvm-distributions'

  # Cache of located Distributions keyed by (minimum_version, maximum_version, jdk).
  _CACHE = {}

  # The `/usr/lib/jvm` dir is a common target of packages built for redhat and debian as well as
  # other more exotic distributions.
  _JAVA_DIST_DIR = '/usr/lib/jvm'

  # OSX tool that reports installed JVM home dirs; see _osx_java_homes.
  _OSX_JAVA_HOME_EXE = '/usr/libexec/java_home'

  @classmethod
  def register_options(cls, register):
    super(DistributionLocator, cls).register_options(register)
    # Render the alias map (eg: "linux: [...]") for the option help text.
    human_readable_os_aliases = ', '.join('{}: [{}]'.format(str(key), ', '.join(sorted(val)))
                                          for key, val in OS_ALIASES.items())
    register('--paths', advanced=True, type=dict_option,
             help='Map of os names to lists of paths to jdks. These paths will be searched before '
                  'everything else (before the JDK_HOME, JAVA_HOME, PATH environment variables) '
                  'when locating a jvm to use. The same OS can be specified via several different '
                  'aliases, according to this map: {}'.format(human_readable_os_aliases))

  @memoized_property
  def _normalized_jdk_paths(self):
    # Collapse user-supplied OS aliases to canonical os names, merging the
    # path lists when several aliases map to the same canonical name.
    jdk_paths = self.get_options().paths or {}
    normalized = {}
    for name, paths in sorted(jdk_paths.items()):
      rename = normalize_os_name(name)
      if rename in normalized:
        logger.warning('Multiple OS names alias to "{}"; combining results.'.format(rename))
        normalized[rename].extend(paths)
      else:
        normalized[rename] = paths
    return normalized

  def all_jdk_paths(self):
    """Get all explicitly configured JDK paths.

    :return: mapping of os name -> list of jdk_paths
    :rtype: dict of string -> list of string
    """
    return self._normalized_jdk_paths

  def get_jdk_paths(self, os_name=None):
    """Get the list of explicitly configured JDK paths for this os.

    :param os_name: Os name to lookup. If None, use the currently detected os name.
    :return: Paths of explicitly configured JDK's from the --jvm-distribution-paths option
    :rtype: list of strings
    """
    jdk_paths = self._normalized_jdk_paths
    if not jdk_paths:
      return ()
    if os_name is None:
      # uname()[0] is the system name, eg: 'Linux' or 'Darwin'.
      os_name = os.uname()[0].lower()
    os_name = normalize_os_name(os_name)
    if os_name not in jdk_paths:
      logger.warning('--jvm-distributions-paths was specified, but has no entry for "{}".'
                     .format(os_name))
    return jdk_paths.get(os_name, ())

  @classmethod
  def java_path_locations(cls):
    """Yields a _Location for each explicitly configured JDK path for this os."""
    for location in cls.global_instance().get_jdk_paths():
      yield cls._Location.from_home(location)

  @classmethod
  def cached(cls, minimum_version=None, maximum_version=None, jdk=False):
    """Finds a java distribution that meets the given constraints and returns it.

    First looks for a cached version that was previously located, otherwise calls locate().

    :param minimum_version: minimum jvm version to look for (eg, 1.7).
    :param maximum_version: maximum jvm version to look for (eg, 1.7.9999).
    :param bool jdk: whether the found java distribution is required to have a jdk.
    :return: the Distribution.
    :rtype: :class:`pants.java.distribution.Distribution`
    """
    def scan_constraint_match():
      # Convert strings to Revision objects for apples-to-apples comparison.
      max_version = Distribution._parse_java_version("maximum_version", maximum_version)
      min_version = Distribution._parse_java_version("minimum_version", minimum_version)

      # Any previously located distribution satisfying the constraints will
      # do; implicitly returns None when none match.
      for dist in cls._CACHE.values():
        if min_version and dist.version < min_version:
          continue
        if max_version and dist.version > max_version:
          continue
        if jdk and not dist.jdk:
          continue
        return dist

    key = (minimum_version, maximum_version, jdk)
    dist = cls._CACHE.get(key)
    if not dist:
      dist = scan_constraint_match()
      if not dist:
        dist = cls.locate(minimum_version=minimum_version, maximum_version=maximum_version, jdk=jdk)
      cls._CACHE[key] = dist
    return dist

  @classmethod
  def locate(cls, minimum_version=None, maximum_version=None, jdk=False):
    """Finds a java distribution that meets any given constraints and returns it.

    First looks through the paths listed for this operating system in the --jvm-distributions-paths
    map. Then looks in JDK_HOME and JAVA_HOME if defined, falling back to a search on the PATH.
    Raises Distribution.Error if no suitable java distribution could be found.

    :param minimum_version: minimum jvm version to look for (eg, 1.7).
    :param maximum_version: maximum jvm version to look for (eg, 1.7.9999).
    :param bool jdk: whether the found java distribution is required to have a jdk.
    :return: the located Distribution.
    :rtype: :class:`pants.java.distribution.Distribution`
    """
    def search_path():
      # Explicitly configured paths first, then the environment-derived ones.
      for location in cls.global_instance().java_path_locations():
        yield location
      for location in cls.environment_jvm_locations():
        yield location

    # filter(None, ...) drops the None placeholders yielded for unset env vars.
    for location in filter(None, search_path()):
      try:
        dist = Distribution(home_path=location.home_path,
                            bin_path=location.bin_path,
                            minimum_version=minimum_version,
                            maximum_version=maximum_version,
                            jdk=jdk)
        dist.validate()
        logger.debug('Located {} for constraints: minimum_version {}, maximum_version {}, jdk {}'
                     .format(dist, minimum_version, maximum_version, jdk))
        return dist
      except (ValueError, Distribution.Error):
        # Not a usable candidate; keep scanning the remaining locations.
        pass
    raise cls.Error('Failed to locate a {} distribution with minimum_version {}, maximum_version {}'
                    .format('JDK' if jdk else 'JRE', minimum_version, maximum_version))

  @classmethod
  def _linux_java_homes(cls):
    # Each subdir of /usr/lib/jvm is treated as a candidate java home.
    if os.path.isdir(cls._JAVA_DIST_DIR):
      for path in os.listdir(cls._JAVA_DIST_DIR):
        home = os.path.join(cls._JAVA_DIST_DIR, path)
        if os.path.isdir(home):
          yield cls._Location.from_home(home)

  @classmethod
  def _osx_java_homes(cls):
    # OSX will have a java_home tool that can be used to locate a unix-compatible java home dir.
    #
    # See:
    #   https://developer.apple.com/library/mac/documentation/Darwin/Reference/ManPages/man1/java_home.1.html
    #
    # The `--xml` output looks like so:
    # <?xml version="1.0" encoding="UTF-8"?>
    # <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN"
    #           "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
    # <plist version="1.0">
    # <array>
    #   <dict>
    #     ...
    #     <key>JVMHomePath</key>
    #     <string>/Library/Java/JavaVirtualMachines/jdk1.7.0_45.jdk/Contents/Home</string>
    #     ...
    #   </dict>
    #   ...
    # </array>
    # </plist>
    if os.path.exists(cls._OSX_JAVA_HOME_EXE):
      try:
        plist = subprocess.check_output([cls._OSX_JAVA_HOME_EXE, '--failfast', '--xml'])
        for distribution in plistlib.readPlistFromString(plist):
          home = distribution['JVMHomePath']
          yield cls._Location.from_home(home)
      except subprocess.CalledProcessError:
        # --failfast: java_home exits non-zero when no JVMs are installed.
        pass

  @classmethod
  def environment_jvm_locations(cls):
    """Yields candidate locations from env vars, conventional dirs and the PATH.

    May yield None entries (for unset env vars); callers are expected to filter
    them out, as locate() does.
    """
    def env_home(home_env_var):
      home = os.environ.get(home_env_var)
      return cls._Location.from_home(home) if home else None

    yield env_home('JDK_HOME')
    yield env_home('JAVA_HOME')

    for location in cls._linux_java_homes():
      yield location
    for location in cls._osx_java_homes():
      yield location

    search_path = os.environ.get('PATH')
    if search_path:
      for bin_path in search_path.strip().split(os.pathsep):
        yield cls._Location.from_bin(bin_path)
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# name: test_handler_list.py
# author: Harold Bradley III
# email: harold@bradleystudio.net
# created on: 02/19/2016
#
"""
Unittests for nwid.event.handler_list module.
"""
from __future__ import absolute_import
from nwid.event import EVENT_BUBBLE, EVENT_CAPTURE, HandlerList
from nwid.event.handler_list import HandlerListItem
def mock_callback():
    """Stand-in event handler; always signals success."""
    return True
def mock_alt_callback():
    """Alternate stand-in event handler; always signals failure."""
    return False
## Test data structure HandlerList ##
def test_HandlerList_can_be_initialized_with_an_item():
    """A HandlerList can be initialized with an item."""
    hlist = HandlerList(mock_callback, 1)
    expected = HandlerListItem(mock_callback, 1, None, False)
    assert hlist._list[0] == expected
def test_HandlerList_can_have_items_added():
    """A HandlerList can have items added using the 'add' method."""
    hlist = HandlerList()
    hlist.add(mock_callback, 1)
    assert hlist._list[0] == HandlerListItem(mock_callback, 1, None, False)
def test_HandlerList_can_add_multiple_items():
    """A HandlerList should be able to hold multiple items and should respect
    the order in which items were added."""
    hlist = HandlerList()
    for _ in range(3):
        hlist.add(mock_callback)
    expected = HandlerListItem(mock_callback, 50, None, False)
    for item in hlist._list[:3]:
        assert item == expected
def test_HandlerList_can_add_multiple_items_with_priorities():
    """A HandlerList should be able to hold multiple items and should respect
    their priorities."""
    hlist = HandlerList()
    for priority in (3, 1, 2):
        hlist.add(mock_callback, priority)
    for index, priority in enumerate((1, 2, 3)):
        assert hlist._list[index] == HandlerListItem(mock_callback, priority,
                                                     None, False)
def test_HandlerList_can_add_multiple_items_with_some_having_same_priority():
    """A HandlerList should be able to handle adding multiple items with some
    having the same priority.  The order when priority is the same is just the
    order of execution."""
    hlist = HandlerList()
    hlist.add(mock_callback, 1)
    hlist.add(mock_callback, 5)
    hlist.add(mock_callback, 2)
    hlist.add(mock_alt_callback, 2)
    expected = [(mock_callback, 1), (mock_callback, 2),
                (mock_alt_callback, 2), (mock_callback, 5)]
    for index, (callback, priority) in enumerate(expected):
        assert hlist._list[index] == HandlerListItem(callback, priority,
                                                     None, False)
def test_HandlerList_can_add_items_without_a_priority():
    """A HandlerList can add items without a priority. (It defaults to 50.)"""
    hlist = HandlerList()
    hlist.add(mock_callback)
    hlist.add(mock_callback)
    assert [item.priority for item in hlist._list[:2]] == [50, 50]
def test_HandlerList_can_add_items_with_an_identifier():
    """A HandlerList can add items with a string identifier."""
    hlist = HandlerList()
    hlist.add(mock_callback, 1, 'first-item')
    hlist.add(mock_callback, 2, 'second-item')
    identifiers = [item.identifier for item in hlist._list[:2]]
    assert identifiers == ['first-item', 'second-item']
def test_HandlerList_can_add_items_with_a_propagation_method():
    """A HandlerList can add items with a propagation method type."""
    hlist = HandlerList()
    methods = (EVENT_CAPTURE, EVENT_BUBBLE, True, False)
    for priority, method in enumerate(methods, start=1):
        hlist.add(mock_callback, priority, method=method)
    for index, method in enumerate(methods):
        assert hlist._list[index].method == method
def test_HandlerList_can_tell_its_len():
    """A HandlerList should be able to return its length."""
    hlist = HandlerList()
    for priority in (1, 2):
        hlist.add(mock_callback, priority)
    assert len(hlist) == 2
def test_HandlerList_can_remove_items_by_identifier():
    """A HandlerList can remove previously added items by their identifier."""
    hlist = HandlerList()
    hlist.add(mock_callback, 1, 'first-item')
    hlist.add(mock_alt_callback, 2, 'second-item')
    assert len(hlist) == 2
    for remaining, identifier in ((1, 'first-item'), (0, 'second-item')):
        hlist.remove(identifier)
        assert len(hlist) == remaining
def test_HandlerList_can_remove_items_by_callback():
    """A HandlerList can remove previously added items by their callback."""
    hlist = HandlerList()
    hlist.add(mock_callback, 1)
    hlist.add(mock_alt_callback, 2)
    assert len(hlist) == 2
    for remaining, callback in ((1, mock_callback), (0, mock_alt_callback)):
        hlist.remove(callback)
        assert len(hlist) == remaining
def test_HandlerList_callbacks_are_functions_that_can_be_called():
    """A HandlerList callback is a function that can be called."""
    hlist = HandlerList()
    hlist.add(mock_callback, 0)
    hlist.add(mock_alt_callback, 1)
    assert hlist[0]() is True
    assert hlist[1]() is False
    hlist.add(mock_alt_callback, 3)
    hlist.add(mock_callback, 2)
    assert hlist[2]() is True
    assert hlist[3]() is False
def test_HandlerList_can_be_iterated_over():
    """A HandlerList can be iterated over (looped through)."""
    hlist = HandlerList()
    for priority in (1, 2, 3):
        hlist.add(mock_callback, priority)
    # Each handler should be visited exactly once and should return True.
    results = [function() for function in hlist]
    assert results == [True, True, True]
def test_HandlerList_can_iterate_using_with_method_filter():
    """A HandlerList can be iterated over using the with_method(method)
    function to filter by propagation method."""
    hlist = HandlerList()
    for priority in (1, 2, 3):
        hlist.add(mock_callback, priority, method=EVENT_BUBBLE)
    for priority in (4, 5, 6, 7):
        hlist.add(mock_alt_callback, priority, method=EVENT_CAPTURE)
    # The bubble filter should yield exactly the three True-returning handlers.
    bubbled = [function() for function in hlist.with_method(EVENT_BUBBLE)]
    assert bubbled == [True, True, True]
    # The capture filter should yield exactly the four False-returning handlers.
    captured = [function() for function in hlist.with_method(EVENT_CAPTURE)]
    assert captured == [False, False, False, False]
def test_HandlerList_can_test_for_existance_of_identifier():
    """A HandlerList can use 'in' to test if an identifier is in the list."""
    hlist = HandlerList()
    hlist.add(mock_callback, 1, 'id')
    hlist.add(mock_callback, 1, 'abc')
    for identifier in ('id', 'abc'):
        assert identifier in hlist
    assert 'xyz' not in hlist
def test_HandlerList_can_test_for_existance_of_function():
    """A HandlerList can use 'in' to test if a callback function is in the list."""
    hlist = HandlerList()
    hlist.add(mock_callback, 1, 'id')
    assert mock_callback in hlist
    assert mock_alt_callback not in hlist
    hlist.add(mock_alt_callback, 1, 'abc')
    assert mock_alt_callback in hlist
| |
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import atexit
import fcntl
import grp
import os
import pwd
import signal
import sys
from neutron.common import exceptions
from neutron.i18n import _LE, _LI
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
def setuid(user_id_or_name):
    """Switch the process to the given user (numeric uid or user name).

    A uid of 0 is a no-op; an OS refusal raises FailToDropPrivilegesExit.
    """
    try:
        uid = int(user_id_or_name)
    except (TypeError, ValueError):
        # Not numeric: resolve as a user name through the password database.
        uid = pwd.getpwnam(user_id_or_name).pw_uid
    if uid == 0:
        return
    try:
        os.setuid(uid)
    except OSError:
        msg = _('Failed to set uid %s') % uid
        LOG.critical(msg)
        raise exceptions.FailToDropPrivilegesExit(msg)
def setgid(group_id_or_name):
    """Switch the process to the given group (numeric gid or group name).

    A target gid of 0 is left untouched (the process already has root's
    group).  Raises FailToDropPrivilegesExit when the gid cannot be changed.
    """
    try:
        target_gid = int(group_id_or_name)
    except (TypeError, ValueError):
        # Not numeric - resolve the group name through the group database.
        target_gid = grp.getgrnam(group_id_or_name).gr_gid
    if target_gid == 0:
        return
    try:
        os.setgid(target_gid)
    except OSError:
        message = _('Failed to set gid %s') % target_gid
        LOG.critical(message)
        raise exceptions.FailToDropPrivilegesExit(message)
def drop_privileges(user=None, group=None):
    """Drop root privileges down to the given user and/or group.

    No-op when neither a user nor a group is requested.  Must be invoked
    as root; otherwise (or on any failure to shed supplemental groups)
    FailToDropPrivilegesExit is raised.
    """
    if user is None and group is None:
        return
    if os.geteuid():
        message = _('Root permissions are required to drop privileges.')
        LOG.critical(message)
        raise exceptions.FailToDropPrivilegesExit(message)
    if group is not None:
        # Shed supplemental group memberships before switching the gid.
        try:
            os.setgroups([])
        except OSError:
            message = _('Failed to remove supplemental groups')
            LOG.critical(message)
            raise exceptions.FailToDropPrivilegesExit(message)
        setgid(group)
    # Change the uid last: once it is dropped, the gid can no longer change.
    if user is not None:
        setuid(user)
    LOG.info(_LI("Process runs with uid/gid: %(uid)s/%(gid)s"),
             {'uid': os.getuid(), 'gid': os.getgid()})
class Pidfile(object):
    """Exclusive pidfile guarded by an advisory flock.

    Opening the same pidfile from a second process fails to acquire the
    non-blocking lock, which terminates that process.
    """

    def __init__(self, pidfile, procname, uuid=None):
        self.pidfile = pidfile
        self.procname = procname
        self.uuid = uuid
        try:
            self.fd = os.open(pidfile, os.O_CREAT | os.O_RDWR)
            # LOCK_NB: fail immediately rather than block when another
            # process already holds the exclusive lock.
            fcntl.flock(self.fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except IOError:
            LOG.exception(_LE("Error while handling pidfile: %s"), pidfile)
            sys.exit(1)

    def __str__(self):
        return self.pidfile

    def unlock(self):
        """Release the advisory lock held on the pidfile.

        BUG FIX: the previous ``if not not fcntl.flock(...)`` check was dead
        code -- flock() returns None and reports failure by raising, so the
        raise branch could never execute.  Translate the failure explicitly.
        """
        try:
            fcntl.flock(self.fd, fcntl.LOCK_UN)
        except IOError:
            raise IOError(_('Unable to unlock pid file'))

    def write(self, pid):
        """Replace the pidfile contents with the given pid and flush to disk."""
        os.ftruncate(self.fd, 0)
        os.write(self.fd, "%d" % pid)
        os.fsync(self.fd)

    def read(self):
        """Return the pid recorded in the pidfile, or None if unparsable."""
        try:
            pid = int(os.read(self.fd, 128))
            # Rewind so subsequent reads see the whole file again.
            os.lseek(self.fd, 0, os.SEEK_SET)
            return pid
        except ValueError:
            return

    def is_running(self):
        """Report whether the recorded pid belongs to a live matching process.

        Matches the process name (and optional uuid) against the command
        line in /proc; any unreadable/missing /proc entry counts as dead.
        """
        pid = self.read()
        if not pid:
            return False
        cmdline = '/proc/%s/cmdline' % pid
        try:
            with open(cmdline, "r") as f:
                exec_out = f.readline()
            return self.procname in exec_out and (not self.uuid or
                                                  self.uuid in exec_out)
        except IOError:
            return False
class Daemon(object):
    """A generic daemon class.
    Usage: subclass the Daemon class and override the run() method
    """
    def __init__(self, pidfile, stdin='/dev/null', stdout='/dev/null',
                 stderr='/dev/null', procname='python', uuid=None,
                 user=None, group=None):
        # Paths that the daemon's standard streams are redirected to.
        self.stdin = stdin
        self.stdout = stdout
        self.stderr = stderr
        self.procname = procname
        # Creating the Pidfile also acquires its exclusive lock, so a second
        # instance using the same pidfile exits in Pidfile.__init__.
        self.pidfile = Pidfile(pidfile, procname, uuid)
        # Optional user/group to drop privileges to once running (see run()).
        self.user = user
        self.group = group
    def _fork(self):
        # Fork once; the parent exits and execution continues in the child.
        try:
            pid = os.fork()
            if pid > 0:
                sys.exit(0)
        except OSError:
            LOG.exception(_LE('Fork failed'))
            sys.exit(1)
    def daemonize(self):
        """Daemonize process by doing Stevens double fork."""
        # fork first time
        self._fork()
        # decouple from parent environment
        os.chdir("/")
        os.setsid()
        os.umask(0)
        # fork second time
        self._fork()
        # redirect standard file descriptors
        sys.stdout.flush()
        sys.stderr.flush()
        stdin = open(self.stdin, 'r')
        stdout = open(self.stdout, 'a+')
        # buffering=0 makes stderr unbuffered (Python 2 file semantics).
        stderr = open(self.stderr, 'a+', 0)
        os.dup2(stdin.fileno(), sys.stdin.fileno())
        os.dup2(stdout.fileno(), sys.stdout.fileno())
        os.dup2(stderr.fileno(), sys.stderr.fileno())
        # write pidfile
        atexit.register(self.delete_pid)
        signal.signal(signal.SIGTERM, self.handle_sigterm)
        self.pidfile.write(os.getpid())
    def delete_pid(self):
        # atexit hook: remove the pidfile on interpreter shutdown.
        os.remove(str(self.pidfile))
    def handle_sigterm(self, signum, frame):
        # Exit cleanly on SIGTERM so the atexit hook (pidfile removal) runs.
        sys.exit(0)
    def start(self):
        """Start the daemon."""
        if self.pidfile.is_running():
            # Another live process owns this pidfile; release our lock and bail.
            self.pidfile.unlock()
            LOG.error(_LE('Pidfile %s already exist. Daemon already '
                          'running?'), self.pidfile)
            sys.exit(1)
        # Start the daemon
        self.daemonize()
        self.run()
    def run(self):
        """Override this method and call super().run when subclassing Daemon.
        start() will call this method after the process has daemonized.
        """
        drop_privileges(self.user, self.group)
| |
#----------------------------------------------------------------------
# Copyright (c) 2008 Board of Trustees, Princeton University
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
#----------------------------------------------------------------------
##
# Implements SFA GID. GIDs are based on certificates, and the GID class is a
# descendant of the certificate class.
##
import xmlrpclib
import uuid
from sfa.trust.certificate import Certificate
from sfa.util.faults import *
from sfa.util.sfalogging import logger
from sfa.util.xrn import hrn_to_urn, urn_to_hrn, hrn_authfor_hrn
##
# Create a new uuid. Returns the UUID as a string.
def create_uuid():
    """Generate a fresh random (version 4) UUID and return it as a decimal string."""
    fresh = uuid.uuid4()
    return str(fresh.int)
##
# GID is a tuple:
# (uuid, urn, public_key)
#
# UUID is a unique identifier and is created by the python uuid module
# (or the utility function create_uuid() in gid.py).
#
# HRN is a human readable name. It is a dotted form similar to a backward domain
# name. For example, planetlab.us.arizona.bakers.
#
# URN is a human readable identifier of form:
# "urn:publicid:IDN+toplevelauthority[:sub-auth.]*[\res. type]\ +object name"
# For example, urn:publicid:IDN+planetlab:us:arizona+user+bakers
#
# PUBLIC_KEY is the public key of the principal identified by the UUID/HRN.
# It is a Keypair object as defined in the cert.py module.
#
# It is expected that there is a one-to-one pairing between UUIDs and HRN,
# but it is uncertain how this would be enforced or if it needs to be enforced.
#
# These fields are encoded using xmlrpc into the subjectAltName field of the
# x509 certificate. Note: Call encode() once the fields have been filled in
# to perform this encoding.
class GID(Certificate):
    # Cached decoded fields; populated lazily by decode() on first access.
    uuid = None
    hrn = None
    urn = None
    ##
    # Create a new GID object
    #
    # @param create If true, create the X509 certificate
    # @param subject If subject!=None, create the X509 cert and set the subject name
    # @param string If string!=None, load the GID from a string
    # @param filename If filename!=None, load the GID from a file
    # @param lifeDays life of GID in days - default is 1825==5 years
    def __init__(self, create=False, subject=None, string=None, filename=None, uuid=None, hrn=None, urn=None, lifeDays=1825):
        """Build a GID, optionally pre-filling the uuid/hrn/urn fields.

        When both hrn and urn are given, urn wins: the hrn is re-derived
        from the urn.
        """
        Certificate.__init__(self, lifeDays, create, subject, string, filename)
        if subject:
            logger.debug("Creating GID for subject: %s" % subject)
        if uuid:
            self.uuid = int(uuid)
        if hrn:
            self.hrn = hrn
            # No type information is available from an hrn alone.
            self.urn = hrn_to_urn(hrn, 'unknown')
        if urn:
            self.urn = urn
            # NOTE: 'type' shadows the builtin; only the hrn part is kept.
            self.hrn, type = urn_to_hrn(urn)
    def set_uuid(self, uuid):
        """Store the uuid, coercing string input to int."""
        if isinstance(uuid, str):
            self.uuid = int(uuid)
        else:
            self.uuid = uuid
    def get_uuid(self):
        """Return the uuid, lazily decoding the certificate on first access."""
        if not self.uuid:
            self.decode()
        return self.uuid
    def set_hrn(self, hrn):
        """Store the human readable name (does not update the urn)."""
        self.hrn = hrn
    def get_hrn(self):
        """Return the hrn, lazily decoding the certificate on first access."""
        if not self.hrn:
            self.decode()
        return self.hrn
    def set_urn(self, urn):
        """Store the urn and re-derive the hrn from it."""
        self.urn = urn
        self.hrn, type = urn_to_hrn(urn)
    def get_urn(self):
        """Return the urn, lazily decoding the certificate on first access."""
        if not self.urn:
            self.decode()
        return self.urn
    def get_type(self):
        """Return the object type encoded in the urn (e.g. 'authority')."""
        if not self.urn:
            self.decode()
        _, t = urn_to_hrn(self.urn)
        return t
    ##
    # Encode the GID fields and package them into the subject-alt-name field
    # of the X509 certificate. This must be called prior to signing the
    # certificate. It may only be called once per certificate.
    def encode(self):
        if self.urn:
            urn = self.urn
        else:
            urn = hrn_to_urn(self.hrn, None)
        # NOTE: 'str' shadows the builtin inside this method.
        str = "URI:" + urn
        if self.uuid:
            # Append the uuid as a second URI: entry in urn:uuid form.
            str += ", " + "URI:" + uuid.UUID(int=self.uuid).urn
        self.set_data(str, 'subjectAltName')
    ##
    # Decode the subject-alt-name field of the X509 certificate into the
    # fields of the GID. This is automatically called by the various get_*()
    # functions in this class.
    def decode(self):
        data = self.get_data('subjectAltName')
        # NOTE: 'dict' shadows the builtin inside this method.
        dict = {}
        if data:
            if data.lower().startswith('uri:http://<params>'):
                # Legacy format: fields xmlrpc-encoded after the
                # leading "URI:http://" (11 characters).
                dict = xmlrpclib.loads(data[11:])[0][0]
            else:
                # Current format: comma-separated "URI:" entries holding
                # the urn:uuid and urn:publicid values (see encode()).
                spl = data.split(', ')
                for val in spl:
                    if val.lower().startswith('uri:urn:uuid:'):
                        dict['uuid'] = uuid.UUID(val[4:]).int
                    elif val.lower().startswith('uri:urn:publicid:idn+'):
                        dict['urn'] = val[4:]
        self.uuid = dict.get("uuid", None)
        self.urn = dict.get("urn", None)
        self.hrn = dict.get("hrn", None)
        if self.urn:
            # The urn is authoritative; re-derive the hrn from it.
            self.hrn = urn_to_hrn(self.urn)[0]
    ##
    # Dump the credential to stdout.
    #
    # @param indent specifies a number of spaces to indent the output
    # @param dump_parents If true, also dump the parents of the GID
    def dump(self, *args, **kwargs):
        print self.dump_string(*args,**kwargs)
    def dump_string(self, indent=0, dump_parents=False):
        """Return a human-readable multi-line description of this GID."""
        result=" "*(indent-2) + "GID\n"
        result += " "*indent + "hrn:" + str(self.get_hrn()) +"\n"
        result += " "*indent + "urn:" + str(self.get_urn()) +"\n"
        result += " "*indent + "uuid:" + str(self.get_uuid()) + "\n"
        filename=self.get_filename()
        if filename: result += "Filename %s\n"%filename
        if self.parent and dump_parents:
            # Recurse with extra indentation for each ancestor.
            result += " "*indent + "parent:\n"
            result += self.parent.dump_string(indent+4, dump_parents)
        return result
    ##
    # Verify the chain of authenticity of the GID. First perform the checks
    # of the certificate class (verifying that each parent signs the child,
    # etc). In addition, GIDs also confirm that the parent's HRN is a prefix
    # of the child's HRN, and the parent is of type 'authority'.
    #
    # Verifying these prefixes prevents a rogue authority from signing a GID
    # for a principal that is not a member of that authority. For example,
    # planetlab.us.arizona cannot sign a GID for planetlab.us.princeton.foo.
    def verify_chain(self, trusted_certs = None):
        # do the normal certificate verification stuff
        trusted_root = Certificate.verify_chain(self, trusted_certs)
        if self.parent:
            # make sure the parent's hrn is a prefix of the child's hrn
            if not hrn_authfor_hrn(self.parent.get_hrn(), self.get_hrn()):
                raise GidParentHrn("This cert HRN %s isn't in the namespace for parent HRN %s" % (self.get_hrn(), self.parent.get_hrn()))
            # Parent must also be an authority (of some type) to sign a GID
            # There are multiple types of authority - accept them all here
            if not self.parent.get_type().find('authority') == 0:
                raise GidInvalidParentHrn("This cert %s's parent %s is not an authority (is a %s)" % (self.get_hrn(), self.parent.get_hrn(), self.parent.get_type()))
            # Then recurse up the chain - ensure the parent is a trusted
            # root or is in the namespace of a trusted root
            self.parent.verify_chain(trusted_certs)
        else:
            # make sure that the trusted root's hrn is a prefix of the child's
            trusted_gid = GID(string=trusted_root.save_to_string())
            trusted_type = trusted_gid.get_type()
            trusted_hrn = trusted_gid.get_hrn()
            #if trusted_type == 'authority':
            #    trusted_hrn = trusted_hrn[:trusted_hrn.rindex('.')]
            cur_hrn = self.get_hrn()
            if not hrn_authfor_hrn(trusted_hrn, cur_hrn):
                raise GidParentHrn("Trusted root with HRN %s isn't a namespace authority for this cert %s" % (trusted_hrn, cur_hrn))
            # There are multiple types of authority - accept them all here
            if not trusted_type.find('authority') == 0:
                raise GidInvalidParentHrn("This cert %s's trusted root signer %s is not an authority (is a %s)" % (self.get_hrn(), trusted_hrn, trusted_type))
        return
| |
import sys
import os
import unittest
sys.path.append(os.path.abspath('../'))
from menu import Node
class NodeTests(unittest.TestCase):
    """Unit tests for menu.Node: path building, resolution, matching, loading.

    FIX: the deprecated ``assertEquals`` alias is replaced everywhere with
    the canonical ``assertEqual`` (the alias emits DeprecationWarning and is
    removed in modern Python).  No assertion semantics change.
    """
    def setUp(self):
        """Before each test case"""
    def tearDown(self):
        """After each test case"""
    def test_Path(self):
        """Node.Path() should return all of the Node's ancestors' shortcuts
        (except the root Node) concatenated.
        """
        root = Node()
        child = Node()
        child.shortcut = 'child'
        child.parent = root
        root.children.append(child)
        descendant = Node()
        descendant.shortcut = 'descendant'
        descendant.parent = child
        child.children.append(descendant)
        self.assertEqual(root.Path(), '', '[root].Path() should return "".')
        self.assertEqual(child.Path(), 'child',
                         '[child].Path() should return "child".')
        self.assertEqual(descendant.Path(), 'childdescendant',
                         '[descendant].Path() should return "childdescendant".')
    def test_Resolve(self):
        """ Node.Resolve(path) should take a path and return the sub-Node it
        references.
        """
        root = Node()
        child = Node()
        descendant = Node()
        noise = Node()  # An extra node to make sure selection is working.
        child.shortcut = 'ch'
        root.children.append(child)
        child.parent = root
        noise.shortcut = 'ch not used'  # Make sure it doesn't match this,
        root.children.append(noise)
        noise.parent = root
        descendant.shortcut = 'desc'
        child.children.append(descendant)
        descendant.parent = child
        # Should get an error if looking for a sub-Node that doesn't exist
        self.assertRaises(LookupError, root.Resolve, 'not valid')
        # Return a child of root (no recursion)
        self.assertEqual(root.Resolve('ch'), child,
                         'Not finding a child Node.')
        # Return a descendant of root (recursion)
        self.assertEqual(root.Resolve('chdesc'), descendant,
                         'Not finding a descendant Node.')
    def test_Match(self):
        """Node.Match(path) should return all possible matches to path within
        [path].children or [path].parent.children.
        """
        root = Node()
        child = Node()
        child.shortcut = 'ch1'
        child.parent = root
        root.children.append(child)
        descendant = Node()
        descendant.shortcut = 'd1'
        descendant.parent = child
        child.children.append(descendant)
        descendant2 = Node()
        descendant2.shortcut = 'd2'
        descendant2.parent = child
        child.children.append(descendant2)
        child2 = Node()
        child2.shortcut = 'other'
        child2.parent = root
        root.children.append(child2)
        # An empty string should return all immediate children of [root].
        target = root.Match('')
        self.assertEqual(len(target), 2, 'Expected 2 results.')
        self.assertEqual(target[0], child, 'Expected [child].')
        self.assertEqual(target[1], child2, 'Expected [child2].')
        # "ch" should return only "ch1".
        target = root.Match('ch')
        self.assertEqual(len(target), 1, 'Expected 1 result.')
        self.assertEqual(target[0], child, 'Expected [child].')
        # "ch1" should return all immediate children of [child].
        target = root.Match('ch1')
        self.assertEqual(len(target), 2, 'Expected 2 results.')
        self.assertEqual(target[0], descendant, 'Expected [descendant].')
        self.assertEqual(target[1], descendant2, 'Expected [descendant2].')
        # "ch1d" should return both immediate children of [child].
        target = root.Match('ch1d')
        self.assertEqual(len(target), 2, 'Expected 2 results.')
        self.assertEqual(target[0], descendant, 'Expected [descendant].')
        self.assertEqual(target[1], descendant2, 'Expected [descendant2].')
        # "ch1d2" should return [descendant2].
        target = root.Match('ch1d2')
        self.assertEqual(len(target), 1, 'Expected 1 result.')
        self.assertEqual(target[0], descendant2, 'Expected [descendant2].')
        # "ch1d3" should return [].
        target = root.Match('ch1d3')
        self.assertEqual(len(target), 0)
        # "otherfake" should return [].
        target = root.Match('otherfake')
        self.assertEqual(len(target), 0)
    def test_Load(self):
        """Tests Node.Load().
        This is kind of an integration test, as load() is currently just a
        wrapper around 3 "private" methods. See below for direct tests on them.
        """
        # Given..
        file_contents = \
            'Shortcut=child\nShortcut=descendant\nParent=child'
        # When..
        root = Node.Load(file_contents)
        # Then.. [This is just a copy from test_link]
        self.assertEqual(len(root.children), 1,
                         'Expected 1 child of [root].')
        self.assertEqual(len(root.children[0].children), 1,
                         'Expected 1 child of [child].')
        self.assertEqual(len(root.children[0].children[0].children), 0,
                         'Expected 0 children of [descendant].')
        self.assertTrue(root.parent is None,
                        'Expected the parent of [root] to be None.')
        self.assertEqual(root.children[0].parent, root,
                         'Expected the parent of [child] to be [root].')
        self.assertEqual(root.children[0].children[0].parent,
                         root.children[0],
                         'Expected the parent of [descendant] to be [child].')
    def test_Parse(self):
        """Node._Parse should split raw file text into per-node dicts."""
        # Given..
        file_contents = \
            'Shortcut=10 \n\tval=ten\r\nShortcut=15\t\rval=fifteen '
        # When..
        result = Node._Parse(file_contents)
        # Then..
        self.assertEqual(len(result), 2,
                         'Not detecting empty line borders between nodes properly.')
        self.assertEqual(len(result[0]), 2,
                         'Wrong number of key/value pairs in the first node.')
        self.assertEqual(result[0]['shortcut'], '10',
                         'Failed to parse the first key/value pair of the first node.')
        self.assertEqual(result[0]['val'], 'ten',
                         'Failed to parse the second key/value pair of the first node.')
        self.assertEqual(len(result[1]), 2,
                         'Wrong number of key/value pairs in the second node.')
        self.assertEqual(result[1]['shortcut'], '15',
                         'Failed to parse the first key/value pair of the second node.')
        self.assertEqual(result[1]['val'], 'fifteen',
                         'Failed to parse the second key/value pair of the second node.')
    def test_Generate_Nodes(self):
        """Node._Generate_Nodes should turn parsed dicts into Node objects."""
        # Given..
        dictionaries = []
        dictionaries.append({})
        dictionaries[0]['shortcut'] = 'child'
        dictionaries[0]['description'] = 'child description'
        dictionaries.append({})
        dictionaries[1]['shortcut'] = 'descendant'
        dictionaries[1]['parent'] = 'child'
        dictionaries[1]['working_directory'] = 'working dir'
        # When..
        result = Node._Generate_Nodes(dictionaries)
        # Then..
        self.assertEqual(len(result), 2, 'Expected 2 results.')
        self.assertEqual(result[0].shortcut, 'child',
                         'Expected result0.shortcut to be "child".')
        self.assertTrue(result[0].parent is None,
                        'Expected result0.parent to be None.')
        self.assertEqual(len(result[0].children), 0,
                         'Expected result0.children to be empty.')
        self.assertEqual(result[0].description, 'child description',
                         'Expected result0.description to be "child description".')
        self.assertIsNone(result[0].working_directory,
                          'Expected result0.working_directory to be None.')
        self.assertEqual(result[1].shortcut, 'descendant',
                         'Expected result1.shortcut to be "descendant".')
        self.assertEqual(result[1].parent, 'child',
                         'Expected result1.parent to be "child".')
        self.assertEqual(len(result[1].children), 0,
                         'Expected result1.children to be empty.')
        self.assertTrue(result[1].description is None,
                        'Expected result1.description to be None.')
        self.assertEqual(result[1].working_directory, 'working dir',
                         'Expected result1.working_directory to be "working_dir".')
    def test_Link(self):
        """Node._Link should connect generated Nodes into a tree under root."""
        # Given..
        nodes = []
        nodes.append(Node())
        nodes.append(Node())
        nodes[0].shortcut = 'child'
        nodes[1].shortcut = 'descendant'
        nodes[1].parent = 'child'
        # When..
        root = Node._Link(nodes)
        # Then..
        self.assertEqual(len(root.children), 1,
                         'Expected 1 child of [root].')
        self.assertEqual(len(root.children[0].children), 1,
                         'Expected 1 child of [child].')
        self.assertEqual(len(root.children[0].children[0].children), 0,
                         'Expected 0 children of [descendant].')
        self.assertTrue(root.parent is None,
                        'Expected the parent of [root] to be None.')
        self.assertEqual(root.children[0].parent, root,
                         'Expected the parent of [child] to be [root].')
        self.assertEqual(root.children[0].children[0].parent,
                         root.children[0],
                         'Expected the parent of [descendant] to be [child].')
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
| |
import os
from django.conf import settings
from django.shortcuts import get_object_or_404
from rest_framework import status
from rest_framework.authentication import BaseAuthentication
from rest_framework.decorators import (api_view, authentication_classes,
permission_classes)
from rest_framework.exceptions import ParseError
from rest_framework.filters import OrderingFilter
from rest_framework.mixins import (CreateModelMixin, DestroyModelMixin,
ListModelMixin, RetrieveModelMixin)
from rest_framework.parsers import FormParser, JSONParser
from rest_framework.response import Response
from rest_framework.viewsets import GenericViewSet
import mkt.comm.forms as forms
import mkt.constants.comm as comm
from mkt.access import acl
from mkt.api.authentication import (RestOAuthAuthentication,
RestSharedSecretAuthentication)
from mkt.api.base import CORSMixin, MarketplaceView, SilentListModelMixin
from mkt.comm.models import (CommAttachment, CommunicationNote,
CommunicationThread, CommunicationThreadCC,
user_has_perm_app)
from mkt.comm.permissions import (AttachmentPermission,
EmailCreationPermission, NotePermission,
ThreadPermission)
from mkt.comm.serializers import (NoteSerializer, ThreadSerializer,
ThreadSerializerV2, ThreadSimpleSerializer)
from mkt.comm.tasks import consume_email
from mkt.comm.utils import create_attachments, create_comm_note
from mkt.site.utils import get_file_response
class NoAuthentication(BaseAuthentication):
    """DRF authentication class that performs no authentication of its own.

    It simply trusts whatever user object the Django middleware attached
    to the underlying WSGI request.
    """

    def authenticate(self, request):
        underlying_request = request._request
        return (underlying_request.user, None)
class CommViewSet(CORSMixin, MarketplaceView, GenericViewSet):
    """Some overriding and mixin stuff to adapt other viewsets."""
    parser_classes = (FormParser, JSONParser)

    def patched_get_request(self):
        """Return a callable that ignores its argument and yields self.request."""
        def _request_getter(_serializer):
            return self.request
        return _request_getter

    def get_serializer_class(self):
        """Hand out the serializer class, patched so serializer instances can
        reach this viewset's request through ``get_request()``.
        """
        serializer_cls = super(CommViewSet, self).get_serializer_class()
        serializer_cls.get_request = self.patched_get_request()
        return serializer_cls

    def partial_update(self, request, *args, **kwargs):
        """PATCH is not supported on comm resources."""
        return Response('Requested update operation not supported',
                        status=status.HTTP_403_FORBIDDEN)
class ThreadViewSet(SilentListModelMixin, RetrieveModelMixin,
                    DestroyModelMixin, CreateModelMixin, CommViewSet):
    # CRUD endpoints for communication threads (v1 API).
    model = CommunicationThread
    serializer_class = ThreadSerializer
    authentication_classes = (RestOAuthAuthentication,
                              RestSharedSecretAuthentication)
    permission_classes = (ThreadPermission,)
    filter_backends = (OrderingFilter,)
    cors_allowed_methods = ['get', 'post', 'patch']
    def list(self, request):
        """Deprecated by CommAppViewSet and ThreadViewSetV2."""
        self.serializer_class = ThreadSerializer
        profile = request.user
        # We list all the threads where the user has been CC'd.
        cc = list(profile.comm_thread_cc.values_list('thread', flat=True))
        # This gives 404 when an app with given slug/id is not found.
        data = {}
        if 'app' in request.GET:
            form = forms.AppSlugForm(request.GET)
            if not form.is_valid():
                return Response('App does not exist or no app slug given',
                                status=status.HTTP_404_NOT_FOUND)
            elif not user_has_perm_app(profile, form.cleaned_data['app']):
                return Response('You do not have permissions for this app',
                                status=status.HTTP_403_FORBIDDEN)
            queryset = CommunicationThread.objects.filter(
                _webapp=form.cleaned_data['app'])
            # Thread IDs and version numbers from same app.
            data['app_threads'] = list(queryset.order_by('_version__version')
                                       .values('id', '_version__version'))
            # Rename the private '_version__version' key for API consumers.
            for app_thread in data['app_threads']:
                app_thread['version__version'] = app_thread.pop(
                    '_version__version')
        else:
            # We list all the threads that user is developer of or
            # is subscribed/CC'ed to.
            queryset = CommunicationThread.objects.filter(pk__in=cc)
        self.queryset = queryset
        res = SilentListModelMixin.list(self, request)
        if res.data:
            res.data.update(data)
        return res
    def retrieve(self, *args, **kwargs):
        """Deprecated by AppThreadViewSetV2."""
        res = super(ThreadViewSet, self).retrieve(*args, **kwargs)
        # Thread IDs and version numbers from same app.
        res.data['app_threads'] = list(
            CommunicationThread.objects.filter(_webapp_id=res.data['webapp'])
            .order_by('_version__version').values('id', '_version__version'))
        # Rename the private '_version__version' key for API consumers.
        for app_thread in res.data['app_threads']:
            app_thread['version__version'] = app_thread.pop(
                '_version__version')
        return res
    def create(self, request, *args, **kwargs):
        """Create a thread (plus its first note) for an app/version.

        Returns the serialized note with 201, or the form errors with 400.
        """
        form = forms.CreateCommThreadForm(request.DATA)
        if not form.is_valid():
            return Response(
                form.errors, status=status.HTTP_400_BAD_REQUEST)
        app = form.cleaned_data['app']
        version = form.cleaned_data['version']
        thread, note = create_comm_note(
            app, version, request.user, form.cleaned_data['body'],
            note_type=form.cleaned_data['note_type'])
        return Response(
            NoteSerializer(note, context={'request': self.request}).data,
            status=status.HTTP_201_CREATED)
class NoteViewSet(ListModelMixin, CreateModelMixin, RetrieveModelMixin,
                  DestroyModelMixin, CommViewSet):
    # CRUD endpoints for individual notes within a communication thread.
    model = CommunicationNote
    serializer_class = NoteSerializer
    authentication_classes = (RestOAuthAuthentication,
                              RestSharedSecretAuthentication)
    permission_classes = (NotePermission,)
    filter_backends = (OrderingFilter,)
    cors_allowed_methods = ['get', 'patch', 'post']
    def get_queryset(self):
        """Return the notes on this thread visible to the requesting user."""
        # NOTE(review): self.comm_thread is not set anywhere in this class;
        # presumably attached by the permission/routing machinery -- confirm.
        return CommunicationNote.objects.with_perms(
            self.request.user, self.comm_thread)
    def create(self, request, *args, **kwargs):
        """Create a note on a thread, enforcing per-note-type permissions.

        Developer comments require the user to be a developer of the app;
        reviewer comments require reviewer ACL.
        """
        thread = get_object_or_404(CommunicationThread, id=kwargs['thread_id'])
        # Validate note.
        form = forms.CreateCommNoteForm(request.DATA)
        if not form.is_valid():
            return Response(form.errors, status=status.HTTP_400_BAD_REQUEST)
        note_type = form.cleaned_data['note_type']
        if (note_type == comm.DEVELOPER_COMMENT and not
            request.user.webappuser_set.filter(
                webapp=thread.webapp).exists()):
            # Developer comment only for developers.
            return Response('Only developers can make developer comments',
                            status=status.HTTP_403_FORBIDDEN)
        elif (note_type == comm.REVIEWER_COMMENT and not
              acl.check_reviewer(request)):
            # Reviewer comment only for reviewers.
            return Response('Only reviewers can make reviewer comments',
                            status=status.HTTP_403_FORBIDDEN)
        # Create notes.
        thread, note = create_comm_note(
            thread.webapp, thread.version, self.request.user,
            form.cleaned_data['body'], note_type=note_type)
        return Response(
            NoteSerializer(note, context={'request': request}).data,
            status=status.HTTP_201_CREATED)
class AttachmentViewSet(CreateModelMixin, CommViewSet):
    # Serve and create file attachments on communication notes.
    model = CommAttachment
    authentication_classes = (RestOAuthAuthentication,
                              RestSharedSecretAuthentication)
    permission_classes = (AttachmentPermission,)
    cors_allowed_methods = ['get', 'post']
    def get(self, request, note_id, pk, *args, **kwargs):
        """Stream an attachment's file back to an authorized requester."""
        attach = get_object_or_404(CommAttachment, pk=pk)
        self.check_object_permissions(request, attach)
        full_path = os.path.join(settings.REVIEWER_ATTACHMENTS_PATH,
                                 attach.filepath)
        # Force a download unless the attachment is an image (inline view).
        content_type = 'application/force-download'
        if attach.is_image():
            content_type = 'image'
        return get_file_response(request, full_path, content_type=content_type)
    def create(self, request, note_id, *args, **kwargs):
        """Attach uploaded file(s) to a note owned by the requesting user.

        Builds a management-form payload around request.FILES so the
        attachment formset can validate, then creates the attachments.
        """
        note = get_object_or_404(CommunicationNote, id=note_id)
        if not note.author.id == request.user.id:
            return Response(
                [{'non_field_errors':
                  'You must be owner of the note to attach a file.'}],
                status=status.HTTP_403_FORBIDDEN)
        # Validate attachment.
        attachment_formset = None
        if request.FILES:
            data = request.POST.copy()
            # Synthesize the formset management-form fields; only file inputs
            # named '*-attachment' count toward the total.
            data.update({
                'form-TOTAL_FORMS': len([k for k in request.FILES if
                                         k.endswith('-attachment')]),
                'form-INITIAL_FORMS': 0,
                'form-MAX_NUM_FORMS': comm.MAX_ATTACH
            })
            if data['form-TOTAL_FORMS'] > comm.MAX_ATTACH:
                # TODO: use formset validate_max=True in Django 1.6.
                return Response(
                    [{'non_field_errors':
                      'Maximum of %s files can be attached.'}],
                    status=status.HTTP_400_BAD_REQUEST)
            attachment_formset = forms.CommAttachmentFormSet(
                data=data, files=request.FILES or None)
            if not attachment_formset.is_valid():
                return Response(attachment_formset.errors,
                                status=status.HTTP_400_BAD_REQUEST)
        else:
            return Response([{'non_field_errors': 'No files were attached.'}],
                            status=status.HTTP_400_BAD_REQUEST)
        # Create attachment.
        if attachment_formset:
            create_attachments(note, attachment_formset)
        return Response(
            NoteSerializer(note, context={'request': request}).data,
            status=status.HTTP_201_CREATED)
class ThreadCCViewSet(DestroyModelMixin, CommViewSet):
    """Remove ("un-CC") the requesting user from a communication thread."""
    model = CommunicationThreadCC
    authentication_classes = (RestOAuthAuthentication,
                              RestSharedSecretAuthentication)
    permission_classes = ()
    cors_allowed_methods = ['delete']

    def destroy(self, request, **kw):
        """Delete the user's CC entry for the thread identified in the URL."""
        form = forms.UnCCForm(kw)
        if not form.is_valid():
            return Response(status=status.HTTP_400_BAD_REQUEST)
        cc_entries = CommunicationThreadCC.objects.filter(
            user=request.user, thread=form.cleaned_data['pk'])
        cc_entries.delete()
        return Response("Successfully un-cc'ed from thread.",
                        status=status.HTTP_204_NO_CONTENT)
@api_view(['POST'])
@authentication_classes((NoAuthentication,))
@permission_classes((EmailCreationPermission,))
def post_email(request):
    """Accept a raw inbound email body and queue it for async consumption."""
    email_body = request.POST.get('body')
    if email_body:
        consume_email.apply_async((email_body,))
        return Response(status=status.HTTP_201_CREATED)
    raise ParseError(
        detail='email_body not present in the POST data.')
class CommAppListView(SilentListModelMixin, CommViewSet):
    # Read-only listing of an app's communication threads (v2 API).
    model = CommunicationThread
    serializer_class = ThreadSerializerV2
    authentication_classes = (RestOAuthAuthentication,
                              RestSharedSecretAuthentication)
    permission_classes = (ThreadPermission,)  # On self.queryset.
    cors_allowed_methods = ['get']
    def list(self, request, app_slug):
        """Return list of threads for the app."""
        form = forms.AppSlugForm({'app': app_slug})
        if not form.is_valid():
            # 404 if app with given slug/id not found.
            return Response('App does not exist or no app slug given',
                            status=status.HTTP_404_NOT_FOUND)
        elif not user_has_perm_app(request.user, form.cleaned_data['app']):
            # 403 if user does not have auth to access app's comm.
            return Response('You do not have permissions for this app',
                            status=status.HTTP_403_FORBIDDEN)
        # Use simple serializer, which rets only ID + Version #s, if specified.
        if request.GET.get('serializer') == 'simple':
            self.serializer_class = ThreadSimpleSerializer
        self.queryset = CommunicationThread.objects.filter(
            _webapp=form.cleaned_data['app']).order_by('_version__version')
        return SilentListModelMixin.list(self, request)
class ThreadViewSetV2(ThreadViewSet):
    """V2 thread endpoints: same behavior as v1 with the v2 serializer."""
    serializer_class = ThreadSerializerV2

    def list(self, request):
        """List all the threads where the user has been CC'd."""
        cc_thread_ids = list(
            request.user.comm_thread_cc.values_list('thread', flat=True))
        self.queryset = CommunicationThread.objects.filter(
            pk__in=cc_thread_ids)
        return SilentListModelMixin.list(self, request)
| |
"""Support for KNX/IP lights."""
from __future__ import annotations
from typing import Any, cast
from xknx import XKNX
from xknx.devices.light import Light as XknxLight, XYYColor
from homeassistant import config_entries
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_HS_COLOR,
ATTR_RGB_COLOR,
ATTR_RGBW_COLOR,
ATTR_XY_COLOR,
COLOR_MODE_BRIGHTNESS,
COLOR_MODE_COLOR_TEMP,
COLOR_MODE_HS,
COLOR_MODE_ONOFF,
COLOR_MODE_RGB,
COLOR_MODE_RGBW,
COLOR_MODE_XY,
LightEntity,
)
from homeassistant.const import CONF_ENTITY_CATEGORY, CONF_NAME, Platform
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType
import homeassistant.util.color as color_util
from .const import DATA_KNX_CONFIG, DOMAIN, KNX_ADDRESS, ColorTempModes
from .knx_entity import KnxEntity
from .schema import LightSchema
async def async_setup_entry(
    hass: HomeAssistant,
    config_entry: config_entries.ConfigEntry,
    async_add_entities: AddEntitiesCallback,
) -> None:
    """Set up light(s) for KNX platform."""
    xknx: XKNX = hass.data[DOMAIN].xknx
    light_configs: list[ConfigType] = hass.data[DATA_KNX_CONFIG][Platform.LIGHT]
    entities = [KNXLight(xknx, entity_config) for entity_config in light_configs]
    async_add_entities(entities)
def _create_light(xknx: XKNX, config: ConfigType) -> XknxLight:
    """Return a KNX Light device to be used within XKNX.

    Translates the Home Assistant YAML/config-entry schema (LightSchema keys)
    into the keyword arguments of the xknx Light constructor.
    """
    def individual_color_addresses(color: str, feature: str) -> Any | None:
        """Load individual color address list from configuration structure."""
        if (
            LightSchema.CONF_INDIVIDUAL_COLORS not in config
            or color not in config[LightSchema.CONF_INDIVIDUAL_COLORS]
        ):
            return None
        return config[LightSchema.CONF_INDIVIDUAL_COLORS][color].get(feature)
    # The same pair of config keys (COLOR_TEMP_ADDRESS / COLOR_TEMP_STATE_ADDRESS)
    # feeds either the absolute color-temperature or the relative tunable-white
    # group addresses, depending on the configured color-temperature mode.
    group_address_tunable_white = None
    group_address_tunable_white_state = None
    group_address_color_temp = None
    group_address_color_temp_state = None
    if config[LightSchema.CONF_COLOR_TEMP_MODE] == ColorTempModes.ABSOLUTE:
        group_address_color_temp = config.get(LightSchema.CONF_COLOR_TEMP_ADDRESS)
        group_address_color_temp_state = config.get(
            LightSchema.CONF_COLOR_TEMP_STATE_ADDRESS
        )
    elif config[LightSchema.CONF_COLOR_TEMP_MODE] == ColorTempModes.RELATIVE:
        group_address_tunable_white = config.get(LightSchema.CONF_COLOR_TEMP_ADDRESS)
        group_address_tunable_white_state = config.get(
            LightSchema.CONF_COLOR_TEMP_STATE_ADDRESS
        )
    return XknxLight(
        xknx,
        name=config[CONF_NAME],
        group_address_switch=config.get(KNX_ADDRESS),
        group_address_switch_state=config.get(LightSchema.CONF_STATE_ADDRESS),
        group_address_brightness=config.get(LightSchema.CONF_BRIGHTNESS_ADDRESS),
        group_address_brightness_state=config.get(
            LightSchema.CONF_BRIGHTNESS_STATE_ADDRESS
        ),
        group_address_color=config.get(LightSchema.CONF_COLOR_ADDRESS),
        group_address_color_state=config.get(LightSchema.CONF_COLOR_STATE_ADDRESS),
        group_address_rgbw=config.get(LightSchema.CONF_RGBW_ADDRESS),
        group_address_rgbw_state=config.get(LightSchema.CONF_RGBW_STATE_ADDRESS),
        group_address_hue=config.get(LightSchema.CONF_HUE_ADDRESS),
        group_address_hue_state=config.get(LightSchema.CONF_HUE_STATE_ADDRESS),
        group_address_saturation=config.get(LightSchema.CONF_SATURATION_ADDRESS),
        group_address_saturation_state=config.get(
            LightSchema.CONF_SATURATION_STATE_ADDRESS
        ),
        group_address_xyy_color=config.get(LightSchema.CONF_XYY_ADDRESS),
        group_address_xyy_color_state=config.get(LightSchema.CONF_XYY_STATE_ADDRESS),
        group_address_tunable_white=group_address_tunable_white,
        group_address_tunable_white_state=group_address_tunable_white_state,
        group_address_color_temperature=group_address_color_temp,
        group_address_color_temperature_state=group_address_color_temp_state,
        # individual-color lights: one switch + brightness address per channel
        group_address_switch_red=individual_color_addresses(
            LightSchema.CONF_RED, KNX_ADDRESS
        ),
        group_address_switch_red_state=individual_color_addresses(
            LightSchema.CONF_RED, LightSchema.CONF_STATE_ADDRESS
        ),
        group_address_brightness_red=individual_color_addresses(
            LightSchema.CONF_RED, LightSchema.CONF_BRIGHTNESS_ADDRESS
        ),
        group_address_brightness_red_state=individual_color_addresses(
            LightSchema.CONF_RED, LightSchema.CONF_BRIGHTNESS_STATE_ADDRESS
        ),
        group_address_switch_green=individual_color_addresses(
            LightSchema.CONF_GREEN, KNX_ADDRESS
        ),
        group_address_switch_green_state=individual_color_addresses(
            LightSchema.CONF_GREEN, LightSchema.CONF_STATE_ADDRESS
        ),
        group_address_brightness_green=individual_color_addresses(
            LightSchema.CONF_GREEN, LightSchema.CONF_BRIGHTNESS_ADDRESS
        ),
        group_address_brightness_green_state=individual_color_addresses(
            LightSchema.CONF_GREEN, LightSchema.CONF_BRIGHTNESS_STATE_ADDRESS
        ),
        group_address_switch_blue=individual_color_addresses(
            LightSchema.CONF_BLUE, KNX_ADDRESS
        ),
        group_address_switch_blue_state=individual_color_addresses(
            LightSchema.CONF_BLUE, LightSchema.CONF_STATE_ADDRESS
        ),
        group_address_brightness_blue=individual_color_addresses(
            LightSchema.CONF_BLUE, LightSchema.CONF_BRIGHTNESS_ADDRESS
        ),
        group_address_brightness_blue_state=individual_color_addresses(
            LightSchema.CONF_BLUE, LightSchema.CONF_BRIGHTNESS_STATE_ADDRESS
        ),
        group_address_switch_white=individual_color_addresses(
            LightSchema.CONF_WHITE, KNX_ADDRESS
        ),
        group_address_switch_white_state=individual_color_addresses(
            LightSchema.CONF_WHITE, LightSchema.CONF_STATE_ADDRESS
        ),
        group_address_brightness_white=individual_color_addresses(
            LightSchema.CONF_WHITE, LightSchema.CONF_BRIGHTNESS_ADDRESS
        ),
        group_address_brightness_white_state=individual_color_addresses(
            LightSchema.CONF_WHITE, LightSchema.CONF_BRIGHTNESS_STATE_ADDRESS
        ),
        min_kelvin=config[LightSchema.CONF_MIN_KELVIN],
        max_kelvin=config[LightSchema.CONF_MAX_KELVIN],
    )
class KNXLight(KnxEntity, LightEntity):
    """Representation of a KNX light."""
    # Narrowed type of the wrapped device created by _create_light().
    _device: XknxLight
    def __init__(self, xknx: XKNX, config: ConfigType) -> None:
        """Initialize of KNX light."""
        super().__init__(_create_light(xknx, config))
        self._max_kelvin: int = config[LightSchema.CONF_MAX_KELVIN]
        self._min_kelvin: int = config[LightSchema.CONF_MIN_KELVIN]
        # mireds and Kelvin are inversely related: max mireds comes from min Kelvin
        self._attr_max_mireds = color_util.color_temperature_kelvin_to_mired(
            self._min_kelvin
        )
        self._attr_min_mireds = color_util.color_temperature_kelvin_to_mired(
            self._max_kelvin
        )
        self._attr_entity_category = config.get(CONF_ENTITY_CATEGORY)
        self._attr_unique_id = self._device_unique_id()
    def _device_unique_id(self) -> str:
        """Return unique id for this device.

        Prefers the switch group address; falls back to the individual
        red/green/blue brightness addresses when no switch address exists.
        """
        if self._device.switch.group_address is not None:
            return f"{self._device.switch.group_address}"
        return (
            f"{self._device.red.brightness.group_address}_"
            f"{self._device.green.brightness.group_address}_"
            f"{self._device.blue.brightness.group_address}"
        )
    @property
    def is_on(self) -> bool:
        """Return true if light is on."""
        return bool(self._device.state)
    @property
    def brightness(self) -> int | None:
        """Return the brightness of this light between 0..255."""
        if self._device.supports_brightness:
            return self._device.current_brightness
        # no dedicated brightness object: derive brightness from the color value
        if self._device.current_xyy_color is not None:
            _, brightness = self._device.current_xyy_color
            return brightness
        if self._device.supports_color or self._device.supports_rgbw:
            rgb, white = self._device.current_color
            if rgb is None:
                return white
            if white is None:
                return max(rgb)
            return max(*rgb, white)
        return None
    @property
    def rgb_color(self) -> tuple[int, int, int] | None:
        """Return the rgb color value [int, int, int]."""
        if self._device.supports_color:
            rgb, _ = self._device.current_color
            if rgb is not None:
                if not self._device.supports_brightness:
                    # brightness will be calculated from color so color must not hold brightness again
                    return cast(
                        tuple[int, int, int], color_util.match_max_scale((255,), rgb)
                    )
                return rgb
        return None
    @property
    def rgbw_color(self) -> tuple[int, int, int, int] | None:
        """Return the rgbw color value [int, int, int, int]."""
        if self._device.supports_rgbw:
            rgb, white = self._device.current_color
            if rgb is not None and white is not None:
                if not self._device.supports_brightness:
                    # brightness will be calculated from color so color must not hold brightness again
                    return cast(
                        tuple[int, int, int, int],
                        color_util.match_max_scale((255,), (*rgb, white)),
                    )
                return (*rgb, white)
        return None
    @property
    def hs_color(self) -> tuple[float, float] | None:
        """Return the hue and saturation color value [float, float]."""
        # Hue is scaled 0..360 int encoded in 1 byte in KNX (-> only 256 possible values)
        # Saturation is scaled 0..100 int
        return self._device.current_hs_color
    @property
    def xy_color(self) -> tuple[float, float] | None:
        """Return the xy color value [float, float]."""
        if self._device.current_xyy_color is not None:
            xy_color, _ = self._device.current_xyy_color
            return xy_color
        return None
    @property
    def color_temp(self) -> int | None:
        """Return the color temperature in mireds."""
        if self._device.supports_color_temperature:
            kelvin = self._device.current_color_temperature
            # Avoid division by zero if actuator reported 0 Kelvin (e.g., uninitialized DALI-Gateway)
            if kelvin is not None and kelvin > 0:
                return color_util.color_temperature_kelvin_to_mired(kelvin)
        if self._device.supports_tunable_white:
            relative_ct = self._device.current_tunable_white
            if relative_ct is not None:
                # as KNX devices typically use Kelvin we use it as base for
                # calculating ct from percent
                return color_util.color_temperature_kelvin_to_mired(
                    self._min_kelvin
                    + ((relative_ct / 255) * (self._max_kelvin - self._min_kelvin))
                )
        return None
    @property
    def color_mode(self) -> str | None:
        """Return the color mode of the light."""
        # order matters: report the most capable supported mode first
        if self._device.supports_xyy_color:
            return COLOR_MODE_XY
        if self._device.supports_hs_color:
            return COLOR_MODE_HS
        if self._device.supports_rgbw:
            return COLOR_MODE_RGBW
        if self._device.supports_color:
            return COLOR_MODE_RGB
        if (
            self._device.supports_color_temperature
            or self._device.supports_tunable_white
        ):
            return COLOR_MODE_COLOR_TEMP
        if self._device.supports_brightness:
            return COLOR_MODE_BRIGHTNESS
        return COLOR_MODE_ONOFF
    @property
    def supported_color_modes(self) -> set | None:
        """Flag supported color modes."""
        # a KNX light exposes exactly one color mode at a time
        return {self.color_mode}
    async def async_turn_on(self, **kwargs: Any) -> None:
        """Turn the light on.

        Dispatches the requested service-call attributes (brightness, color
        temperature, rgb/rgbw/hs/xy color) to the matching KNX telegrams.
        """
        brightness = kwargs.get(ATTR_BRIGHTNESS)
        mireds = kwargs.get(ATTR_COLOR_TEMP)
        rgb = kwargs.get(ATTR_RGB_COLOR)
        rgbw = kwargs.get(ATTR_RGBW_COLOR)
        hs_color = kwargs.get(ATTR_HS_COLOR)
        xy_color = kwargs.get(ATTR_XY_COLOR)
        # plain "turn on" with no attributes: just send the switch telegram
        if (
            not self.is_on
            and brightness is None
            and mireds is None
            and rgb is None
            and rgbw is None
            and hs_color is None
            and xy_color is None
        ):
            await self._device.set_on()
            return
        async def set_color(
            rgb: tuple[int, int, int], white: int | None, brightness: int | None
        ) -> None:
            """Set color of light. Normalize colors for brightness when not writable."""
            if self._device.brightness.writable:
                # let the KNX light controller handle brightness
                await self._device.set_color(rgb, white)
                if brightness:
                    await self._device.set_brightness(brightness)
                return
            if brightness is None:
                # normalize for brightness if brightness is derived from color
                brightness = self.brightness or 255
            rgb = cast(
                tuple[int, int, int],
                tuple(color * brightness // 255 for color in rgb),
            )
            white = white * brightness // 255 if white is not None else None
            await self._device.set_color(rgb, white)
        # return after RGB(W) color has changed as it implicitly sets the brightness
        if rgbw is not None:
            await set_color(rgbw[:3], rgbw[3], brightness)
            return
        if rgb is not None:
            await set_color(rgb, None, brightness)
            return
        if mireds is not None:
            # clamp requested color temperature to the configured Kelvin range
            kelvin = int(color_util.color_temperature_mired_to_kelvin(mireds))
            kelvin = min(self._max_kelvin, max(self._min_kelvin, kelvin))
            if self._device.supports_color_temperature:
                await self._device.set_color_temperature(kelvin)
            elif self._device.supports_tunable_white:
                # scale Kelvin to a relative 0..255 tunable-white value
                relative_ct = int(
                    255
                    * (kelvin - self._min_kelvin)
                    / (self._max_kelvin - self._min_kelvin)
                )
                await self._device.set_tunable_white(relative_ct)
        if xy_color is not None:
            await self._device.set_xyy_color(
                XYYColor(color=xy_color, brightness=brightness)
            )
            return
        if hs_color is not None:
            # round so only one telegram will be sent if the other matches state
            hue = round(hs_color[0])
            sat = round(hs_color[1])
            await self._device.set_hs_color((hue, sat))
        if brightness is not None:
            # brightness: 1..255; 0 brightness will call async_turn_off()
            if self._device.brightness.writable:
                await self._device.set_brightness(brightness)
                return
            # brightness without color in kwargs; set via color
            if self.color_mode == COLOR_MODE_XY:
                await self._device.set_xyy_color(XYYColor(brightness=brightness))
                return
            # default to white if color not known for RGB(W)
            if self.color_mode == COLOR_MODE_RGBW:
                _rgbw = self.rgbw_color
                if not _rgbw or not any(_rgbw):
                    _rgbw = (0, 0, 0, 255)
                await set_color(_rgbw[:3], _rgbw[3], brightness)
                return
            if self.color_mode == COLOR_MODE_RGB:
                _rgb = self.rgb_color
                if not _rgb or not any(_rgb):
                    _rgb = (255, 255, 255)
                await set_color(_rgb, None, brightness)
                return
    async def async_turn_off(self, **kwargs: Any) -> None:
        """Turn the light off."""
        await self._device.set_off()
| |
# Copyright (c) 2013-2015 by California Institute of Technology
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the California Institute of Technology nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CALTECH
# OR THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
"""Finite State Machines Module"""
from __future__ import absolute_import
from __future__ import print_function
import copy
from pprint import pformat
from random import choice
from tulip.transys.labeled_graphs import LabeledDiGraph
# inline imports:
#
# import sys
# from tulip.transys.export import machine2scxml
# horizontal rule used by the __str__ pretty-printers below
_hl = 40 * '-'
# port type
# the "pure signal" port type: a pure signal is either present or absent
pure = {'present', 'absent'}
def is_valuation(ports, valuations):
    """Raise TypeError unless each port's value belongs to that port's type.

    @param ports: maps port names to port types (an iterable of admissible
        values, or a callable membership predicate)
    @param valuations: maps port names to the values to be checked
    """
    for name, port_type in ports.items():
        value = valuations[name]
        # functional set membership description ?
        accepted = port_type(value) if callable(port_type) else value in port_type
        if not accepted:
            raise TypeError('Not a valuation.')
def create_machine_ports(spc_vars):
    """Create proper port domains of valuations, given port types.

    @param spc_vars: port names and types inside tulip.
        For arbitrary finite types the type can be a list of strings,
        instead of a range of integers.
        These are as originally defined by the user or synth.
    @return: C{dict} mapping each port name to its domain (a C{set})
    @raise TypeError: if a variable has an unsupported type
    """
    ports = dict()
    for env_var, var_type in spc_vars.items():
        if var_type == 'boolean':
            domain = {0, 1}
        elif isinstance(var_type, tuple):
            # integer domain, inclusive of both endpoints
            start, end = var_type
            domain = set(range(start, end + 1))
        elif isinstance(var_type, list):
            # arbitrary finite domain defined by list var_type
            domain = set(var_type)
        else:
            # bug fix: an unsupported type previously fell through and
            # silently reused the domain computed for the preceding
            # variable (or raised NameError for the first one)
            raise TypeError(
                'unsupported variable type: {t}'.format(t=var_type))
        ports[env_var] = domain
    return ports
class Transducer(LabeledDiGraph):
    r"""Sequential Transducer, i.e., a letter-to-letter function.

    Inputs
    ======
    P = {p1, p2,...} is the set of input ports.
    An input port p takes values in a set Vp.
    Set Vp is called the "type" of input port p.
    A "valuation" is an assignment of values to the input ports in P.
    We call "inputs" the set of pairs::

        {(p_i, Vp_i),...}

    of input ports p_i and their corresponding types Vp_i.
    A guard is a predicate (bool-valued) used as sub-label for a transition.
    A guard is defined by a set and evaluated using set membership.
    So given an input port value p=x, then if::

        x \in guard_set

    then the guard is True, otherwise it is False.
    The "inputs" are defined by an OrderedDict::

        {'p1':explicit, 'p2':check, 'p3':None, ...}

    where:
      - C{explicit}:
        is an iterable representation of Vp,
        possible only for discrete Vp.
        If 'p1' is explicitly typed, then guards are evaluated directly::

            input_port_value == guard_value ?

      - C{check}:
        is a class with methods:
          - C{__contains__(x) }:
            check if guard value given to input port 'p1' is
            in the set of possible values Vp.
          - C{__call__(guard_set, input_port_value) }:
            check if C{input_port_value} \in C{guard_set}
            This allows symbolic type definitions.
            For example, C{input_port_value} might be assigned
            int values, but the C{guard_set} be defined by
            a symbolic expression as the str: 'x<=5'.
            Then the user is responsible for providing
            the appropriate method to the Mealy Machine,
            using the custom C{check} class described here.
            Note that we could provide a rudimentary library
            for the basic types of checks, e.g., for
            the above simple symbolic case, where using
            function eval() is sufficient.
      - C{None}:
        signifies that no type is currently defined for
        this input port, so input type checking and guard
        evaluation are disabled.
        This can be used to skip type definitions when
        they are not needed by the user.
        However, since Machines are in general the output
        of synthesis, it follows that they are constructed
        by code, so the benefits of typedefs will be
        considerable compared to the required coding effort.

    Guards annotate transitions::

        Guards: States x States ---> Input_Predicates

    Outputs
    =======
    Similarly defined to inputs, but:
      - for Mealy Machines they annotate transitions
      - for Moore Machines they annotate states

    State Variables
    ===============
    Similarly defined to inputs, they annotate states,
    for both Mealy and Moore machines::

        States ---> State_Variables

    Update Function
    ===============
    The transition relation:
      - for Mealy Machines::

        States x Input_Valuations ---> Output_Valuations x States

        Note that in the range Output_Valuations are ordered before States
        to emphasize that an output_valuation is produced
        during the transition, NOT at the next state.
        The data structure representation of the update function is
        by storage of the Guards function and definition of Guard
        evaluation for each input port via the OrderedDict discussed above.
      - for Moore Machines::

        States x Input_Valuations ---> States
        States ---> Output_valuations

    Note
    ====
    A transducer may operate on either finite or infinite words, i.e.,
    it is not equipped with interpretation semantics on the words,
    so it does not "care" about word length.
    It continues as long as its input is fed with letters.

    For Machines, each state label consists of (possibly multiple) sublabels,
    each of which is either a variable, or, only for Moore machines,
    may be an output.

    See Also
    ========
    FSM, MealyMachine, MooreMachine
    """
    def __init__(self):
        # values will point to values of _*_label_def below
        self.state_vars = dict()
        self.inputs = dict()
        self.outputs = dict()
        # self.set_actions = {}
        # state labeling
        self._state_label_def = dict()
        self._state_dot_label_format = {'type?label': ':',
                                        'separator': r'\\n'}
        # edge labeling
        self._transition_label_def = dict()
        self._transition_dot_label_format = {'type?label': ':',
                                             'separator': r'\\n'}
        self._transition_dot_mask = dict()
        self._state_dot_mask = dict()
        self.default_export_fname = 'fsm'
        LabeledDiGraph.__init__(self)
        self.dot_node_shape = {'normal': 'ellipse'}
        # NOTE(review): 'fsm' was already assigned above, before the base-class
        # __init__ — this re-assignment is redundant unless LabeledDiGraph.__init__
        # overwrites the attribute; confirm before removing either line.
        self.default_export_fname = 'fsm'
    def add_inputs(self, new_inputs, masks=None):
        """Create new inputs.

        @param new_inputs: C{dict} of pairs {port_name : port_type}
          where:
            - port_name: str
            - port_type: Iterable | check class
        @type new_inputs: dict

        @param masks: custom mask functions, for each sublabel
            based on its current value
            each such function returns:
              - True, if the sublabel should be shown
              - False, otherwise (to hide it)
        @type masks: C{dict} of functions C{{port_name : mask_function}}
            each C{mask_function} returns bool
        """
        for port_name, port_type in new_inputs.items():
            # append
            self._transition_label_def[port_name] = port_type
            # inform inputs
            self.inputs[port_name] = port_type
            # printing format
            self._transition_dot_label_format[port_name] = str(port_name)
            if masks is None:
                continue
            if port_name in masks:
                mask_func = masks[port_name]
                self._transition_dot_mask[port_name] = mask_func
    def add_state_vars(self, new_state_vars):
        """Create new state variables (labels annotating states).

        @param new_state_vars: C{dict} of pairs {var_name : var_type},
            analogous to the C{new_inputs} argument of L{add_inputs}
        """
        for var_name, var_type in new_state_vars.items():
            # append
            self._state_label_def[var_name] = var_type
            # inform state vars
            self.state_vars[var_name] = self._state_label_def[var_name]
            # printing format
            self._state_dot_label_format[var_name] = str(var_name)
class MooreMachine(Transducer):
    """Moore machine.

    A Moore machine implements the discrete dynamics::

        x[k+1] = f(x[k], u[k] )
        y[k] = g(x[k] )

    where:
      - k: discrete time = sequence index
      - x: state = valuation of state variables
      - X: set of states = S
      - u: inputs = valuation of input ports
      - y: output actions = valuation of output ports
      - f: X-> 2^X, transition function
      - g: X-> Out, output function

    Observe that the output depends only on the state.

    Note
    ====
    valuation: assignment of values to each port

    Reference
    =========
    U{[M56]
    <https://tulip-control.sourceforge.io/doc/bibliography.html#m56>}
    """
    def __init__(self):
        """Instantiate a Moore state machine."""
        Transducer.__init__(self)
        self.dot_node_shape = {'normal': 'ellipse'}
        self.default_export_fname = 'moore'
    def __str__(self):
        """Get informal string representation."""
        s = (
            _hl + '\nMoore Machine: ' + self.name + '\n' + _hl + '\n' +
            'State Variables:\n\t(name : type)\n' +
            _print_ports(self.state_vars) +
            'Input Ports:\n\t(name : type)\n' +
            _print_ports(self.inputs) +
            'Output Ports:\n\t(name : type)\n' +
            _print_ports(self.outputs) +
            'States & State Var Values: (state : outputs : vars)\n')
        for state, label_dict in self.states(data=True):
            s += '\t' + str(state) + ' :\n'
            # split into vars and outputs
            var_values = {k: v for k, v in label_dict.items()
                          if k in self.state_vars}
            output_values = {k: v for k, v in label_dict.items()
                             if k in self.outputs}
            s += (_print_label(var_values) + ' : ' +
                  _print_label(output_values))
        s += (
            'Initial States:\n' +
            pformat(self.states.initial, indent=3) + 2 * '\n')
        s += 'Transitions & Labels: (from --> to : label)\n'
        for from_state, to_state, label_dict in self.transitions(data=True):
            s += (
                '\t' + str(from_state) + ' ---> ' +
                str(to_state) + ' :\n' +
                _print_label(label_dict))
        s += _hl + '\n'
        return s
    def add_outputs(self, new_outputs, masks=None):
        """Add new output ports (as state labels, per Moore semantics).

        @param new_outputs: C{dict} of pairs {port_name : port_type},
            analogous to the C{new_inputs} argument of L{Transducer.add_inputs}
        @param masks: optional per-port display mask functions,
            see L{Transducer.add_inputs}
        """
        for port_name, port_type in new_outputs.items():
            # append
            self._state_label_def[port_name] = port_type
            # inform state vars
            self.outputs[port_name] = port_type
            # printing format: outputs are prefixed with '/' in dot labels
            self._state_dot_label_format[port_name] = (
                '/' + str(port_name))
            if masks is None:
                continue
            if port_name in masks:
                mask_func = masks[port_name]
                self._state_dot_mask[port_name] = mask_func
class MealyMachine(Transducer):
    """Mealy machine.

    Examples
    ========
    Traffic Light: Fig. 3.14, p.72 U{[LS11]
    <https://tulip-control.sourceforge.io/doc/bibliography.html#ls11>}

    >>> m = MealyMachine()
    >>> pure_signal = {'present', 'absent'}
    >>> m.add_inputs([('tick', pure_signal) ])
    >>> m.add_outputs([('go', pure_signal), ('stop', pure_signal) ])
    >>> m.states.add_from(['red', 'green', 'yellow'])
    >>> m.states.initial.add('red')

    For brevity:

    >>> p = 'present'
    >>> a = 'absent'

    The transitions can equivalently be defined with dict().
    So instead of the previous C{m.transitions.add}, we can use:

    >>> label = {'tick':p, 'go':p, 'stop':a}
    >>> m.transitions.add('red', 'green', **label)
    >>> label = {'tick':p, 'go':a, 'stop':p}
    >>> m.transitions.add('green', 'yellow', **label)
    >>> label = {'tick':p, 'go':a, 'stop':p}
    >>> m.transitions.add('yellow', 'red', **label)

    This avoids any ordering issues, i.e., changing the
    order of the sublabels does not matter:

    >>> label = {'go':p, 'tick':p, 'stop':a}
    >>> m.transitions.add('red', 'green', **label)

    Theory
    ======
    A Mealy machine implements the discrete dynamics::

        x[k+1] = f(x[k], u[k] )
        y[k] = g(x[k], u[k] )

    where:
      - k: discrete time = sequence index
      - x: state = valuation of state variables
      - X: set of states = S
      - u: inputs = valuation of input ports
      - y: output actions = valuation of output ports
      - f: X-> 2^X, transition function
      - g: X-> Out, output function

    Observe that the output is defined when a reaction occurs to an input.

    Note
    ====
    valuation: assignment of values to each port

    Reference
    =========
    U{[M55]
    <https://tulip-control.sourceforge.io/doc/bibliography.html#m55>}
    """
    def __init__(self):
        Transducer.__init__(self)
        # will point to selected values of self._transition_label_def
        self.dot_node_shape = {'normal': 'ellipse'}
        self.default_export_fname = 'mealy'
    def __str__(self):
        """Get informal string representation."""
        s = (
            _hl + '\nMealy Machine: ' + self.name + '\n' + _hl + '\n' +
            'State Variables:\n\t(name : type)\n' +
            _print_ports(self.state_vars))
        s += 'States & State Var Values:\n'
        for state, label_dict in self.states(data=True):
            s += ('\t' + str(state) + ' :\n' +
                  _print_label(label_dict))
        s += (
            'Initial States:\n' +
            pformat(self.states.initial, indent=3) + 2 * '\n' +
            'Input Ports:\n\t(name : type)\n' +
            _print_ports(self.inputs) +
            'Output Ports:\n\t(name : type)\n' +
            _print_ports(self.outputs) +
            'Transitions & Labels: (from --> to : label)\n')
        for from_state, to_state, label_dict in self.transitions(data=True):
            s += (
                '\t' + str(from_state) + ' ---> ' +
                str(to_state) + ' :\n' +
                _print_label(label_dict))
        s += _hl + '\n'
        return s
    def _save(self, path, fileformat):
        """Export options available only for Mealy machines.

        @type fileformat: 'scxml'
        @return: True on success, False if C{fileformat} is unsupported
        """
        if fileformat != 'scxml':
            return False
        # inline import: keeps the scxml exporter an optional dependency
        from tulip.transys.export import machine2scxml
        s = machine2scxml.mealy2scxml(self)
        # dump to file
        f = open(path, 'w')
        f.write(s)
        f.close()
        return True
    def add_outputs(self, new_outputs, masks=None):
        """Add new outputs.

        @param new_outputs: dict of pairs {port_name : port_type}
          where:
            - port_name: str
            - port_type: Iterable | check class
        @type new_outputs: dict

        @param masks: custom mask functions, for each sublabel
            based on its current value
            each such function returns:
              - True, if the sublabel should be shown
              - False, otherwise (to hide it)
        @type masks: dict of functions
            keys are port_names (see arg: new_outputs)
            each function returns bool
        """
        for port_name, port_type in new_outputs.items():
            # append
            self._transition_label_def[port_name] = port_type
            # inform state vars
            self.outputs[port_name] = (
                self._transition_label_def[port_name])
            # printing format: outputs are prefixed with '/' in dot labels
            self._transition_dot_label_format[port_name] = (
                '/' + str(port_name))
            if masks is None:
                continue
            if port_name in masks:
                mask_func = masks[port_name]
                self._transition_dot_mask[port_name] = mask_func
    def reaction(self, from_state, inputs, lazy=False):
        """Return next state and output, when reacting to given inputs.

        The machine must be deterministic.
        (for each state and input at most a single transition enabled,
        this notion does not coincide with output-determinism)

        Not exactly a wrapper of L{Transitions.find},
        because it matches only that part of an edge label
        that corresponds to the inputs.

        @param from_state: transition starts from this state.
        @type from_state: element of C{self.states}

        @param inputs: C{dict} assigning a valid value to each input port.
        @type inputs: {'port_name':port_value, ...}

        @param lazy: Lazy evaluation of inputs? If lazy=True, then
            allow an incomplete specification of input if there is
            precisely one enabled transition.
        @type lazy: bool

        @return: output values and next state.
        @rtype: (next_state, outputs)
          where C{outputs}: C{{'port_name':port_value, ...}}
        """
        if lazy:
            # only match the input ports the caller actually specified
            restricted_inputs = set(self.inputs).intersection(inputs.keys())
        else:
            restricted_inputs = self.inputs
        # match only inputs (explicit valuations, not symbolic)
        enabled_trans = [
            (i, j, d)
            for i, j, d in self.edges([from_state], data=True)
            if project_dict(d, restricted_inputs) == inputs]
        if len(enabled_trans) == 0:
            # collect a few example valuations for the error message below
            some_possibilities = []
            for i, j, d in self.edges([from_state], data=True):
                # The number of possible inputs to suggest here is
                # arbitrary. Consider making it a function parameter.
                if len(some_possibilities) >= 5:
                    break
                possible_inputs = project_dict(d, restricted_inputs)
                if possible_inputs not in some_possibilities:
                    some_possibilities.append(possible_inputs)
        # must be deterministic
        try:
            ((_, next_state, attr_dict), ) = enabled_trans
        except ValueError:
            if len(enabled_trans) == 0:
                if len(some_possibilities) == 0:
                    raise Exception(
                        'state {from_state} is a dead-end. '
                        'There are no possible inputs from '
                        'it.'.format(from_state=from_state))
                else:
                    raise Exception(
                        'not a valid input, '
                        'some possible inputs include: '
                        '{t}'.format(t=some_possibilities))
            else:
                raise Exception(
                    'must be input-deterministic, '
                    'found enabled transitions: '
                    '{t}'.format(t=enabled_trans))
        outputs = project_dict(attr_dict, self.outputs)
        return (next_state, outputs)
    def reactionpart(self, from_state, inputs):
        """Wraps reaction() with lazy=True
        """
        return self.reaction(from_state, inputs, lazy=True)
    def run(self, from_state=None, input_sequences=None):
        """Guided or interactive run.

        @param input_sequences: if C{None}, then call L{interactive_run},
            otherwise call L{guided_run}.

        @return: output of L{guided_run}, otherwise C{None}.
        """
        if input_sequences is None:
            interactive_run(self, from_state=from_state)
        else:
            return guided_run(self, from_state=from_state,
                              input_sequences=input_sequences)
def guided_run(mealy, from_state=None, input_sequences=None):
    """Run deterministic machine reacting to given inputs.

    @param mealy: input-deterministic Mealy machine
    @type mealy: L{MealyMachine}

    @param from_state: start simulation at this state.
        If C{None}, then use the unique initial state C{Sinit}.

    @param input_sequences: one sequence of values for each input port
    @type input_sequences: C{dict} of C{lists}

    @return: sequence of states and sequence of output valuations
    @rtype: (states, output_sequences)
      where:
        - C{states} is a C{list} of states excluding C{from_state}
        - C{output_sequences} is a C{dict} of C{lists}

    @raise ValueError: if input ports are missing or sequences differ in length
    @raise TypeError: if any input sequence is not a C{list}
    """
    seqs = input_sequences  # abbrv
    missing_ports = set(mealy.inputs).difference(seqs)
    if missing_ports:
        # bug fix: 'str + set' raised TypeError instead of the intended
        # ValueError with a readable message
        raise ValueError(
            'missing input port(s): {p}'.format(p=missing_ports))
    # dict of lists ?
    non_lists = {k: v for k, v in seqs.items() if not isinstance(v, list)}
    if non_lists:
        raise TypeError('Values must be lists, for: ' + str(non_lists))
    # uniform list len ?
    if len(set(len(x) for x in seqs.values())) > 1:
        raise ValueError('All input sequences must be of equal length.')
    # note: initial sys state non-determinism not checked
    # initial sys edge non-determinism checked instead (more restrictive)
    if from_state is None:
        state = next(iter(mealy.states.initial))
    else:
        state = from_state
    n = len(next(iter(seqs.values())))
    states_seq = []
    output_seqs = {k: list() for k in mealy.outputs}
    for i in range(n):
        inputs = {k: v[i] for k, v in seqs.items()}
        state, outputs = mealy.reaction(state, inputs)
        states_seq.append(state)
        for k in output_seqs:
            output_seqs[k].append(outputs[k])
    return (states_seq, output_seqs)
def random_run(mealy, from_state=None, N=10):
    """Return run from given state for N random inputs.

    Inputs selected randomly in a way that does not block the machine
    So they are not arbitrarily random.
    If the machine is a valid synthesis solution,
    then all safe environment inputs can be generated this way.

    Randomly generated inputs may violate liveness assumption on environment.

    @param mealy: input-deterministic Mealy machine
    @type mealy: C{MealyMachine}

    @param N: number of reactions (inputs)
    @type N: int

    @return: same as L{guided_run}
    """
    state = next(iter(mealy.states.initial)) if from_state is None else from_state
    states_seq = []
    output_seqs = {port: list() for port in mealy.outputs}
    for _ in range(N):
        # pick one enabled outgoing transition at random
        candidates = list(mealy.transitions.find([state]))
        _, next_state, attr_dict = choice(candidates)
        # extend execution trace
        states_seq.append(next_state)
        # extend output traces
        outputs = project_dict(attr_dict, mealy.outputs)
        for port in output_seqs:
            output_seqs[port].append(outputs[port])
        # printing
        inputs = project_dict(attr_dict, mealy.inputs)
        print(
            'move from\n\t state: ' + str(state) +
            '\n\t with input:' + str(inputs) +
            '\n\t to state: ' + str(next_state) +
            '\n\t reacting by producing output: ' + str(outputs))
        # updates
        state = next_state
    return (states_seq, output_seqs)
def interactive_run(mealy, from_state=None):
    """Run input-deterministic Mealy machine using user input.

    @param mealy: input-deterministic Mealy machine
    @type mealy: L{MealyMachine}
    """
    state = next(iter(mealy.states.initial)) if from_state is None else from_state
    # NOTE(review): `state` is never advanced between iterations, so each step
    # re-prompts from the same state — confirm whether this is intended.
    while True:
        print('\n Current state: ' + str(state))
        if _interactive_run_step(mealy, state) is None:
            return
def _interactive_run_step(mealy, state):
    """Let the user select one outgoing transition of C{state}.

    @param mealy: input-deterministic Mealy machine
    @param state: current state of the interactive simulation
    @return: C{True} after a transition was selected and reported,
        C{None} if the simulation should stop
        (no outgoing transitions, or the user pressed "Enter").
    @raise Exception: if C{state} is C{None}
    """
    if state is None:
        raise Exception('Current state is None')
    # note: the spaghettiness of previous version was caused
    # by interactive simulation allowing both output-non-determinism
    # and implementing spawning (which makes sense only for generators,
    # *not* for transducers)
    trans = mealy.transitions.find([state])
    if not trans:
        print('Stop: no outgoing transitions.')
        return None
    while True:
        try:
            selected_trans = _select_transition(mealy, trans)
            break
        except (ValueError, IndexError):
            # int() failed or the index was out of range: ask again.
            # (Bug fix: the old bare `except` fell through without
            # `continue`, so `selected_trans` could be referenced while
            # unbound, raising NameError instead of retrying.)
            print('Selection not recognized. Please try again.')
    if selected_trans is None:
        return None
    (from_, to_state, attr_dict) = selected_trans
    inputs = project_dict(attr_dict, mealy.inputs)
    outputs = project_dict(attr_dict, mealy.outputs)
    print(
        'Moving from state: ' + str(state) +
        ', to state: ' + str(to_state) + '\n' +
        'given inputs: ' + str(inputs) + '\n' +
        'reacting with outputs: ' + str(outputs))
    return True
def _select_transition(mealy, trans):
    """Prompt the user to pick one of the transitions in C{trans}.

    @return: the selected transition triple,
        or C{None} if the user pressed "Enter" to stop.
    """
    import sys
    pieces = ['Found more than 1 outgoing transitions:' + 2 * '\n']
    for idx, triple in enumerate(trans):
        (src, dst, edge_data) = triple
        ins = project_dict(edge_data, mealy.inputs)
        outs = project_dict(edge_data, mealy.outputs)
        pieces.append(
            '\t' + str(idx) + ' : ' +
            str(src) + ' ---> ' + str(dst) + '\n' +
            '\t inputs:' + str(ins) +
            '\t outputs:' + str(outs) +
            '\n\n')
    pieces.append(
        '\n' +
        'Select from the available transitions above\n' +
        'by giving its integer,\n' +
        'Press "Enter" to stop the simulation:\n' +
        '\t int = ')
    print(''.join(pieces))
    reply = sys.stdin.readline().rstrip('\r\n')
    if not reply:
        return None
    return trans[int(reply)]
def moore2mealy(moore):
    """Convert Moore machine to equivalent Mealy machine.

    Each Moore state's output label is pushed onto every
    outgoing transition of the corresponding Mealy state.

    Reference
    =========
    U{[LS11]
    <https://tulip-control.sourceforge.io/doc/bibliography.html#ls11>}

    @type moore: L{MooreMachine}
    @rtype: L{MealyMachine}
    """
    if not isinstance(moore, MooreMachine):
        raise TypeError('moore must be a MooreMachine')
    mealy = MealyMachine()
    # copy input ports, preserving any dot-mask functions
    for name, dom in moore.inputs.items():
        f = moore._transition_dot_mask.get(name)
        mealy.add_inputs({name: dom}, masks=None if f is None else {name: f})
    # copy output ports, preserving any dot-mask functions
    for name, dom in moore.outputs.items():
        f = moore._state_dot_mask.get(name)
        mealy.add_outputs({name: dom}, masks=None if f is None else {name: f})
    # copy states and initial states
    mealy.states.add_from(moore.states())
    mealy.states.initial.add_from(moore.states.initial)
    # copy transitions, annotating each with its source state's outputs
    for u in moore:
        state_outputs = copy.deepcopy({
            k: v for k, v in moore.states[u].items()
            if k in moore.outputs})
        for _, v, edge_data in moore.transitions.find(u):
            # note that we don't filter only input ports,
            # so other edge annotation is preserved
            new_data = copy.deepcopy(edge_data)
            new_data.update(state_outputs)
            mealy.transitions.add(u, v, new_data)
    return mealy
def mealy2moore(mealy):
    """Convert Mealy machine to almost equivalent Moore machine.

    A Mealy machine cannot (in general) be transformed to an
    equivalent Moore machine.  It can be converted to a Moore machine
    with an arbitrary initial output, which outputs the Mealy output
    at its next reaction.

    Reference
    =========
    U{[LS11]
    <https://tulip-control.sourceforge.io/doc/bibliography.html#ls11>}

    @type mealy: L{MealyMachine}
    @rtype: L{MooreMachine}
    @raise TypeError: if C{mealy} is not a L{MealyMachine}
    """
    # TODO: check for when Mealy is exactly convertible to Moore
    if not isinstance(mealy, MealyMachine):
        # bug fix: the message used to say "moore must be a MealyMachine"
        raise TypeError('mealy must be a MealyMachine')
    moore = MooreMachine()
    # copy input ports, preserving any dot-mask functions
    for port_name, port_type in mealy.inputs.items():
        mask_func = mealy._transition_dot_mask.get(port_name)
        if mask_func is None:
            masks = None
        else:
            masks = {port_name: mask_func}
        moore.add_inputs({port_name: port_type}, masks=masks)
    # copy output ports (Mealy outputs annotate transitions,
    # hence the transition dot mask is used here too)
    for port_name, port_type in mealy.outputs.items():
        mask_func = mealy._transition_dot_mask.get(port_name)
        if mask_func is None:
            masks = None
        else:
            masks = {port_name: mask_func}
        moore.add_outputs({port_name: port_type}, masks=masks)
    # initial state, labeled with an arbitrary output valuation
    out = {k: list(v)[0] for k, v in mealy.outputs.items()}
    s0 = list(mealy.states.initial)[0]
    # create maps between Moore and Mealy states
    moore2mealy_states = dict()  # {qj : si} (function)
    mealy2moore_states = dict()  # {si : {qj, qk, ...} } (relation)
    new_s0 = _create_state_str(
        s0, out, moore, moore2mealy_states,
        mealy2moore_states)
    moore.states.add(new_s0, out)
    moore.states.initial.add(new_s0)
    # worklist search: copy transitions, creating Moore states on demand
    Q = set()  # frontier
    S = set()  # visited
    Q.add(new_s0)
    S.add(new_s0)
    while Q:
        new_si = Q.pop()
        si = moore2mealy_states[new_si]
        for si_, sj, attr_dict in mealy.transitions.find(si):
            in_values, out_values = _split_io(attr_dict, mealy)
            new_sj = _create_state_str(
                sj, out_values, moore, moore2mealy_states,
                mealy2moore_states)
            moore.transitions.add(new_si, new_sj, in_values)
            if new_sj not in S:
                Q.add(new_sj)
                S.add(new_sj)
    return moore
def _print_ports(port_dict):
s = ''
for port_name, port_type in port_dict.items():
s += '\t' + str(port_name) + ' : '
s += pformat(port_type) + '\n'
s += '\n'
return s
def _print_label(label_dict):
s = ''
for name, value in label_dict.items():
s += '\t\t' + str(name) + ' : ' + str(value) + '\n'
s += '\n'
return s
def _create_state_str(mealy_state, output, moore,
                      moore2mealy_states,
                      mealy2moore_states):
    """Return a Moore state for C{(mealy_state, output)}, creating it if absent.

    Used when converting Mealy -> Moore.  Both state maps are
    updated in place when a new Moore state is minted.
    """
    candidates = mealy2moore_states.setdefault(mealy_state, set())
    # reuse an existing Moore state that carries the same output label
    for q in candidates:
        if moore.states[q] == output:
            return q
    # otherwise mint a fresh state named after the current state count
    q = 's' + str(len(moore))
    moore.states.add(q, output)
    moore2mealy_states[q] = mealy_state
    candidates.add(q)
    return q
def _split_io(attr_dict, machine):
"""Split into inputs and outputs."""
input_values = {k: v for k, v in attr_dict.items()
if k in machine.inputs}
output_values = {k: v for k, v in attr_dict.items()
if k in machine.outputs}
return input_values, output_values
def project_dict(x, y):
    """Return the sub-dict of C{x} restricted to keys also in C{y}."""
    # was a lambda assignment (PEP 8 E731); a def gives a proper name
    # in tracebacks and allows a docstring
    return {k: x[k] for k in x if k in y}


def trim_dict(x, y):
    """Return a copy of C{x} without the keys in C{y}."""
    return {k: x[k] for k in x if k not in y}
def strip_ports(mealy, names):
    """Return a copy of C{mealy} without the ports in C{names}.

    For example, to remove the atomic propositions
    labeling the transition system C{ts} used
    (so they are dependent variables), call it as:

    >>> strip_ports(mealy, ts.atomic_propositions)

    @type mealy: L{MealyMachine}
    @type names: iterable container of C{str}
    """
    stripped = MealyMachine()
    stripped.add_inputs(trim_dict(mealy.inputs, names))
    stripped.add_outputs(trim_dict(mealy.outputs, names))
    stripped.add_nodes_from(mealy)
    stripped.states.initial.add_from(mealy.states.initial)
    for src, dst, edge_data in mealy.edges(data=True):
        stripped.add_edge(src, dst, **trim_dict(edge_data, names))
    return stripped
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the swig wrapper tf_optimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.grappler import tf_optimizer
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import training as train
class MemoryOptimizerSwapTest(test.TestCase):
  """Tests the Grappler memory optimizer's manual swapping rewrite."""

  def testNoSwapping(self):
    """Make sure the graph is preserved when there is nothing to swap."""
    a = variables.Variable(10, name='a')
    b = variables.Variable(20, name='b')
    c = math_ops.add_n([a, b], name='c')
    d = math_ops.add_n([b, c], name='d')
    train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
    train_op.append(d)
    mg = meta_graph.create_meta_graph_def(graph=ops.get_default_graph())
    graph_size = len(mg.graph_def.node)
    nodes = [node.name for node in mg.graph_def.node]
    # MANUAL memory optimization without any '_swap_to_host' annotations
    # should leave the graph untouched.
    rewriter_config = rewriter_config_pb2.RewriterConfig(
        disable_model_pruning=True,
        constant_folding=rewriter_config_pb2.RewriterConfig.OFF,
        memory_optimization=rewriter_config_pb2.RewriterConfig.MANUAL)
    graph = tf_optimizer.OptimizeGraph(rewriter_config, mg)
    # same node count and same node names as before the rewrite
    self.assertEqual(len(graph.node), graph_size)
    self.assertItemsEqual([node.name for node in graph.node], nodes)

  def testSimpleSwap(self):
    """Check that the swap annotations are followed."""
    a = variables.Variable(10, name='a')
    b = variables.Variable(20, name='b')
    c = math_ops.add_n([a, b], name='c')
    d = math_ops.add_n([b, c], name='d')
    train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
    train_op.append(d)
    # annotate input 0 of 'd' (tensor b) for swapping to host memory
    d.op.node_def.attr['_swap_to_host'].i = 0
    mg = meta_graph.create_meta_graph_def(graph=ops.get_default_graph())
    graph_size = len(mg.graph_def.node)
    rewriter_config = rewriter_config_pb2.RewriterConfig(
        disable_model_pruning=True,
        constant_folding=rewriter_config_pb2.RewriterConfig.OFF,
        memory_optimization=rewriter_config_pb2.RewriterConfig.MANUAL)
    graph = tf_optimizer.OptimizeGraph(rewriter_config, mg)
    # the rewrite inserts exactly one swap-out / swap-in node pair
    self.assertEqual(len(graph.node), graph_size + 2)
    self.assertTrue(
        set([node.name for node in graph.node]) > set(
            ['a', 'b', 'c', 'd', 'swap_in_d_0', 'swap_out_d_0']))
    # verify the swap nodes were wired in between b/read and d
    for node in graph.node:
      if node.name == 'swap_in_d_0':
        self.assertEqual('swap_out_d_0', node.input[0])
        self.assertEqual('^b/read', node.input[1])
      elif node.name == 'swap_out_d_0':
        self.assertEqual('b/read', node.input[0])
      elif node.name == 'd':
        self.assertEqual('swap_in_d_0', node.input[0])
        self.assertEqual('c', node.input[1])
class MemoryOptimizerRecomputeTest(test.TestCase):
  """Tests the Python interface to recomputation rewrites.

  See core/grappler/optimizers/memory_optimizer_test.cc for functional tests.
  """

  def _GetMetaGraph(self, batch_size=14, image_dim=12, optimizer_scope_name=''):
    """A simple layered graph with conv, an intermediate op, and a ReLU.

    Returns a tuple (metagraph, init_op_name, train_op_name, loss_name).
    """
    graph = ops.Graph()
    with graph.as_default():
      # fixed seed so separate runs of this graph are comparable
      random_seed.set_random_seed(1)
      current_activation = variable_scope.get_variable(
          name='start', shape=[batch_size, image_dim, image_dim, 5])
      conv_filter = variable_scope.get_variable(
          name='filter', shape=[5, 5, 5, 5])
      for layer_number in range(10):
        with variable_scope.variable_scope('layer_{}'.format(layer_number)):
          after_conv = nn.conv2d(current_activation, conv_filter, [1, 1, 1, 1],
                                 'SAME')
          # intermediate op between conv and ReLU
          current_activation = 2. * after_conv
          current_activation = nn.relu(current_activation)
      loss = math_ops.reduce_mean(current_activation)
      # optimizer_scope_name controls the gradient name prefix,
      # exercised by testRewritingNameScopedGradientNames below
      with ops.name_scope(optimizer_scope_name):
        optimizer = train.AdamOptimizer(0.001)
        train_op = optimizer.minimize(loss)
      init_op = variables.global_variables_initializer()
      metagraph = train.export_meta_graph()
    return (metagraph, init_op.name, train_op.name, loss.name)

  def testRewritingDefaultGradientNames(self):
    """Tests that rewriting occurs with default gradient names."""
    (original_metagraph, _, _, _) = self._GetMetaGraph()
    rewritten_graph_def = tf_optimizer.OptimizeGraph(
        rewriter_config_pb2.RewriterConfig(
            disable_model_pruning=True,
            constant_folding=rewriter_config_pb2.RewriterConfig.OFF,
            arithmetic_optimization=rewriter_config_pb2.RewriterConfig.OFF,
            memory_optimization=rewriter_config_pb2.RewriterConfig.
            RECOMPUTATION_HEURISTICS), original_metagraph)
    # rewriting should only add nodes, never remove them
    self.assertGreater(
        len(rewritten_graph_def.node),
        len(original_metagraph.graph_def.node))
    # the original graph has no recomputation nodes ...
    self.assertEqual(
        0,
        len([node for node in original_metagraph.graph_def.node
             if 'Recomputed/' in node.name]))
    # ... the rewritten graph has two per layer
    self.assertEqual(
        20,  # Two per layer
        len([node for node in rewritten_graph_def.node
             if 'Recomputed/' in node.name]))

  def testRewritingNameScopedGradientNames(self):
    """Tests that rewriting occurs with non-standard gradient names."""
    (original_metagraph, _, _, _) = self._GetMetaGraph(
        optimizer_scope_name='optimizer')
    rewritten_graph_def = tf_optimizer.OptimizeGraph(
        rewriter_config_pb2.RewriterConfig(
            disable_model_pruning=True,
            constant_folding=rewriter_config_pb2.RewriterConfig.OFF,
            arithmetic_optimization=rewriter_config_pb2.RewriterConfig.OFF,
            memory_optimization=rewriter_config_pb2.RewriterConfig.
            RECOMPUTATION_HEURISTICS,
            # must match the 'optimizer' name scope used above
            memory_optimizer_target_node_name_prefix='optimizer/gradients/'),
        original_metagraph)
    self.assertGreater(
        len(rewritten_graph_def.node),
        len(original_metagraph.graph_def.node))
    self.assertEqual(
        0,
        len([node for node in original_metagraph.graph_def.node
             if 'Recomputed/' in node.name]))
    self.assertEqual(
        20,  # Two per layer
        len([node for node in rewritten_graph_def.node
             if 'Recomputed/' in node.name]))

  def _GetMemoryOptimizerSessionConfig(self):
    """Session config enabling the HEURISTICS memory optimization."""
    rewrite_options = rewriter_config_pb2.RewriterConfig(
        disable_model_pruning=True,
        memory_optimization=rewriter_config_pb2.RewriterConfig.HEURISTICS)
    graph_options = config_pb2.GraphOptions(rewrite_options=rewrite_options)
    return config_pb2.ConfigProto(graph_options=graph_options)

  def _RunMetaGraphWithConfig(
      self, config, metagraph, init_op_name, train_op_name, loss_op_name):
    """Import `metagraph`, run init + two training steps, return the loss."""
    graph = ops.Graph()
    with graph.as_default():
      train.import_meta_graph(metagraph)
      init_op = graph.get_operation_by_name(init_op_name)
      train_op = graph.get_operation_by_name(train_op_name)
      loss_op = graph.get_tensor_by_name(loss_op_name)
      with session.Session(config=config, graph=graph) as sess:
        sess.run(init_op)
        sess.run(train_op)
        sess.run(train_op)
        return sess.run(loss_op)

  def testRecomputationRewritingNoErrors(self):
    """Tests that graph output is not significantly different with rewriting."""
    (original_metagraph, init_op_name, train_op_name, loss_op_name
    ) = self._GetMetaGraph()
    # run the same metagraph twice: once plain, once memory-optimized
    original_loss = self._RunMetaGraphWithConfig(
        config=config_pb2.ConfigProto(),
        metagraph=original_metagraph,
        init_op_name=init_op_name,
        train_op_name=train_op_name,
        loss_op_name=loss_op_name)
    memory_optimized_loss = self._RunMetaGraphWithConfig(
        config=self._GetMemoryOptimizerSessionConfig(),
        metagraph=original_metagraph,
        init_op_name=init_op_name,
        train_op_name=train_op_name,
        loss_op_name=loss_op_name)
    # recomputation must not change the numerical result (beyond tolerance)
    self.assertAllClose(original_loss, memory_optimized_loss, rtol=1e-4)
if __name__ == '__main__':
  # run all test cases in this module
  test.main()
| |
# Copyright 2009-2015 MongoDB, Inc.
# Modifications copyright (C) 2018 Gabriel Leopoldino
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the objectid module."""
import datetime
import os
import pickle
import sys
import struct
sys.path[0:0] = [""]
from bson.objectid import ObjectId, _fnv_1a_24
from bson.objectid import InvalidId
from bson.py3compat import PY3, _unicode
from bson.tz_util import (FixedOffset,
utc)
if sys.version_info[:2] == (2, 6):
import unittest2 as unittest
from unittest2 import SkipTest
else:
import unittest
from unittest import SkipTest
# fnv_1a_24 tests taken from MongoDB Python Driver at https://github.com/mongodb/mongo-python-driver/commit/61850357a0e0eeec1a30e1adc0bbf7ebee807358
# Landon Curt Noll's test suite for FNV-1a32
# http://isthe.com/chongo/src/fnv/test_fnv.c
# Helpers mirroring the macros in Noll's test_fnv.c.
# They were lambda assignments (PEP 8 E731); defs give proper names
# in tracebacks and allow docstrings.
def TEST(x):
    """Use byte string C{x} as-is (no NUL terminator)."""
    return x


def TEST0(x):
    """Append a trailing NUL byte, mirroring C string termination."""
    return x + b"\x00"


def R10(x):
    """Repeat C{x} ten times."""
    return x * 10


def R500(x):
    """Repeat C{x} five hundred times."""
    return x * 500
# Input corpus for the FNV-1a/24 test vector below; byte strings taken
# from Landon Curt Noll's FNV test suite (test_fnv.c).  TEST uses the
# bytes as-is, TEST0 appends a NUL byte, R10/R500 repeat them 10/500x.
fnv_test_str = [
    TEST(b""),
    TEST(b"a"),
    TEST(b"b"),
    TEST(b"c"),
    TEST(b"d"),
    TEST(b"e"),
    TEST(b"f"),
    TEST(b"fo"),
    TEST(b"foo"),
    TEST(b"foob"),
    TEST(b"fooba"),
    TEST(b"foobar"),
    TEST0(b""),
    TEST0(b"a"),
    TEST0(b"b"),
    TEST0(b"c"),
    TEST0(b"d"),
    TEST0(b"e"),
    TEST0(b"f"),
    TEST0(b"fo"),
    TEST0(b"foo"),
    TEST0(b"foob"),
    TEST0(b"fooba"),
    TEST0(b"foobar"),
    TEST(b"ch"),
    TEST(b"cho"),
    TEST(b"chon"),
    TEST(b"chong"),
    TEST(b"chongo"),
    TEST(b"chongo "),
    TEST(b"chongo w"),
    TEST(b"chongo wa"),
    TEST(b"chongo was"),
    TEST(b"chongo was "),
    TEST(b"chongo was h"),
    TEST(b"chongo was he"),
    TEST(b"chongo was her"),
    TEST(b"chongo was here"),
    TEST(b"chongo was here!"),
    TEST(b"chongo was here!\n"),
    TEST0(b"ch"),
    TEST0(b"cho"),
    TEST0(b"chon"),
    TEST0(b"chong"),
    TEST0(b"chongo"),
    TEST0(b"chongo "),
    TEST0(b"chongo w"),
    TEST0(b"chongo wa"),
    TEST0(b"chongo was"),
    TEST0(b"chongo was "),
    TEST0(b"chongo was h"),
    TEST0(b"chongo was he"),
    TEST0(b"chongo was her"),
    TEST0(b"chongo was here"),
    TEST0(b"chongo was here!"),
    TEST0(b"chongo was here!\n"),
    TEST(b"cu"),
    TEST(b"cur"),
    TEST(b"curd"),
    TEST(b"curds"),
    TEST(b"curds "),
    TEST(b"curds a"),
    TEST(b"curds an"),
    TEST(b"curds and"),
    TEST(b"curds and "),
    TEST(b"curds and w"),
    TEST(b"curds and wh"),
    TEST(b"curds and whe"),
    TEST(b"curds and whey"),
    TEST(b"curds and whey\n"),
    TEST0(b"cu"),
    TEST0(b"cur"),
    TEST0(b"curd"),
    TEST0(b"curds"),
    TEST0(b"curds "),
    TEST0(b"curds a"),
    TEST0(b"curds an"),
    TEST0(b"curds and"),
    TEST0(b"curds and "),
    TEST0(b"curds and w"),
    TEST0(b"curds and wh"),
    TEST0(b"curds and whe"),
    TEST0(b"curds and whey"),
    TEST0(b"curds and whey\n"),
    TEST(b"hi"), TEST0(b"hi"),
    TEST(b"hello"), TEST0(b"hello"),
    TEST(b"\xff\x00\x00\x01"), TEST(b"\x01\x00\x00\xff"),
    TEST(b"\xff\x00\x00\x02"), TEST(b"\x02\x00\x00\xff"),
    TEST(b"\xff\x00\x00\x03"), TEST(b"\x03\x00\x00\xff"),
    TEST(b"\xff\x00\x00\x04"), TEST(b"\x04\x00\x00\xff"),
    TEST(b"\x40\x51\x4e\x44"), TEST(b"\x44\x4e\x51\x40"),
    TEST(b"\x40\x51\x4e\x4a"), TEST(b"\x4a\x4e\x51\x40"),
    TEST(b"\x40\x51\x4e\x54"), TEST(b"\x54\x4e\x51\x40"),
    TEST(b"127.0.0.1"), TEST0(b"127.0.0.1"),
    TEST(b"127.0.0.2"), TEST0(b"127.0.0.2"),
    TEST(b"127.0.0.3"), TEST0(b"127.0.0.3"),
    TEST(b"64.81.78.68"), TEST0(b"64.81.78.68"),
    TEST(b"64.81.78.74"), TEST0(b"64.81.78.74"),
    TEST(b"64.81.78.84"), TEST0(b"64.81.78.84"),
    TEST(b"feedface"), TEST0(b"feedface"),
    TEST(b"feedfacedaffdeed"), TEST0(b"feedfacedaffdeed"),
    TEST(b"feedfacedeadbeef"), TEST0(b"feedfacedeadbeef"),
    TEST(b"line 1\nline 2\nline 3"),
    TEST(b"chongo <Landon Curt Noll> /\\../\\"),
    TEST0(b"chongo <Landon Curt Noll> /\\../\\"),
    TEST(b"chongo (Landon Curt Noll) /\\../\\"),
    TEST0(b"chongo (Landon Curt Noll) /\\../\\"),
    TEST(b"http://antwrp.gsfc.nasa.gov/apod/astropix.html"),
    TEST(b"http://en.wikipedia.org/wiki/Fowler_Noll_Vo_hash"),
    TEST(b"http://epod.usra.edu/"),
    TEST(b"http://exoplanet.eu/"),
    TEST(b"http://hvo.wr.usgs.gov/cam3/"),
    TEST(b"http://hvo.wr.usgs.gov/cams/HMcam/"),
    TEST(b"http://hvo.wr.usgs.gov/kilauea/update/deformation.html"),
    TEST(b"http://hvo.wr.usgs.gov/kilauea/update/images.html"),
    TEST(b"http://hvo.wr.usgs.gov/kilauea/update/maps.html"),
    TEST(b"http://hvo.wr.usgs.gov/volcanowatch/current_issue.html"),
    TEST(b"http://neo.jpl.nasa.gov/risk/"),
    TEST(b"http://norvig.com/21-days.html"),
    TEST(b"http://primes.utm.edu/curios/home.php"),
    TEST(b"http://slashdot.org/"),
    TEST(b"http://tux.wr.usgs.gov/Maps/155.25-19.5.html"),
    TEST(b"http://volcano.wr.usgs.gov/kilaueastatus.php"),
    TEST(b"http://www.avo.alaska.edu/activity/Redoubt.php"),
    TEST(b"http://www.dilbert.com/fast/"),
    TEST(b"http://www.fourmilab.ch/gravitation/orbits/"),
    TEST(b"http://www.fpoa.net/"),
    TEST(b"http://www.ioccc.org/index.html"),
    TEST(b"http://www.isthe.com/cgi-bin/number.cgi"),
    TEST(b"http://www.isthe.com/chongo/bio.html"),
    TEST(b"http://www.isthe.com/chongo/index.html"),
    TEST(b"http://www.isthe.com/chongo/src/calc/lucas-calc"),
    TEST(b"http://www.isthe.com/chongo/tech/astro/venus2004.html"),
    TEST(b"http://www.isthe.com/chongo/tech/astro/vita.html"),
    TEST(b"http://www.isthe.com/chongo/tech/comp/c/expert.html"),
    TEST(b"http://www.isthe.com/chongo/tech/comp/calc/index.html"),
    TEST(b"http://www.isthe.com/chongo/tech/comp/fnv/index.html"),
    TEST(b"http://www.isthe.com/chongo/tech/math/number/howhigh.html"),
    TEST(b"http://www.isthe.com/chongo/tech/math/number/number.html"),
    TEST(b"http://www.isthe.com/chongo/tech/math/prime/mersenne.html"),
    TEST(b"http://www.isthe.com/chongo/tech/math/prime/mersenne.html#largest"),
    TEST(b"http://www.lavarnd.org/cgi-bin/corpspeak.cgi"),
    TEST(b"http://www.lavarnd.org/cgi-bin/haiku.cgi"),
    TEST(b"http://www.lavarnd.org/cgi-bin/rand-none.cgi"),
    TEST(b"http://www.lavarnd.org/cgi-bin/randdist.cgi"),
    TEST(b"http://www.lavarnd.org/index.html"),
    TEST(b"http://www.lavarnd.org/what/nist-test.html"),
    TEST(b"http://www.macosxhints.com/"),
    TEST(b"http://www.mellis.com/"),
    TEST(b"http://www.nature.nps.gov/air/webcams/parks/havoso2alert/havoalert.cfm"),
    TEST(b"http://www.nature.nps.gov/air/webcams/parks/havoso2alert/timelines_24.cfm"),
    TEST(b"http://www.paulnoll.com/"),
    TEST(b"http://www.pepysdiary.com/"),
    TEST(b"http://www.sciencenews.org/index/home/activity/view"),
    TEST(b"http://www.skyandtelescope.com/"),
    TEST(b"http://www.sput.nl/~rob/sirius.html"),
    TEST(b"http://www.systemexperts.com/"),
    TEST(b"http://www.tq-international.com/phpBB3/index.php"),
    TEST(b"http://www.travelquesttours.com/index.htm"),
    TEST(b"http://www.wunderground.com/global/stations/89606.html"),
    TEST(R10(b"21701")),
    TEST(R10(b"M21701")),
    TEST(R10(b"2^21701-1")),
    TEST(R10(b"\x54\xc5")),
    TEST(R10(b"\xc5\x54")),
    TEST(R10(b"23209")),
    TEST(R10(b"M23209")),
    TEST(R10(b"2^23209-1")),
    TEST(R10(b"\x5a\xa9")),
    TEST(R10(b"\xa9\x5a")),
    TEST(R10(b"391581216093")),
    TEST(R10(b"391581*2^216093-1")),
    TEST(R10(b"\x05\xf9\x9d\x03\x4c\x81")),
    TEST(R10(b"FEDCBA9876543210")),
    TEST(R10(b"\xfe\xdc\xba\x98\x76\x54\x32\x10")),
    TEST(R10(b"EFCDAB8967452301")),
    TEST(R10(b"\xef\xcd\xab\x89\x67\x45\x23\x01")),
    TEST(R10(b"0123456789ABCDEF")),
    TEST(R10(b"\x01\x23\x45\x67\x89\xab\xcd\xef")),
    TEST(R10(b"1032547698BADCFE")),
    TEST(R10(b"\x10\x32\x54\x76\x98\xba\xdc\xfe")),
    TEST(R500(b"\x00")),
    TEST(R500(b"\x07")),
    TEST(R500(b"~")),
    TEST(R500(b"\x7f"))
]
# Expected FNV-1a/24 hash for each entry of `fnv_test_str`, in order
# (index i of this list corresponds to fnv_test_str[i]).
# Bug fix: the old dict literal listed the key for fnv_test_str[97]
# twice with the same value; building the dict from a flat value list
# removes the duplicate without changing the resulting mapping.
_fnv1a_24_hashes = [
    0x1c9d44, 0x0c29c8, 0x0c2d02, 0x0c2cb4, 0x0c2492, 0x0c2200, 0x0c277a,
    0x22e820, 0xf37e7e, 0x5076d0, 0xaaa1b3, 0x9cf9d7, 0x0c5d1a, 0x24d06f,
    0x2c3fe2, 0x29c561, 0x1d61b0, 0x1ae633, 0x2255de, 0xf39f58, 0x50ac14,
    0xab3aa7, 0x9c4c6f, 0x1c9eb4, 0x299f11, 0x85801c, 0x29778b, 0x46b985,
    0x564ec0, 0xdd5c0c, 0x77eded, 0xca9677, 0xeb9b9a, 0xe67a30, 0xd32f6a,
    0x743fc8, 0x006376, 0x9c99cb, 0x8524b9, 0x993001, 0x85c7d6, 0x29fe8b,
    0x469249, 0x56698e, 0xdd8e4c, 0x787611, 0xca6243, 0xeaf0e4, 0xe648b0,
    0xd355aa, 0x740522, 0x004d4e, 0x9c09a7, 0x84f129, 0x993a9d, 0x27dfcd,
    0x298129, 0x5637c9, 0xb9140f, 0x5bf5a7, 0xc42805, 0xcc0e97, 0x3b4c5d,
    0x59f0a7, 0x94de0b, 0x5a0a72, 0xbee56f, 0x8363fd, 0xd5346c, 0xa14715,
    0x56b1b5, 0xb8e81f, 0x5b4a33, 0xc3f6c5, 0xcc3f23, 0x3b0a59, 0x59c467,
    0x9510cb, 0x59bdc4, 0xbf0b0f, 0x83ff3d, 0xd54252, 0xa156e9, 0xe2d780,
    0x3af6f2, 0xd234c0, 0x9f2ce4, 0x935133, 0x8fb8a9, 0x69f34b, 0x8fb375,
    0xef1266, 0x8fb585, 0xc3bfd1, 0x8fb031, 0xe4d46f, 0x17906a, 0x0bfece,
    0x178d02, 0xaddad9, 0x17a9ca, 0x2633a1, 0xa3d116, 0xe2328d, 0xa3cf8c,
    0xdfb740, 0xa3cdfe, 0xdd3d03, 0x5636ba, 0xb80830, 0x53e841, 0x16b9a9,
    0x5b8948, 0x1a202b, 0x88b139, 0x2f0186, 0x364109, 0x69b55d, 0x7604b9,
    0xc8bd3c, 0xb4eab4, 0x4e927c, 0xb140dd, 0x1b25e1, 0xbb59c8, 0x524a34,
    0x16ef98, 0x648bd3, 0xa4bc83, 0x53ae47, 0x302859, 0x6deda7, 0x36db15,
    0x9d33fc, 0xbb6ce2, 0xf83893, 0x08bf51, 0xcc8e5f, 0xe20f9f, 0xe97f2e,
    0x37b27b, 0x9e874a, 0xe63f5a, 0xb50b11, 0xd678e6, 0xd5b723, 0x450bb7,
    0x72d79d, 0x06679c, 0x52e15c, 0x9664f7, 0x3258b6, 0xed6ea7, 0x7d7ce2,
    0xc71ba1, 0x84f14b, 0x8ecf2e, 0x94f673, 0x970112, 0x6e172a, 0xf8f6e7,
    0xf58843, 0x17b6b2, 0xad4cfb, 0x256811, 0xb18dd8, 0x61c153, 0x47d20d,
    0x8b689f, 0xd2a40b, 0x549b0a, 0xe1b55b, 0x0cd3d1, 0x471605, 0x5eef10,
    0xed3629, 0x624952, 0x9b8688, 0x15e25f, 0xa98d05, 0xdf8bcc, 0x1e9051,
    0x3f70db, 0x95aedb, 0xa7f7d7, 0x3bc660, 0x610967, 0x157785, 0x2b2800,
    0x8239ef, 0x5869f5, 0x415c76, 0xe4ff6f, 0xb7977d, 0xa43a7b, 0xb3be1e,
    0x777aaf, 0x21c38a, 0x9d0839, 0x823d2f, 0xa27250, 0xc5c656, 0x3b0800,
]
fnv1a_24_vector = {
    fnv_test_str[i]: h for i, h in enumerate(_fnv1a_24_hashes)}
def oid(x):
    """Return a fresh ObjectId; the argument is ignored.

    NOTE(review): `x` is unused — presumably this exists so the
    function can be used as a map-style callback; confirm against callers.
    """
    return ObjectId()
def oid_generated_on_client(oid):
    """Is this process's PID in this ObjectId?

    Bytes 7-8 of an ObjectId generated here hold the PID modulo 0xFFFF
    as a big-endian unsigned short.
    """
    doc_pid = struct.unpack(">H", oid.binary[7:9])[0]
    own_pid = os.getpid() % 0xFFFF
    return own_pid == doc_pid
class TestObjectId(unittest.TestCase):
    """Unit tests for bson.objectid.ObjectId."""

    def test_creation(self):
        # unsupported argument types raise TypeError;
        # malformed strings raise InvalidId
        self.assertRaises(TypeError, ObjectId, 4)
        self.assertRaises(TypeError, ObjectId, 175.0)
        self.assertRaises(TypeError, ObjectId, {"test": 4})
        self.assertRaises(TypeError, ObjectId, ["something"])
        self.assertRaises(InvalidId, ObjectId, "")
        self.assertRaises(InvalidId, ObjectId, "12345678901")
        self.assertRaises(InvalidId, ObjectId, "1234567890123")
        # valid constructions: no argument, 12 raw bytes,
        # or another ObjectId (copy)
        self.assertTrue(ObjectId())
        self.assertTrue(ObjectId(b"123456789012"))
        a = ObjectId()
        self.assertTrue(ObjectId(a))

    def test_fnv_1a_24(self):
        # _fnv_1a_24 must reproduce the reference FNV-1a/24 test vector
        for key in fnv1a_24_vector:
            self.assertEqual(_fnv_1a_24(key), fnv1a_24_vector[key])

    def test_unicode(self):
        # unicode hex strings round-trip like str hex strings
        a = ObjectId()
        self.assertEqual(a, ObjectId(_unicode(a)))
        self.assertEqual(ObjectId("123456789012123456789012"),
                         ObjectId(u"123456789012123456789012"))
        self.assertRaises(InvalidId, ObjectId, u"hello")

    def test_from_hex(self):
        # 'G' is not a hex digit, so these must be rejected
        ObjectId("123456789012123456789012")
        self.assertRaises(InvalidId, ObjectId, "123456789012123456789G12")
        self.assertRaises(InvalidId, ObjectId, u"123456789012123456789G12")

    def test_repr_str(self):
        self.assertEqual(repr(ObjectId("1234567890abcdef12345678")),
                         "ObjectId('1234567890abcdef12345678')")
        self.assertEqual(str(ObjectId("1234567890abcdef12345678")),
                         "1234567890abcdef12345678")
        # raw-bytes construction hex-encodes on str()
        self.assertEqual(str(ObjectId(b"123456789012")),
                         "313233343536373839303132")
        self.assertEqual(ObjectId("1234567890abcdef12345678").binary,
                         b'\x124Vx\x90\xab\xcd\xef\x124Vx')
        self.assertEqual(str(ObjectId(b'\x124Vx\x90\xab\xcd\xef\x124Vx')),
                         "1234567890abcdef12345678")

    def test_equality(self):
        a = ObjectId()
        self.assertEqual(a, ObjectId(a))
        self.assertEqual(ObjectId(b"123456789012"),
                         ObjectId(b"123456789012"))
        # two freshly generated ids must differ
        self.assertNotEqual(ObjectId(), ObjectId())
        # an ObjectId never equals its raw bytes
        self.assertNotEqual(ObjectId(b"123456789012"), b"123456789012")
        # Explicitly test inequality
        self.assertFalse(a != ObjectId(a))
        self.assertFalse(ObjectId(b"123456789012") !=
                         ObjectId(b"123456789012"))

    def test_binary_str_equivalence(self):
        # binary and hex-string representations construct equal ids
        a = ObjectId()
        self.assertEqual(a, ObjectId(a.binary))
        self.assertEqual(a, ObjectId(str(a)))

    def test_pid(self):
        self.assertTrue(oid_generated_on_client(ObjectId()))

    def test_generation_time(self):
        # generation_time is timezone-aware (UTC) and close to "now"
        d1 = datetime.datetime.utcnow()
        d2 = ObjectId().generation_time
        self.assertEqual(utc, d2.tzinfo)
        d2 = d2.replace(tzinfo=None)
        self.assertTrue(d2 - d1 < datetime.timedelta(seconds=2))

    def test_from_datetime(self):
        if 'PyPy 1.8.0' in sys.version:
            # See https://bugs.pypy.org/issue1092
            raise SkipTest("datetime.timedelta is broken in pypy 1.8.0")
        d = datetime.datetime.utcnow()
        d = d - datetime.timedelta(microseconds=d.microsecond)
        oid = ObjectId.from_datetime(d)
        self.assertEqual(d, oid.generation_time.replace(tzinfo=None))
        # only the 4 timestamp bytes are set; the rest are zero
        self.assertEqual("0" * 16, str(oid)[8:])
        # aware datetimes are converted to UTC first
        aware = datetime.datetime(1993, 4, 4, 2,
                                  tzinfo=FixedOffset(555, "SomeZone"))
        as_utc = (aware - aware.utcoffset()).replace(tzinfo=utc)
        oid = ObjectId.from_datetime(aware)
        self.assertEqual(as_utc, oid.generation_time)

    def test_pickling(self):
        # round-trip across all pickle protocols (-1 = highest)
        orig = ObjectId()
        for protocol in [0, 1, 2, -1]:
            pkl = pickle.dumps(orig, protocol=protocol)
            self.assertEqual(orig, pickle.loads(pkl))

    def test_pickle_backwards_compatability(self):
        # This string was generated by pickling an ObjectId in pymongo
        # version 1.9
        pickled_with_1_9 = (
            b"ccopy_reg\n_reconstructor\np0\n"
            b"(cbson.objectid\nObjectId\np1\nc__builtin__\n"
            b"object\np2\nNtp3\nRp4\n"
            b"(dp5\nS'_ObjectId__id'\np6\n"
            b"S'M\\x9afV\\x13v\\xc0\\x0b\\x88\\x00\\x00\\x00'\np7\nsb.")
        # We also test against a hardcoded "New" pickle format so that we
        # make sure we're backward compatible with the current version in
        # the future as well.
        pickled_with_1_10 = (
            b"ccopy_reg\n_reconstructor\np0\n"
            b"(cbson.objectid\nObjectId\np1\nc__builtin__\n"
            b"object\np2\nNtp3\nRp4\n"
            b"S'M\\x9afV\\x13v\\xc0\\x0b\\x88\\x00\\x00\\x00'\np5\nb.")
        if PY3:
            # Have to load using 'latin-1' since these were pickled in python2.x.
            oid_1_9 = pickle.loads(pickled_with_1_9, encoding='latin-1')
            oid_1_10 = pickle.loads(pickled_with_1_10, encoding='latin-1')
        else:
            oid_1_9 = pickle.loads(pickled_with_1_9)
            oid_1_10 = pickle.loads(pickled_with_1_10)
        self.assertEqual(oid_1_9, ObjectId("4d9a66561376c00b88000000"))
        self.assertEqual(oid_1_9, oid_1_10)

    def test_is_valid(self):
        # is_valid mirrors the constructor's accept/reject behavior
        # but returns a bool instead of raising
        self.assertFalse(ObjectId.is_valid(None))
        self.assertFalse(ObjectId.is_valid(4))
        self.assertFalse(ObjectId.is_valid(175.0))
        self.assertFalse(ObjectId.is_valid({"test": 4}))
        self.assertFalse(ObjectId.is_valid(["something"]))
        self.assertFalse(ObjectId.is_valid(""))
        self.assertFalse(ObjectId.is_valid("12345678901"))
        self.assertFalse(ObjectId.is_valid("1234567890123"))
        self.assertTrue(ObjectId.is_valid(b"123456789012"))
        self.assertTrue(ObjectId.is_valid("123456789012123456789012"))
# Run this module's unit tests when executed as a script.
if __name__ == "__main__":
    unittest.main()
| |
"""Makes figure with gradient-weighted class-activation maps (Grad-CAM)."""
import os
import pickle
import argparse
import numpy
import matplotlib
matplotlib.use('agg')
from matplotlib import pyplot
from PIL import Image
from gewittergefahr.gg_utils import general_utils
from gewittergefahr.gg_utils import radar_utils
from gewittergefahr.gg_utils import monte_carlo
from gewittergefahr.gg_utils import file_system_utils
from gewittergefahr.gg_utils import error_checking
from gewittergefahr.deep_learning import cnn
from gewittergefahr.deep_learning import gradcam
from gewittergefahr.deep_learning import training_validation_io as trainval_io
from gewittergefahr.plotting import plotting_utils
from gewittergefahr.plotting import cam_plotting
from gewittergefahr.plotting import significance_plotting
from gewittergefahr.plotting import imagemagick_utils
from gewittergefahr.scripts import plot_input_examples as plot_examples
# Command-line values meaning "no Monte Carlo file for this composite".
NONE_STRINGS = ['None', 'none']
# Radar heights and fields to keep from each input file (files may contain
# more; see _read_one_composite).
RADAR_HEIGHTS_M_AGL = numpy.array([2000, 6000, 10000], dtype=int)
RADAR_FIELD_NAMES = [radar_utils.REFL_NAME, radar_utils.VORTICITY_NAME]
# Plotting constants.
COLOUR_BAR_LENGTH = 0.25
PANEL_NAME_FONT_SIZE = 30
COLOUR_BAR_FONT_SIZE = 25
# ImageMagick binary used by _overlay_text to stamp titles onto images.
CONVERT_EXE_NAME = '/usr/bin/convert'
TITLE_FONT_SIZE = 150
TITLE_FONT_NAME = 'DejaVu-Sans-Bold'
FIGURE_RESOLUTION_DPI = 300
CONCAT_FIGURE_SIZE_PX = int(1e7)
# Names of command-line arguments (shared between the parser and _run).
GRADCAM_FILES_ARG_NAME = 'input_gradcam_file_names'
MC_FILES_ARG_NAME = 'input_monte_carlo_file_names'
COMPOSITE_NAMES_ARG_NAME = 'composite_names'
COLOUR_MAP_ARG_NAME = 'colour_map_name'
MIN_VALUES_ARG_NAME = 'min_colour_values'
MAX_VALUES_ARG_NAME = 'max_colour_values'
NUM_CONTOURS_ARG_NAME = 'num_contours'
SMOOTHING_RADIUS_ARG_NAME = 'smoothing_radius_grid_cells'
MAX_FDR_ARG_NAME = 'monte_carlo_max_fdr'
OUTPUT_DIR_ARG_NAME = 'output_dir_name'
# Help strings for the command-line arguments.
GRADCAM_FILES_HELP_STRING = (
    'List of Grad-CAM files (each will be read by `gradcam.read_file`).'
)
MC_FILES_HELP_STRING = (
    'List of files with Monte Carlo significance (one per saliency file). Each'
    ' will be read by `_read_monte_carlo_test`. If you do not want to plot '
    'significance for the [i]th composite, make the [i]th list element "None".'
)
COMPOSITE_NAMES_HELP_STRING = (
    'List of composite names (one for each Grad-CAM file). This list must be '
    'space-separated, but after reading the list, underscores within each item '
    'will be replaced by spaces.'
)
COLOUR_MAP_HELP_STRING = (
    'Name of colour map. Class activation for each predictor will be plotted '
    'with the same colour map. For example, if name is "Greys", the colour map'
    ' used will be `pyplot.cm.Greys`. This argument supports only pyplot '
    'colour maps.'
)
MIN_VALUES_HELP_STRING = (
    'Minimum class activation in each colour scheme (one per file). Use '
    'negative values to let these be determined automatically.'
)
MAX_VALUES_HELP_STRING = 'Same as `{0:s}` but for max values.'.format(
    MIN_VALUES_ARG_NAME
)
NUM_CONTOURS_HELP_STRING = 'Number of contours for class activation.'
SMOOTHING_RADIUS_HELP_STRING = (
    'e-folding radius for Gaussian smoother (num grid cells). If you do not '
    'want to smooth CAMs, make this non-positive.'
)
MAX_FDR_HELP_STRING = (
    'Max FDR (false-discovery rate) for field-based version of Monte Carlo '
    'significance test. If you do not want to use field-based version, leave '
    'this argument alone.'
)
OUTPUT_DIR_HELP_STRING = (
    'Name of output directory (figures will be saved here).'
)
# Build the command-line parser.
INPUT_ARG_PARSER = argparse.ArgumentParser()
INPUT_ARG_PARSER.add_argument(
    '--' + GRADCAM_FILES_ARG_NAME, type=str, nargs='+', required=True,
    help=GRADCAM_FILES_HELP_STRING
)
INPUT_ARG_PARSER.add_argument(
    '--' + MC_FILES_ARG_NAME, type=str, nargs='+', required=True,
    help=MC_FILES_HELP_STRING
)
INPUT_ARG_PARSER.add_argument(
    '--' + COMPOSITE_NAMES_ARG_NAME, type=str, nargs='+', required=True,
    help=COMPOSITE_NAMES_HELP_STRING
)
INPUT_ARG_PARSER.add_argument(
    '--' + COLOUR_MAP_ARG_NAME, type=str, required=False, default='binary',
    help=COLOUR_MAP_HELP_STRING
)
INPUT_ARG_PARSER.add_argument(
    '--' + MIN_VALUES_ARG_NAME, type=float, nargs='+', required=True,
    help=MIN_VALUES_HELP_STRING
)
INPUT_ARG_PARSER.add_argument(
    '--' + MAX_VALUES_ARG_NAME, type=float, nargs='+', required=True,
    help=MAX_VALUES_HELP_STRING
)
INPUT_ARG_PARSER.add_argument(
    '--' + NUM_CONTOURS_ARG_NAME, type=int, required=False,
    default=15, help=NUM_CONTOURS_HELP_STRING
)
INPUT_ARG_PARSER.add_argument(
    '--' + SMOOTHING_RADIUS_ARG_NAME, type=float, required=False,
    default=1., help=SMOOTHING_RADIUS_HELP_STRING
)
INPUT_ARG_PARSER.add_argument(
    '--' + MAX_FDR_ARG_NAME, type=float, required=False, default=-1.,
    help=MAX_FDR_HELP_STRING
)
INPUT_ARG_PARSER.add_argument(
    '--' + OUTPUT_DIR_ARG_NAME, type=str, required=True,
    help=OUTPUT_DIR_HELP_STRING
)
def _read_one_composite(gradcam_file_name, smoothing_radius_grid_cells,
                        monte_carlo_file_name, monte_carlo_max_fdr):
    """Reads class-activation map for one composite.

    E = number of examples
    M = number of rows in grid
    N = number of columns in grid
    H = number of heights in grid
    F = number of radar fields

    :param gradcam_file_name: Path to input file (will be read by
        `gradcam.read_file`).
    :param smoothing_radius_grid_cells: Radius for Gaussian smoother, used only
        for class-activation map.  If None, no smoothing is done.
    :param monte_carlo_file_name: Path to Monte Carlo file (a pickle file
        containing a dictionary with key `monte_carlo.P_VALUE_MATRICES_KEY`).
        If None, all grid points are marked non-significant.
    :param monte_carlo_max_fdr: See documentation at top of file.
    :return: mean_radar_matrix: E-by-M-by-N-by-H-by-F numpy array with mean
        radar fields.
    :return: mean_class_activn_matrix: E-by-M-by-N-by-H numpy array with mean
        class-activation fields.
    :return: significance_matrix: E-by-M-by-N-by-H numpy array of Boolean
        flags.
    :return: model_metadata_dict: Dictionary returned by
        `cnn.read_model_metadata`.
    """

    print('Reading CAMs from: "{0:s}"...'.format(gradcam_file_name))
    gradcam_dict = gradcam.read_file(gradcam_file_name)[0]

    # Add a leading example axis so downstream code can treat the composite as
    # a one-example batch.
    mean_radar_matrix = numpy.expand_dims(
        gradcam_dict[gradcam.MEAN_PREDICTOR_MATRICES_KEY][0], axis=0
    )
    mean_class_activn_matrix = numpy.expand_dims(
        gradcam_dict[gradcam.MEAN_CAM_MATRICES_KEY][0], axis=0
    )

    if smoothing_radius_grid_cells is not None:
        print((
            'Smoothing class-activation maps with Gaussian filter (e-folding '
            'radius of {0:.1f} grid cells)...'
        ).format(
            smoothing_radius_grid_cells
        ))

        mean_class_activn_matrix[0, ...] = general_utils.apply_gaussian_filter(
            input_matrix=mean_class_activn_matrix[0, ...],
            e_folding_radius_grid_cells=smoothing_radius_grid_cells
        )

    model_file_name = gradcam_dict[gradcam.MODEL_FILE_KEY]
    model_metafile_name = cnn.find_metafile(model_file_name)

    if monte_carlo_file_name is None:
        significance_matrix = numpy.full(
            mean_class_activn_matrix.shape, False, dtype=bool
        )
    else:
        print('Reading Monte Carlo test from: "{0:s}"...'.format(
            monte_carlo_file_name
        ))

        # Bug fix: use a context manager so the file handle is closed even if
        # unpickling raises.
        with open(monte_carlo_file_name, 'rb') as this_file_handle:
            monte_carlo_dict = pickle.load(this_file_handle)

        p_value_matrix = monte_carlo_dict[monte_carlo.P_VALUE_MATRICES_KEY][0]

        if monte_carlo_max_fdr is None:
            # Pointwise test at the 5% level.
            significance_matrix = p_value_matrix <= 0.05
        else:
            # Field-based test, controlling the false-discovery rate.
            significance_matrix = monte_carlo.find_sig_grid_points(
                p_value_matrix=p_value_matrix,
                max_false_discovery_rate=monte_carlo_max_fdr
            )

        significance_matrix = numpy.expand_dims(significance_matrix, axis=0)

    print('Fraction of significant differences: {0:.4f}'.format(
        numpy.mean(significance_matrix.astype(float))
    ))

    print('Reading CNN metadata from: "{0:s}"...'.format(model_metafile_name))
    model_metadata_dict = cnn.read_model_metadata(model_metafile_name)
    training_option_dict = model_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY]

    # Keep only the heights in RADAR_HEIGHTS_M_AGL, in that order.
    good_indices = numpy.array([
        numpy.where(
            training_option_dict[trainval_io.RADAR_HEIGHTS_KEY] == h
        )[0][0]
        for h in RADAR_HEIGHTS_M_AGL
    ], dtype=int)

    mean_radar_matrix = mean_radar_matrix[..., good_indices, :]
    mean_class_activn_matrix = mean_class_activn_matrix[..., good_indices]
    significance_matrix = significance_matrix[..., good_indices]

    # Keep only the fields in RADAR_FIELD_NAMES, in that order.
    good_indices = numpy.array([
        training_option_dict[trainval_io.RADAR_FIELDS_KEY].index(f)
        for f in RADAR_FIELD_NAMES
    ], dtype=int)

    mean_radar_matrix = mean_radar_matrix[..., good_indices]

    # Record the subsetting in the metadata so panel labels match the data.
    training_option_dict[trainval_io.RADAR_HEIGHTS_KEY] = RADAR_HEIGHTS_M_AGL
    training_option_dict[trainval_io.RADAR_FIELDS_KEY] = RADAR_FIELD_NAMES
    training_option_dict[trainval_io.SOUNDING_FIELDS_KEY] = None
    model_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY] = training_option_dict

    return (
        mean_radar_matrix, mean_class_activn_matrix, significance_matrix,
        model_metadata_dict
    )
def _overlay_text(
        image_file_name, x_offset_from_center_px, y_offset_from_top_px,
        text_string):
    """Overlays text on image, in place, via ImageMagick's `convert`.

    :param image_file_name: Path to image file (overwritten in place).
    :param x_offset_from_center_px: Center-relative x-coordinate (pixels).
    :param y_offset_from_top_px: Top-relative y-coordinate (pixels).
    :param text_string: String to overlay.
    :raises: ValueError: if ImageMagick command (which is ultimately a Unix
        command) fails.
    """

    # TODO(thunderhoser): Put this method somewhere more general.

    command_string = (
        '"{0:s}" "{1:s}" -gravity north -pointsize {2:d} -font "{3:s}" '
        '-fill "rgb(0, 0, 0)" -annotate {4:+d}{5:+d} "{6:s}" "{1:s}"'
    ).format(
        CONVERT_EXE_NAME, image_file_name, TITLE_FONT_SIZE, TITLE_FONT_NAME,
        x_offset_from_center_px, y_offset_from_top_px, text_string
    )

    # Any non-zero exit code from the shell means the overlay failed.
    if os.system(command_string) != 0:
        raise ValueError(imagemagick_utils.ERROR_STRING)
def _plot_one_composite(
        gradcam_file_name, monte_carlo_file_name, composite_name_abbrev,
        composite_name_verbose, colour_map_object, min_colour_value,
        max_colour_value, num_contours, smoothing_radius_grid_cells,
        monte_carlo_max_fdr, output_dir_name):
    """Plots class-activation map for one composite.

    :param gradcam_file_name: Path to input file (will be read by
        `gradcam.read_file`).
    :param monte_carlo_file_name: Path to Monte Carlo file (may be None; see
        `_read_one_composite`).
    :param composite_name_abbrev: Abbrev composite name (will be used in file
        names).
    :param composite_name_verbose: Verbose composite name (will be used in
        figure title).
    :param colour_map_object: See documentation at top of file.
    :param min_colour_value: Minimum value in colour bar (may be NaN).
    :param max_colour_value: Max value in colour bar (may be NaN).
    :param num_contours: See documentation at top of file.
    :param smoothing_radius_grid_cells: Same.
    :param monte_carlo_max_fdr: Same.
    :param output_dir_name: Name of output directory (figures will be saved
        here).
    :return: main_figure_file_name: Path to main image file created by this
        method.
    :return: min_colour_value: Same as input but cannot be None.
    :return: max_colour_value: Same as input but cannot be None.
    """
    (
        mean_radar_matrix, mean_class_activn_matrix, significance_matrix,
        model_metadata_dict
    ) = _read_one_composite(
        gradcam_file_name=gradcam_file_name,
        smoothing_radius_grid_cells=smoothing_radius_grid_cells,
        monte_carlo_file_name=monte_carlo_file_name,
        monte_carlo_max_fdr=monte_carlo_max_fdr
    )
    # If either colour bound is NaN, pick bounds from the 1st/99th percentiles
    # of class activation, clipped to [10^-2, 10^2] in linear units.
    if numpy.isnan(min_colour_value) or numpy.isnan(max_colour_value):
        min_colour_value_log10 = numpy.log10(
            numpy.percentile(mean_class_activn_matrix, 1.)
        )
        max_colour_value_log10 = numpy.log10(
            numpy.percentile(mean_class_activn_matrix, 99.)
        )
        min_colour_value_log10 = max([min_colour_value_log10, -2.])
        max_colour_value_log10 = max([max_colour_value_log10, -1.])
        min_colour_value_log10 = min([min_colour_value_log10, 1.])
        max_colour_value_log10 = min([max_colour_value_log10, 2.])
        min_colour_value = 10 ** min_colour_value_log10
        max_colour_value = 10 ** max_colour_value_log10
    else:
        min_colour_value_log10 = numpy.log10(min_colour_value)
        max_colour_value_log10 = numpy.log10(max_colour_value)
    # Contours are evenly spaced in log10(class activation).
    contour_interval_log10 = (
        (max_colour_value_log10 - min_colour_value_log10) /
        (num_contours - 1)
    )
    mean_activn_matrix_log10 = numpy.log10(mean_class_activn_matrix)
    training_option_dict = model_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY]
    field_names = training_option_dict[trainval_io.RADAR_FIELDS_KEY]
    num_fields = mean_radar_matrix.shape[-1]
    num_heights = mean_radar_matrix.shape[-2]
    # Plot the underlying (composite-mean) radar fields, one figure per field.
    handle_dict = plot_examples.plot_one_example(
        list_of_predictor_matrices=[mean_radar_matrix],
        model_metadata_dict=model_metadata_dict, pmm_flag=True,
        allow_whitespace=True, plot_panel_names=True,
        panel_name_font_size=PANEL_NAME_FONT_SIZE,
        add_titles=False, label_colour_bars=True,
        colour_bar_length=COLOUR_BAR_LENGTH,
        colour_bar_font_size=COLOUR_BAR_FONT_SIZE,
        num_panel_rows=num_heights
    )
    figure_objects = handle_dict[plot_examples.RADAR_FIGURES_KEY]
    axes_object_matrices = handle_dict[plot_examples.RADAR_AXES_KEY]
    # Overlay CAM contours and significance stippling on each field's figure.
    # NOTE(review): the same CAM is drawn for every field, presumably because
    # class activation has no field dimension — confirm.
    for k in range(num_fields):
        cam_plotting.plot_many_2d_grids(
            class_activation_matrix_3d=numpy.flip(
                mean_activn_matrix_log10[0, ...], axis=0
            ),
            axes_object_matrix=axes_object_matrices[k],
            colour_map_object=colour_map_object,
            min_contour_level=min_colour_value_log10,
            max_contour_level=max_colour_value_log10,
            contour_interval=contour_interval_log10
        )
        significance_plotting.plot_many_2d_grids_without_coords(
            significance_matrix=numpy.flip(
                significance_matrix[0, ...], axis=0
            ),
            axes_object_matrix=axes_object_matrices[k]
        )
    # Save one panel per radar field.
    panel_file_names = [None] * num_fields
    for k in range(num_fields):
        panel_file_names[k] = '{0:s}/{1:s}_{2:s}.jpg'.format(
            output_dir_name, composite_name_abbrev,
            field_names[k].replace('_', '-')
        )
        print('Saving figure to: "{0:s}"...'.format(panel_file_names[k]))
        figure_objects[k].savefig(
            panel_file_names[k], dpi=FIGURE_RESOLUTION_DPI,
            pad_inches=0, bbox_inches='tight'
        )
        pyplot.close(figure_objects[k])
    # Concatenate panels side by side, resize, and stamp the title on top.
    main_figure_file_name = '{0:s}/{1:s}_gradcam.jpg'.format(
        output_dir_name, composite_name_abbrev
    )
    print('Concatenating panels to: "{0:s}"...'.format(main_figure_file_name))
    imagemagick_utils.concatenate_images(
        input_file_names=panel_file_names,
        output_file_name=main_figure_file_name,
        num_panel_rows=1, num_panel_columns=num_fields, border_width_pixels=50
    )
    imagemagick_utils.resize_image(
        input_file_name=main_figure_file_name,
        output_file_name=main_figure_file_name,
        output_size_pixels=CONCAT_FIGURE_SIZE_PX
    )
    # Leave whitespace at the top so the overlaid title does not clip.
    imagemagick_utils.trim_whitespace(
        input_file_name=main_figure_file_name,
        output_file_name=main_figure_file_name,
        border_width_pixels=TITLE_FONT_SIZE + 25
    )
    _overlay_text(
        image_file_name=main_figure_file_name,
        x_offset_from_center_px=0, y_offset_from_top_px=0,
        text_string=composite_name_verbose
    )
    imagemagick_utils.trim_whitespace(
        input_file_name=main_figure_file_name,
        output_file_name=main_figure_file_name,
        border_width_pixels=10
    )
    return main_figure_file_name, min_colour_value, max_colour_value
def _add_colour_bar(figure_file_name, colour_map_object, min_colour_value,
                    max_colour_value, temporary_dir_name):
    """Adds colour bar to saved image file.

    :param figure_file_name: Path to saved image file. Colour bar will be added
        to this image.
    :param colour_map_object: Colour scheme (instance of `matplotlib.pyplot.cm`
        or similar).
    :param min_colour_value: Minimum value in colour scheme.
    :param max_colour_value: Max value in colour scheme.
    :param temporary_dir_name: Name of temporary output directory.
    """
    # Size the colour-bar figure to match the existing image so the two
    # concatenate cleanly.
    this_image_matrix = Image.open(figure_file_name)
    figure_width_px, figure_height_px = this_image_matrix.size
    figure_width_inches = float(figure_width_px) / FIGURE_RESOLUTION_DPI
    figure_height_inches = float(figure_height_px) / FIGURE_RESOLUTION_DPI
    extra_figure_object, extra_axes_object = pyplot.subplots(
        1, 1, figsize=(figure_width_inches, figure_height_inches)
    )
    extra_axes_object.axis('off')
    dummy_values = numpy.array([min_colour_value, max_colour_value])
    # Colour bar is linear in log10(class activation); tick labels are
    # converted back to linear units below.
    colour_bar_object = plotting_utils.plot_linear_colour_bar(
        axes_object_or_matrix=extra_axes_object, data_matrix=dummy_values,
        colour_map_object=colour_map_object,
        min_value=numpy.log10(min_colour_value),
        max_value=numpy.log10(max_colour_value),
        orientation_string='vertical', fraction_of_axis_length=1.25,
        extend_min=False, extend_max=True, font_size=COLOUR_BAR_FONT_SIZE,
        aspect_ratio=50.
    )
    tick_values = colour_bar_object.get_ticks()
    tick_strings = [
        '{0:.2f}'.format(10 ** v) for v in tick_values
    ]
    # Shorten labels: keep at most one decimal place for small values; drop
    # the decimal part entirely for large ones.
    for i in range(len(tick_strings)):
        if '.' in tick_strings[i][:3]:
            tick_strings[i] = tick_strings[i][:4]
        else:
            tick_strings[i] = tick_strings[i].split('.')[0]
    colour_bar_object.set_ticks(tick_values)
    colour_bar_object.set_ticklabels(tick_strings)
    extra_file_name = '{0:s}/gradcam_colour-bar.jpg'.format(temporary_dir_name)
    print('Saving colour bar to: "{0:s}"...'.format(extra_file_name))
    extra_figure_object.savefig(
        extra_file_name, dpi=FIGURE_RESOLUTION_DPI,
        pad_inches=0, bbox_inches='tight'
    )
    pyplot.close(extra_figure_object)
    # Paste the colour bar to the right of the original image, then delete
    # the temporary file.
    print('Concatenating colour bar to: "{0:s}"...'.format(figure_file_name))
    imagemagick_utils.concatenate_images(
        input_file_names=[figure_file_name, extra_file_name],
        output_file_name=figure_file_name,
        num_panel_rows=1, num_panel_columns=2,
        extra_args_string='-gravity Center'
    )
    os.remove(extra_file_name)
    imagemagick_utils.trim_whitespace(
        input_file_name=figure_file_name, output_file_name=figure_file_name
    )
def _run(gradcam_file_names, monte_carlo_file_names, composite_names,
         colour_map_name, min_colour_values, max_colour_values, num_contours,
         smoothing_radius_grid_cells, monte_carlo_max_fdr, output_dir_name):
    """Makes figure with gradient-weighted class-activation maps (Grad-CAM).

    This is effectively the main method.

    :param gradcam_file_names: See documentation at top of file.
    :param monte_carlo_file_names: Same.
    :param composite_names: Same.
    :param colour_map_name: Same.
    :param min_colour_values: Same.
    :param max_colour_values: Same.
    :param num_contours: Same.
    :param smoothing_radius_grid_cells: Same.
    :param monte_carlo_max_fdr: Same.
    :param output_dir_name: Same.
    """

    # Non-positive command-line values mean "disabled".
    if smoothing_radius_grid_cells <= 0:
        smoothing_radius_grid_cells = None
    if monte_carlo_max_fdr <= 0:
        monte_carlo_max_fdr = None

    file_system_utils.mkdir_recursive_if_necessary(
        directory_name=output_dir_name
    )
    colour_map_object = pyplot.cm.get_cmap(colour_map_name)
    error_checking.assert_is_geq(num_contours, 10)

    # All per-composite input lists must have the same length.
    num_composites = len(gradcam_file_names)
    expected_dim = numpy.array([num_composites], dtype=int)

    error_checking.assert_is_numpy_array(
        numpy.array(composite_names), exact_dimensions=expected_dim
    )
    error_checking.assert_is_numpy_array(
        numpy.array(monte_carlo_file_names), exact_dimensions=expected_dim
    )

    monte_carlo_file_names = [
        None if f in NONE_STRINGS else f for f in monte_carlo_file_names
    ]

    # Negative colour bounds mean "determine automatically"; NaN is the
    # sentinel used downstream by _plot_one_composite.
    nan_indices = numpy.where(numpy.logical_or(
        max_colour_values < 0, min_colour_values < 0
    ))[0]
    min_colour_values[nan_indices] = numpy.nan
    max_colour_values[nan_indices] = numpy.nan

    error_checking.assert_is_numpy_array(
        min_colour_values, exact_dimensions=expected_dim
    )
    error_checking.assert_is_numpy_array(
        max_colour_values, exact_dimensions=expected_dim
    )
    # NaN comparisons are False, so this only validates explicit bounds.
    assert not numpy.any(max_colour_values <= min_colour_values)

    composite_names_abbrev = [
        n.replace('_', '-').lower() for n in composite_names
    ]
    composite_names_verbose = [
        '({0:s}) {1:s}'.format(
            chr(ord('a') + i), composite_names[i].replace('_', ' ')
        )
        for i in range(num_composites)
    ]

    panel_file_names = [None] * num_composites

    for i in range(num_composites):
        (
            panel_file_names[i], min_colour_values[i], max_colour_values[i]
        ) = _plot_one_composite(
            gradcam_file_name=gradcam_file_names[i],
            monte_carlo_file_name=monte_carlo_file_names[i],
            composite_name_abbrev=composite_names_abbrev[i],
            composite_name_verbose=composite_names_verbose[i],
            colour_map_object=colour_map_object,
            min_colour_value=min_colour_values[i],
            max_colour_value=max_colour_values[i],
            num_contours=num_contours,
            smoothing_radius_grid_cells=smoothing_radius_grid_cells,
            monte_carlo_max_fdr=monte_carlo_max_fdr,
            output_dir_name=output_dir_name
        )

        _add_colour_bar(
            figure_file_name=panel_file_names[i],
            colour_map_object=colour_map_object,
            min_colour_value=min_colour_values[i],
            max_colour_value=max_colour_values[i],
            temporary_dir_name=output_dir_name
        )
        print('\n')

    figure_file_name = '{0:s}/gradcam_concat.jpg'.format(output_dir_name)
    print('Concatenating panels to: "{0:s}"...'.format(figure_file_name))

    num_panel_rows = int(numpy.ceil(
        numpy.sqrt(num_composites)
    ))

    # Bug fix: column count must be rounded *up* (was numpy.floor).  With
    # floor, any non-square composite count produced a grid with fewer slots
    # than panels (e.g. 3 composites -> 3 rows x 1 column = 3 slots is fine,
    # but 5 composites -> 3 rows x floor(5/3)=1 column = 3 slots for 5 panels).
    num_panel_columns = int(numpy.ceil(
        float(num_composites) / num_panel_rows
    ))

    imagemagick_utils.concatenate_images(
        input_file_names=panel_file_names,
        output_file_name=figure_file_name, border_width_pixels=25,
        num_panel_rows=num_panel_rows, num_panel_columns=num_panel_columns
    )
    imagemagick_utils.trim_whitespace(
        input_file_name=figure_file_name, output_file_name=figure_file_name,
        border_width_pixels=10
    )
    imagemagick_utils.resize_image(
        input_file_name=figure_file_name, output_file_name=figure_file_name,
        output_size_pixels=CONCAT_FIGURE_SIZE_PX
    )
# Script entry point: parse the command-line arguments and run.
if __name__ == '__main__':
    INPUT_ARG_OBJECT = INPUT_ARG_PARSER.parse_args()
    _run(
        gradcam_file_names=getattr(INPUT_ARG_OBJECT, GRADCAM_FILES_ARG_NAME),
        monte_carlo_file_names=getattr(INPUT_ARG_OBJECT, MC_FILES_ARG_NAME),
        composite_names=getattr(INPUT_ARG_OBJECT, COMPOSITE_NAMES_ARG_NAME),
        colour_map_name=getattr(INPUT_ARG_OBJECT, COLOUR_MAP_ARG_NAME),
        min_colour_values=numpy.array(
            getattr(INPUT_ARG_OBJECT, MIN_VALUES_ARG_NAME), dtype=float
        ),
        max_colour_values=numpy.array(
            getattr(INPUT_ARG_OBJECT, MAX_VALUES_ARG_NAME), dtype=float
        ),
        num_contours=getattr(INPUT_ARG_OBJECT, NUM_CONTOURS_ARG_NAME),
        smoothing_radius_grid_cells=getattr(
            INPUT_ARG_OBJECT, SMOOTHING_RADIUS_ARG_NAME
        ),
        monte_carlo_max_fdr=getattr(INPUT_ARG_OBJECT, MAX_FDR_ARG_NAME),
        output_dir_name=getattr(INPUT_ARG_OBJECT, OUTPUT_DIR_ARG_NAME)
    )
| |
"""
Types for objects parsed from the configuration.
"""
from __future__ import absolute_import
from __future__ import unicode_literals
import os
from collections import namedtuple
import six
from compose.config.config import V1
from compose.config.errors import ConfigurationError
from compose.const import IS_WINDOWS_PLATFORM
from compose.utils import splitdrive
class VolumeFromSpec(namedtuple('_VolumeFromSpec', 'source mode type')):
    """A parsed ``volumes_from`` entry: (source, mode, type)."""

    # TODO: drop service_names arg when v1 is removed
    @classmethod
    def parse(cls, volume_from_config, service_names, version):
        """Dispatch to the parser matching the config-file version."""
        if version == V1:
            return cls.parse_v1(service_names, volume_from_config)
        return cls.parse_v2(service_names, volume_from_config)

    @classmethod
    def parse_v1(cls, service_names, volume_from_config):
        """Parse a v1 entry of the form ``service[:mode]``."""
        pieces = volume_from_config.split(':')
        if len(pieces) > 2:
            raise ConfigurationError(
                "volume_from {} has incorrect format, should be "
                "service[:mode]".format(volume_from_config))

        source = pieces[0]
        mode = pieces[1] if len(pieces) == 2 else 'rw'
        # v1 has no explicit type: anything that is not a known service is
        # assumed to be a container name.
        kind = 'service' if source in service_names else 'container'
        return cls(source, mode, kind)

    @classmethod
    def parse_v2(cls, service_names, volume_from_config):
        """Parse a v2 entry: ``service[:mode]`` or ``container:name[:mode]``."""
        pieces = volume_from_config.split(':')
        if len(pieces) > 3:
            raise ConfigurationError(
                "volume_from {} has incorrect format, should be one of "
                "'<service name>[:<mode>]' or "
                "'container:<container name>[:<mode>]'".format(volume_from_config))

        if len(pieces) == 3:
            kind, source, mode = pieces
            if kind not in ('service', 'container'):
                raise ConfigurationError(
                    "Unknown volumes_from type '{}' in '{}'".format(
                        kind,
                        volume_from_config))
            return cls(source, mode, kind)

        if len(pieces) == 2:
            if pieces[0] == 'container':
                return cls(pieces[1], 'rw', pieces[0])
            return cls(pieces[0], pieces[1], 'service')

        return cls(pieces[0], 'rw', 'service')

    def repr(self):
        return ':'.join([self.type, self.source, self.mode])
def parse_restart_spec(restart_config):
    """Parse ``mode[:max_retry]`` into an engine restart-policy dict.

    Returns None for an empty/missing config.
    """
    if not restart_config:
        return None

    pieces = restart_config.split(':')
    if len(pieces) > 2:
        raise ConfigurationError(
            "Restart %s has incorrect format, should be "
            "mode[:max_retry]" % restart_config)

    name = pieces[0]
    # Retry count defaults to 0 (i.e. unlimited/ignored, per engine rules).
    max_retry_count = pieces[1] if len(pieces) == 2 else 0
    return {'Name': name, 'MaximumRetryCount': int(max_retry_count)}
def serialize_restart_spec(restart_spec):
    """Inverse of parse_restart_spec: dict back to ``mode[:max_retry]``."""
    name = restart_spec['Name']
    retries = restart_spec['MaximumRetryCount']
    if not retries:
        # A zero retry count is omitted from the serialized form.
        return name
    return ':'.join([name, six.text_type(retries)])
def parse_extra_hosts(extra_hosts_config):
    """Normalize an ``extra_hosts`` value into a ``{hostname: ip}`` dict.

    :param extra_hosts_config: None, a hostname->IP dict, or a list of
        "hostname:IP" strings.
    :return: dict mapping hostname to IP (empty dict for a falsy config).
    :raises ConfigurationError: if the value is neither a list nor a dict.
    """
    if not extra_hosts_config:
        return {}

    if isinstance(extra_hosts_config, dict):
        return dict(extra_hosts_config)

    if isinstance(extra_hosts_config, list):
        extra_hosts_dict = {}
        for extra_hosts_line in extra_hosts_config:
            # TODO: validate string contains ':' ?
            host, ip = extra_hosts_line.split(':', 1)
            extra_hosts_dict[host.strip()] = ip.strip()
        return extra_hosts_dict

    # Bug fix: previously this fell off the end and silently returned None
    # for unsupported types (e.g. a bare string); fail loudly instead.
    raise ConfigurationError(
        "extra_hosts_config \"%s\" must be either a list of strings or a "
        "string->string mapping." % extra_hosts_config
    )
def normalize_path_for_engine(path):
    """Convert a Windows path (``c:/my/path``) into the Linux-style form the
    Engine expects for volume paths (``/c/my/path``).
    """
    drive, tail = splitdrive(path)
    if drive:
        # Lower-cased drive letter, colon dropped, mounted at the root.
        drive_letter = drive.lower().rstrip(':')
        path = '/{0}{1}'.format(drive_letter, tail)
    return path.replace('\\', '/')
class VolumeSpec(namedtuple('_VolumeSpec', 'external internal mode')):
    # A parsed volume binding: host path or named volume (external, may be
    # None), container path (internal), and access mode ('rw' default).

    @classmethod
    def _parse_unix(cls, volume_config):
        """Parse 'external:internal[:mode]' with POSIX path rules."""
        parts = volume_config.split(':')
        if len(parts) > 3:
            raise ConfigurationError(
                "Volume %s has incorrect format, should be "
                "external:internal[:mode]" % volume_config)
        if len(parts) == 1:
            # Single path means an anonymous volume at that container path.
            external = None
            internal = os.path.normpath(parts[0])
        else:
            external = os.path.normpath(parts[0])
            internal = os.path.normpath(parts[1])
        mode = 'rw'
        if len(parts) == 3:
            mode = parts[2]
        return cls(external, internal, mode)

    @classmethod
    def _parse_win32(cls, volume_config):
        # relative paths in windows expand to include the drive, eg C:\
        # so we join the first 2 parts back together to count as one
        mode = 'rw'

        def separate_next_section(volume_config):
            # Split off one path section, keeping a drive prefix (e.g. 'C:')
            # attached to the path that follows it.
            drive, tail = splitdrive(volume_config)
            parts = tail.split(':', 1)
            if drive:
                parts[0] = drive + parts[0]
            return parts

        parts = separate_next_section(volume_config)
        if len(parts) == 1:
            internal = normalize_path_for_engine(os.path.normpath(parts[0]))
            external = None
        else:
            external = parts[0]
            parts = separate_next_section(parts[1])
            external = normalize_path_for_engine(os.path.normpath(external))
            internal = normalize_path_for_engine(os.path.normpath(parts[0]))
            if len(parts) > 1:
                if ':' in parts[1]:
                    # A third ':' section means the format is malformed.
                    raise ConfigurationError(
                        "Volume %s has incorrect format, should be "
                        "external:internal[:mode]" % volume_config
                    )
                mode = parts[1]
        return cls(external, internal, mode)

    @classmethod
    def parse(cls, volume_config):
        """Parse a volume_config path and split it into external:internal[:mode]
        parts to be returned as a valid VolumeSpec.
        """
        if IS_WINDOWS_PLATFORM:
            return cls._parse_win32(volume_config)
        else:
            return cls._parse_unix(volume_config)

    def repr(self):
        external = self.external + ':' if self.external else ''
        return '{ext}{v.internal}:{v.mode}'.format(ext=external, v=self)

    @property
    def is_named_volume(self):
        # Named volumes are non-path externals (not '.', '/' or '~' prefixed).
        return self.external and not self.external.startswith(('.', '/', '~'))
class ServiceLink(namedtuple('_ServiceLink', 'target alias')):
    """A parsed ``links`` entry: target service plus its alias."""

    @classmethod
    def parse(cls, link_spec):
        """Parse ``target[:alias]``; the alias defaults to the target name."""
        target, _sep, alias = link_spec.partition(':')
        return cls(target, alias or target)

    def repr(self):
        # Omit the alias when it is just the target name.
        if self.target == self.alias:
            return self.target
        return ':'.join([self.target, self.alias])

    @property
    def merge_field(self):
        # Links with the same alias are considered the same link when merging.
        return self.alias
| |
# Copyright 2013 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import block_device
from nova.cells import rpcapi as cells_rpcapi
from nova import db
from nova import exception
from nova.objects import base
from nova.objects import fields
from nova.objects import instance
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
_BLOCK_DEVICE_OPTIONAL_JOINED_FIELD = ['instance']
BLOCK_DEVICE_OPTIONAL_ATTRS = _BLOCK_DEVICE_OPTIONAL_JOINED_FIELD
def _expected_cols(expected_attrs):
return [attr for attr in expected_attrs
if attr in _BLOCK_DEVICE_OPTIONAL_JOINED_FIELD]
class BlockDeviceMapping(base.NovaPersistentObject, base.NovaObject):
# Version 1.0: Initial version
# Version 1.1: Add instance_uuid to get_by_volume_id method
VERSION = '1.1'
fields = {
'id': fields.IntegerField(),
'instance_uuid': fields.UUIDField(),
'instance': fields.ObjectField('Instance', nullable=True),
'source_type': fields.StringField(nullable=True),
'destination_type': fields.StringField(nullable=True),
'guest_format': fields.StringField(nullable=True),
'device_type': fields.StringField(nullable=True),
'disk_bus': fields.StringField(nullable=True),
'boot_index': fields.IntegerField(nullable=True),
'device_name': fields.StringField(nullable=True),
'delete_on_termination': fields.BooleanField(default=False),
'snapshot_id': fields.StringField(nullable=True),
'volume_id': fields.StringField(nullable=True),
'volume_size': fields.IntegerField(nullable=True),
'image_id': fields.StringField(nullable=True),
'no_device': fields.BooleanField(default=False),
'connection_info': fields.StringField(nullable=True),
}
@staticmethod
def _from_db_object(context, block_device_obj,
db_block_device, expected_attrs=None):
if expected_attrs is None:
expected_attrs = []
for key in block_device_obj.fields:
if key in BLOCK_DEVICE_OPTIONAL_ATTRS:
continue
block_device_obj[key] = db_block_device[key]
if 'instance' in expected_attrs:
my_inst = instance.Instance()
instance.Instance._from_db_object(
context, my_inst, db_block_device['instance'])
block_device_obj.instance = my_inst
block_device_obj._context = context
block_device_obj.obj_reset_changes()
return block_device_obj
@base.remotable
def create(self, context):
if self.obj_attr_is_set('id'):
raise exception.ObjectActionError(action='create',
reason='already created')
updates = self.obj_get_changes()
if 'instance' in updates:
raise exception.ObjectActionError(action='create',
reason='instance assigned')
updates.pop('id', None)
db_bdm = db.block_device_mapping_create(context, updates, legacy=False)
cells_api = cells_rpcapi.CellsAPI()
cells_api.bdm_update_or_create_at_top(context, db_bdm, create=True)
self._from_db_object(context, self, db_bdm)
@base.remotable
def destroy(self, context):
if not self.obj_attr_is_set('id'):
raise exception.ObjectActionError(action='destroy',
reason='already destroyed')
db.block_device_mapping_destroy(context, self.id)
delattr(self, base.get_attrname('id'))
cells_api = cells_rpcapi.CellsAPI()
cells_api.bdm_destroy_at_top(context, self.instance_uuid,
device_name=self.device_name,
volume_id=self.volume_id)
@base.remotable
def save(self, context):
updates = self.obj_get_changes()
if 'instance' in updates:
raise exception.ObjectActionError(action='save',
reason='instance changed')
updates.pop('id', None)
updated = db.block_device_mapping_update(self._context, self.id,
updates, legacy=False)
cells_api = cells_rpcapi.CellsAPI()
cells_api.bdm_update_or_create_at_top(context, updated)
self._from_db_object(context, self, updated)
@base.remotable_classmethod
def get_by_volume_id(cls, context, volume_id,
instance_uuid=None, expected_attrs=None):
if expected_attrs is None:
expected_attrs = []
db_bdm = db.block_device_mapping_get_by_volume_id(
context, volume_id, _expected_cols(expected_attrs))
if not db_bdm:
raise exception.VolumeBDMNotFound(volume_id=volume_id)
# NOTE (ndipanov): Move this to the db layer into a
# get_by_instance_and_volume_id method
if instance_uuid and instance_uuid != db_bdm['instance_uuid']:
raise exception.InvalidVolume(
reason=_("Volume does not belong to the "
"requested instance."))
return cls._from_db_object(context, cls(), db_bdm,
expected_attrs=expected_attrs)
    @property
    def is_root(self):
        # A boot_index of 0 marks the root (boot) device.
        return self.boot_index == 0
    @property
    def is_volume(self):
        # True when the mapping materializes as a volume on the instance.
        return self.destination_type == 'volume'
    @property
    def is_image(self):
        # True when the mapping is backed by an image source.
        return self.source_type == 'image'
    def get_image_mapping(self):
        """Return this mapping converted to the image-metadata BDM format."""
        return block_device.BlockDeviceDict(self).get_image_mapping()
def obj_load_attr(self, attrname):
if attrname not in BLOCK_DEVICE_OPTIONAL_ATTRS:
raise exception.ObjectActionError(
action='obj_load_attr',
reason='attribute %s not lazy-loadable' % attrname)
if not self._context:
raise exception.OrphanedObjectError(method='obj_load_attr',
objtype=self.obj_name())
LOG.debug("Lazy-loading `%(attr)s' on %(name)s uuid %(uuid)s",
{'attr': attrname,
'name': self.obj_name(),
'uuid': self.uuid,
})
self.instance = instance.Instance.get_by_uuid(self._context,
self.instance_uuid)
self.obj_reset_changes(fields=['instance'])
class BlockDeviceMappingList(base.ObjectListBase, base.NovaObject):
    """List object wrapping BlockDeviceMapping entries for one instance."""
    # Version 1.0: Initial version
    # Version 1.1: BlockDeviceMapping <= version 1.1
    # Version 1.2: Added use_slave to get_by_instance_uuid
    VERSION = '1.2'
    fields = {
        'objects': fields.ListOfObjectsField('BlockDeviceMapping'),
    }
    child_versions = {
        '1.0': '1.0',
        '1.1': '1.1',
        '1.2': '1.1',
    }
    @base.remotable_classmethod
    def get_by_instance_uuid(cls, context, instance_uuid, use_slave=False):
        """Load all block device mappings for an instance."""
        db_bdms = db.block_device_mapping_get_all_by_instance(
            context, instance_uuid, use_slave=use_slave)
        return base.obj_make_list(
            context, cls(), BlockDeviceMapping, db_bdms or [])
    def root_bdm(self):
        """Return the root device mapping, or None when there is none."""
        try:
            # BUG FIX: use the builtin next() instead of the Python-2-only
            # generator .next() method; next() works on Python 2.6+ and 3.
            return next(bdm_obj for bdm_obj in self if bdm_obj.is_root)
        except StopIteration:
            return
    def root_metadata(self, context, image_api, volume_api):
        """Return image metadata for the root device.

        Works for both volume-backed and image-backed root devices; returns
        {} when there is no root device or no metadata.  Raises
        InvalidBDMVolume / InvalidBDMImage when the backing lookup fails.
        """
        root_bdm = self.root_bdm()
        if not root_bdm:
            return {}
        if root_bdm.is_volume:
            try:
                volume = volume_api.get(context, root_bdm.volume_id)
                return volume.get('volume_image_metadata', {})
            except Exception:
                raise exception.InvalidBDMVolume(id=root_bdm.id)
        elif root_bdm.is_image:
            try:
                image_meta = image_api.show(context, root_bdm.image_id)
                return image_meta.get('properties', {})
            except Exception:
                raise exception.InvalidBDMImage(id=root_bdm.id)
        else:
            return {}
def block_device_make_list(context, db_list, **extra_args):
    """Build a BlockDeviceMappingList object from raw DB rows."""
    bdm_list = BlockDeviceMappingList()
    return base.obj_make_list(context, bdm_list, BlockDeviceMapping,
                              db_list, **extra_args)
| |
"""Xbox Media Source Implementation."""
from __future__ import annotations
from contextlib import suppress
from dataclasses import dataclass
from pydantic.error_wrappers import ValidationError # pylint: disable=no-name-in-module
from xbox.webapi.api.client import XboxLiveClient
from xbox.webapi.api.provider.catalog.models import FieldsTemplate, Image
from xbox.webapi.api.provider.gameclips.models import GameclipsResponse
from xbox.webapi.api.provider.screenshots.models import ScreenshotResponse
from xbox.webapi.api.provider.smartglass.models import InstalledPackage
from homeassistant.components.media_player.const import (
MEDIA_CLASS_DIRECTORY,
MEDIA_CLASS_GAME,
MEDIA_CLASS_IMAGE,
MEDIA_CLASS_VIDEO,
)
from homeassistant.components.media_source.const import MEDIA_MIME_TYPES
from homeassistant.components.media_source.models import (
BrowseMediaSource,
MediaSource,
MediaSourceItem,
PlayMedia,
)
from homeassistant.core import callback
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.util import dt as dt_util
from .browse_media import _find_media_image
from .const import DOMAIN
# Media category kind -> MIME type served for its items.
MIME_TYPE_MAP = {
    "gameclips": "video/mp4",
    "screenshots": "image/png",
}
# Media category kind -> Home Assistant media class of the children.
MEDIA_CLASS_MAP = {
    "gameclips": MEDIA_CLASS_VIDEO,
    "screenshots": MEDIA_CLASS_IMAGE,
}
async def async_get_media_source(hass: HomeAssistantType):
    """Set up Xbox media source."""
    # NOTE(review): assumes at least one config entry for the domain exists
    # when the media source is requested -- confirm the platform guarantees
    # this, otherwise [0] raises IndexError.
    entry = hass.config_entries.async_entries(DOMAIN)[0]
    client = hass.data[DOMAIN][entry.entry_id]["client"]
    return XboxSource(hass, client)
@callback
def async_parse_identifier(
    item: MediaSourceItem,
) -> tuple[str, str, str]:
    """Split a media identifier into exactly three '~~'-separated parts.

    Missing trailing parts are returned as empty strings so callers can
    always unpack (title, category, url).
    """
    raw = item.identifier or ""
    parts = raw.lstrip("/").split("~~", 2)
    while len(parts) < 3:
        parts.append("")
    return tuple(parts)
@dataclass
class XboxMediaItem:
    """Represents gameclip/screenshot media."""
    # Display title shown in the media browser.
    caption: str
    # URL of the preview thumbnail image.
    thumbnail: str
    # URL of the playable media asset.
    uri: str
    # Home Assistant media class (video for clips, image for screenshots).
    media_class: str
class XboxSource(MediaSource):
    """Provide Xbox screenshots and gameclips as media sources."""
    name: str = "Xbox Game Media"
    def __init__(self, hass: HomeAssistantType, client: XboxLiveClient):
        """Initialize Xbox source."""
        super().__init__(DOMAIN)
        self.hass: HomeAssistantType = hass
        self.client: XboxLiveClient = client
    async def async_resolve_media(self, item: MediaSourceItem) -> PlayMedia:
        """Resolve media to a url."""
        _, category, url = async_parse_identifier(item)
        # category is "<owner>#<kind>"; the kind picks the MIME type.
        kind = category.split("#", 1)[1]
        return PlayMedia(url, MIME_TYPE_MAP[kind])
    async def async_browse_media(
        self, item: MediaSourceItem, media_types: tuple[str] = MEDIA_MIME_TYPES
    ) -> BrowseMediaSource:
        """Return media.

        Drill-down order: no title -> game library; title but no category ->
        category list; otherwise the actual clip/screenshot items.
        """
        title, category, _ = async_parse_identifier(item)
        if not title:
            return await self._build_game_library()
        if not category:
            return _build_categories(title)
        return await self._build_media_items(title, category)
    async def _build_game_library(self):
        """Display installed games across all consoles."""
        apps = await self.client.smartglass.get_installed_apps()
        # Deduplicate by store product id, keeping only real games.
        games = {
            game.one_store_product_id: game
            for game in apps.result
            if game.is_game and game.title_id
        }
        app_details = await self.client.catalog.get_products(
            games.keys(),
            FieldsTemplate.BROWSE,
        )
        images = {
            prod.product_id: prod.localized_properties[0].images
            for prod in app_details.products
        }
        return BrowseMediaSource(
            domain=DOMAIN,
            identifier="",
            media_class=MEDIA_CLASS_DIRECTORY,
            media_content_type="",
            title="Xbox Game Media",
            can_play=False,
            can_expand=True,
            children=[_build_game_item(game, images) for game in games.values()],
            children_media_class=MEDIA_CLASS_GAME,
        )
    async def _build_media_items(self, title, category):
        """Fetch requested gameclip/screenshot media."""
        title_id, _, thumbnail = title.split("#", 2)
        owner, kind = category.split("#", 1)
        items: list[XboxMediaItem] = []
        # Swallow schema mismatches from the Xbox API and return no items.
        with suppress(ValidationError):  # Unexpected API response
            if kind == "gameclips":
                if owner == "my":
                    response: GameclipsResponse = (
                        await self.client.gameclips.get_recent_clips_by_xuid(
                            self.client.xuid, title_id
                        )
                    )
                elif owner == "community":
                    response: GameclipsResponse = await self.client.gameclips.get_recent_community_clips_by_title_id(
                        title_id
                    )
                else:
                    return None
                items = [
                    XboxMediaItem(
                        item.user_caption
                        or dt_util.as_local(
                            dt_util.parse_datetime(item.date_recorded)
                        ).strftime("%b. %d, %Y %I:%M %p"),
                        item.thumbnails[0].uri,
                        item.game_clip_uris[0].uri,
                        MEDIA_CLASS_VIDEO,
                    )
                    for item in response.game_clips
                ]
            elif kind == "screenshots":
                if owner == "my":
                    response: ScreenshotResponse = (
                        await self.client.screenshots.get_recent_screenshots_by_xuid(
                            self.client.xuid, title_id
                        )
                    )
                elif owner == "community":
                    response: ScreenshotResponse = await self.client.screenshots.get_recent_community_screenshots_by_title_id(
                        title_id
                    )
                else:
                    return None
                items = [
                    XboxMediaItem(
                        item.user_caption
                        # NOTE(review): clip captions use "%I:%M %p" while this
                        # uses "%I:%M%p" (no space) -- confirm whether the
                        # inconsistency is intended.
                        or dt_util.as_local(item.date_taken).strftime(
                            "%b. %d, %Y %I:%M%p"
                        ),
                        item.thumbnails[0].uri,
                        item.screenshot_uris[0].uri,
                        MEDIA_CLASS_IMAGE,
                    )
                    for item in response.screenshots
                ]
        return BrowseMediaSource(
            domain=DOMAIN,
            identifier=f"{title}~~{category}",
            media_class=MEDIA_CLASS_DIRECTORY,
            media_content_type="",
            title=f"{owner.title()} {kind.title()}",
            can_play=False,
            can_expand=True,
            children=[_build_media_item(title, category, item) for item in items],
            children_media_class=MEDIA_CLASS_MAP[kind],
            thumbnail=thumbnail,
        )
def _build_game_item(item: InstalledPackage, images: dict[str, list[Image]]):
    """Build individual game.

    ``images`` maps store product id -> list of catalog images; the original
    ``list[Image]`` annotation was wrong given the ``images.get(...)`` call.
    """
    thumbnail = ""
    image = _find_media_image(images.get(item.one_store_product_id, []))
    if image is not None:
        thumbnail = image.uri
        # Catalog image URIs may be protocol-relative; force https.
        if thumbnail[0] == "/":
            thumbnail = f"https:{thumbnail}"
    return BrowseMediaSource(
        domain=DOMAIN,
        identifier=f"{item.title_id}#{item.name}#{thumbnail}",
        media_class=MEDIA_CLASS_GAME,
        media_content_type="",
        title=item.name,
        can_play=False,
        can_expand=True,
        children_media_class=MEDIA_CLASS_DIRECTORY,
        thumbnail=thumbnail,
    )
def _build_categories(title):
    """Build the my/community x gameclips/screenshots category nodes."""
    _, name, thumbnail = title.split("#", 2)
    children = [
        BrowseMediaSource(
            domain=DOMAIN,
            identifier=f"{title}~~{owner}#{kind}",
            media_class=MEDIA_CLASS_DIRECTORY,
            media_content_type="",
            title=f"{owner.title()} {kind.title()}",
            can_play=False,
            can_expand=True,
            children_media_class=MEDIA_CLASS_MAP[kind],
        )
        for owner in ("my", "community")
        for kind in ("gameclips", "screenshots")
    ]
    return BrowseMediaSource(
        domain=DOMAIN,
        identifier=f"{title}",
        media_class=MEDIA_CLASS_GAME,
        media_content_type="",
        title=name,
        can_play=False,
        can_expand=True,
        children=children,
        children_media_class=MEDIA_CLASS_DIRECTORY,
        thumbnail=thumbnail,
    )
def _build_media_item(title: str, category: str, item: XboxMediaItem):
    """Build a playable node for a single gameclip or screenshot."""
    media_kind = category.split("#", 1)[1]
    source_kwargs = {
        "domain": DOMAIN,
        "identifier": f"{title}~~{category}~~{item.uri}",
        "media_class": item.media_class,
        "media_content_type": MIME_TYPE_MAP[media_kind],
        "title": item.caption,
        "can_play": True,
        "can_expand": False,
        "thumbnail": item.thumbnail,
    }
    return BrowseMediaSource(**source_kwargs)
| |
# Copyright (C) 2015 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import json
from itertools import chain
from threading import Thread
import subprocess
import os
from fabric import colors
from fabric.api import local
from fabric.utils import indent
import netaddr
import toml
import yaml
from lib.base import (
BGPContainer,
CmdBuffer,
BGP_ATTR_TYPE_AS_PATH,
BGP_ATTR_TYPE_NEXT_HOP,
BGP_ATTR_TYPE_MULTI_EXIT_DISC,
BGP_ATTR_TYPE_LOCAL_PREF,
BGP_ATTR_TYPE_MP_REACH_NLRI,
)
def extract_path_attribute(path, typ):
    """Return the first attribute of *path* whose 'type' equals *typ*.

    Returns None when no such attribute exists.
    """
    matches = (attr for attr in path['attrs'] if attr['type'] == typ)
    return next(matches, None)
class GoBGPContainer(BGPContainer):
    """Test container running gobgpd, optionally with Quagga zebra/ospfd."""
    SHARED_VOLUME = '/root/shared_volume'
    QUAGGA_VOLUME = '/etc/quagga'
    def __init__(self, name, asn, router_id, ctn_image_name='osrg/gobgp',
                 log_level='debug', zebra=False, config_format='toml',
                 zapi_version=2, ospfd_config=None):
        super(GoBGPContainer, self).__init__(name, asn, router_id,
                                             ctn_image_name)
        # Expose the host-side config dir inside the container.
        self.shared_volumes.append((self.config_dir, self.SHARED_VOLUME))
        self.log_level = log_level
        # Policy-related state; populated via set_/add_* helpers below.
        self.prefix_set = None
        self.neighbor_set = None
        self.bgp_set = None
        self.statements = None
        self.default_policy = None
        self.zebra = zebra
        self.zapi_version = zapi_version
        self.config_format = config_format
        # To start OSPFd in GoBGP container, specify 'ospfd_config' as a dict
        # type value.
        # Example:
        # ospfd_config = {
        #     'redistributes': [
        #         'connected',
        #     ],
        #     'networks': {
        #         '192.168.1.0/24': '0.0.0.0',  # <network>: <area>
        #     },
        # }
        self.ospfd_config = ospfd_config or {}
    def _start_gobgp(self, graceful_restart=False):
        """Write a start script into the shared volume and launch gobgpd."""
        c = CmdBuffer()
        c << '#!/bin/bash'
        # NOTE(review): '{2}' injects '-r' (graceful restart) between the
        # '-p' and '-t' flags -- confirm against the gobgpd CLI that this
        # flag ordering is intended.
        c << '/go/bin/gobgpd -f {0}/gobgpd.conf -l {1} -p {2} -t {3} > ' \
            '{0}/gobgpd.log 2>&1'.format(self.SHARED_VOLUME, self.log_level, '-r' if graceful_restart else '', self.config_format)
        cmd = 'echo "{0:s}" > {1}/start.sh'.format(c, self.config_dir)
        local(cmd, capture=True)
        cmd = "chmod 755 {0}/start.sh".format(self.config_dir)
        local(cmd, capture=True)
        self.local("{0}/start.sh".format(self.SHARED_VOLUME), detach=True)
    def graceful_restart(self):
        """Send SIGINT to gobgpd to trigger a graceful restart shutdown."""
        self.local("pkill -INT gobgpd")
    def _start_zebra(self):
        """Start zebra; Quagga layout for ZAPI v2, FRR-style otherwise."""
        if self.zapi_version == 2:
            cmd = 'cp {0}/zebra.conf {1}/'.format(self.SHARED_VOLUME, self.QUAGGA_VOLUME)
            self.local(cmd)
            cmd = '/usr/lib/quagga/zebra -f {0}/zebra.conf'.format(self.QUAGGA_VOLUME)
        else:
            cmd = 'zebra -u root -g root -f {0}/zebra.conf'.format(self.SHARED_VOLUME)
        self.local(cmd, detach=True)
    def _start_ospfd(self):
        """Start ospfd; Quagga layout for ZAPI v2, FRR-style otherwise."""
        if self.zapi_version == 2:
            cmd = 'cp {0}/ospfd.conf {1}/'.format(self.SHARED_VOLUME, self.QUAGGA_VOLUME)
            self.local(cmd)
            cmd = '/usr/lib/quagga/ospfd -f {0}/ospfd.conf'.format(self.QUAGGA_VOLUME)
        else:
            cmd = 'ospfd -u root -g root -f {0}/ospfd.conf'.format(self.SHARED_VOLUME)
        self.local(cmd, detach=True)
    def run(self):
        """Start the container, optional zebra/ospfd daemons, then gobgpd."""
        super(GoBGPContainer, self).run()
        if self.zebra:
            self._start_zebra()
        if self.ospfd_config:
            self._start_ospfd()
        self._start_gobgp()
        return self.WAIT_FOR_BOOT
@staticmethod
def _get_as_path(path):
asps = (p['as_paths'] for p in path['attrs']
if p['type'] == BGP_ATTR_TYPE_AS_PATH and 'as_paths' in p and p['as_paths'] is not None)
asps = chain.from_iterable(asps)
asns = (asp['asns'] for asp in asps)
return list(chain.from_iterable(asns))
@staticmethod
def _get_nexthop(path):
for p in path['attrs']:
if p['type'] == BGP_ATTR_TYPE_NEXT_HOP or p['type'] == BGP_ATTR_TYPE_MP_REACH_NLRI:
return p['nexthop']
@staticmethod
def _get_local_pref(path):
for p in path['attrs']:
if p['type'] == BGP_ATTR_TYPE_LOCAL_PREF:
return p['value']
return None
@staticmethod
def _get_med(path):
for p in path['attrs']:
if p['type'] == BGP_ATTR_TYPE_MULTI_EXIT_DISC:
return p['metric']
return None
    def _trigger_peer_cmd(self, cmd, peer):
        """Run 'gobgp neighbor <peer-addr> <cmd>' inside the container."""
        peer_addr = self.peer_name(peer)
        cmd = 'gobgp neighbor {0} {1}'.format(peer_addr, cmd)
        self.local(cmd)
    def disable_peer(self, peer):
        """Administratively disable the session to *peer*."""
        self._trigger_peer_cmd('disable', peer)
    def enable_peer(self, peer):
        """Administratively enable the session to *peer*."""
        self._trigger_peer_cmd('enable', peer)
    def reset(self, peer):
        """Hard-reset the BGP session to *peer*."""
        self._trigger_peer_cmd('reset', peer)
    def softreset(self, peer, rf='ipv4', type='in'):
        """Soft-reset the session ('in' or 'out') for the given family.

        NOTE: the 'type' parameter shadows the builtin; kept for backward
        compatibility with callers passing it as a keyword.
        """
        self._trigger_peer_cmd('softreset{0} -a {1}'.format(type, rf), peer)
    def get_local_rib(self, peer, prefix='', rf='ipv4'):
        """Return the per-peer local RIB as a list of destination dicts.

        Each path dict is enriched with nexthop/aspath/local-pref/med/prefix.
        """
        peer_addr = self.peer_name(peer)
        cmd = 'gobgp -j neighbor {0} local {1} -a {2}'.format(peer_addr, prefix, rf)
        output = self.local(cmd, capture=True)
        ret = json.loads(output)
        dsts = []
        for k, v in ret.iteritems():
            for p in v:
                p["nexthop"] = self._get_nexthop(p)
                p["aspath"] = self._get_as_path(p)
                p["local-pref"] = self._get_local_pref(p)
                p["med"] = self._get_med(p)
                p["prefix"] = k
            dsts.append({'paths': v, 'prefix': k})
        return dsts
    def get_global_rib(self, prefix='', rf='ipv4'):
        """Return the global RIB as a list of destination dicts.

        Same enrichment as get_local_rib() (nexthop/aspath/local-pref/med).
        """
        cmd = 'gobgp -j global rib {0} -a {1}'.format(prefix, rf)
        output = self.local(cmd, capture=True)
        ret = json.loads(output)
        dsts = []
        for k, v in ret.iteritems():
            for p in v:
                p["nexthop"] = self._get_nexthop(p)
                p["aspath"] = self._get_as_path(p)
                p["local-pref"] = self._get_local_pref(p)
                p["med"] = self._get_med(p)
                p["prefix"] = k
            dsts.append({'paths': v, 'prefix': k})
        return dsts
    def monitor_global_rib(self, queue, rf='ipv4'):
        """Stream global RIB updates into *queue* from a daemon thread."""
        host = self.ip_addrs[0][1].split('/')[0]
        # Make the gobgp CLI binary available in the shared config dir.
        if not os.path.exists('{0}/gobgp'.format(self.config_dir)):
            self.local('cp /go/bin/gobgp {0}/'.format(self.SHARED_VOLUME))
        args = '{0}/gobgp -u {1} -j monitor global rib -a {2}'.format(self.config_dir, host, rf).split(' ')
        def monitor():
            # Each stdout line is a JSON list holding one path; enrich and
            # push it onto the caller's queue.
            process = subprocess.Popen(args, stdout=subprocess.PIPE)
            for line in iter(process.stdout.readline, ''):
                p = json.loads(line)[0]
                p["nexthop"] = self._get_nexthop(p)
                p["aspath"] = self._get_as_path(p)
                p["local-pref"] = self._get_local_pref(p)
                p["med"] = self._get_med(p)
                queue.put(p)
        t = Thread(target=monitor)
        t.daemon = True
        t.start()
    def _get_adj_rib(self, adj_type, peer, prefix='', rf='ipv4'):
        """Return adj-RIB ('in' or 'out') paths for *peer*, enriched like
        get_local_rib(); one (the first) path per destination.
        """
        peer_addr = self.peer_name(peer)
        cmd = 'gobgp neighbor {0} adj-{1} {2} -a {3} -j'.format(peer_addr,
                                                                adj_type,
                                                                prefix, rf)
        output = self.local(cmd, capture=True)
        ret = [p[0] for p in json.loads(output).itervalues()]
        for p in ret:
            p["nexthop"] = self._get_nexthop(p)
            p["aspath"] = self._get_as_path(p)
            p["prefix"] = p['nlri']['prefix']
            p["local-pref"] = self._get_local_pref(p)
            p["med"] = self._get_med(p)
        return ret
    def get_adj_rib_in(self, peer, prefix='', rf='ipv4'):
        """Return adj-RIB-in paths received from *peer*."""
        return self._get_adj_rib('in', peer, prefix, rf)
    def get_adj_rib_out(self, peer, prefix='', rf='ipv4'):
        """Return adj-RIB-out paths advertised to *peer*."""
        return self._get_adj_rib('out', peer, prefix, rf)
    def get_neighbor(self, peer):
        """Return the JSON neighbor description for *peer* as a dict."""
        cmd = 'gobgp -j neighbor {0}'.format(self.peer_name(peer))
        return json.loads(self.local(cmd, capture=True))
    def get_neighbor_state(self, peer):
        """Return the BGP session state string for *peer*."""
        return self.get_neighbor(peer)['state']['session-state']
    def clear_policy(self):
        """Reset all routing-policy state (global and per-peer)."""
        self.policies = {}
        for info in self.peers.itervalues():
            info['policies'] = {}
        self.prefix_set = []
        self.neighbor_set = []
        self.statements = []
def set_prefix_set(self, ps):
if not isinstance(ps, list):
ps = [ps]
self.prefix_set = ps
def add_prefix_set(self, ps):
if self.prefix_set is None:
self.prefix_set = []
self.prefix_set.append(ps)
def set_neighbor_set(self, ns):
if not isinstance(ns, list):
ns = [ns]
self.neighbor_set = ns
def add_neighbor_set(self, ns):
if self.neighbor_set is None:
self.neighbor_set = []
self.neighbor_set.append(ns)
    def set_bgp_defined_set(self, bs):
        """Set the bgp-defined-sets section of the generated config."""
        self.bgp_set = bs
    def create_config(self):
        """Write gobgpd (and optional zebra/ospfd) config files."""
        self._create_config_bgp()
        if self.zebra:
            self._create_config_zebra()
        if self.ospfd_config:
            self._create_config_ospfd()
def _create_config_bgp(self):
config = {
'global': {
'config': {
'as': self.asn,
'router-id': self.router_id,
},
'route-selection-options': {
'config': {
'external-compare-router-id': True,
},
},
},
'neighbors': [],
}
if self.zebra and self.zapi_version == 2:
config['global']['use-multiple-paths'] = {'config': {'enabled': True}}
for peer, info in self.peers.iteritems():
afi_safi_list = []
if info['interface'] != '':
afi_safi_list.append({'config':{'afi-safi-name': 'ipv4-unicast'}})
afi_safi_list.append({'config':{'afi-safi-name': 'ipv6-unicast'}})
else:
version = netaddr.IPNetwork(info['neigh_addr']).version
if version == 4:
afi_safi_list.append({'config':{'afi-safi-name': 'ipv4-unicast'}})
elif version == 6:
afi_safi_list.append({'config':{'afi-safi-name': 'ipv6-unicast'}})
else:
Exception('invalid ip address version. {0}'.format(version))
if info['vpn']:
afi_safi_list.append({'config': {'afi-safi-name': 'l3vpn-ipv4-unicast'}})
afi_safi_list.append({'config': {'afi-safi-name': 'l3vpn-ipv6-unicast'}})
afi_safi_list.append({'config': {'afi-safi-name': 'l2vpn-evpn'}})
afi_safi_list.append({'config': {'afi-safi-name': 'rtc'}, 'route-target-membership': {'config': {'deferral-time': 10}}})
if info['flowspec']:
afi_safi_list.append({'config': {'afi-safi-name': 'ipv4-flowspec'}})
afi_safi_list.append({'config': {'afi-safi-name': 'l3vpn-ipv4-flowspec'}})
afi_safi_list.append({'config': {'afi-safi-name': 'ipv6-flowspec'}})
afi_safi_list.append({'config': {'afi-safi-name': 'l3vpn-ipv6-flowspec'}})
neigh_addr = None
interface = None
if info['interface'] == '':
neigh_addr = info['neigh_addr'].split('/')[0]
else:
interface = info['interface']
n = {
'config': {
'neighbor-address': neigh_addr,
'neighbor-interface': interface,
'peer-as': peer.asn,
'auth-password': info['passwd'],
'vrf': info['vrf'],
'remove-private-as': info['remove_private_as'],
},
'afi-safis': afi_safi_list,
'timers': {
'config': {
'connect-retry': 10,
},
},
'transport': {
'config': {},
},
}
n['as-path-options'] = {'config': {}}
if info['allow_as_in'] > 0:
n['as-path-options']['config']['allow-own-as'] = info['allow_as_in']
if info['replace_peer_as']:
n['as-path-options']['config']['replace-peer-as'] = info['replace_peer_as']
if ':' in info['local_addr']:
n['transport']['config']['local-address'] = info['local_addr'].split('/')[0]
if info['passive']:
n['transport']['config']['passive-mode'] = True
if info['is_rs_client']:
n['route-server'] = {'config': {'route-server-client': True}}
if info['local_as']:
n['config']['local-as'] = info['local_as']
if info['prefix_limit']:
for v in afi_safi_list:
v['prefix-limit'] = {'config': {'max-prefixes': info['prefix_limit'], 'shutdown-threshold-pct': 80}}
if info['graceful_restart'] is not None:
n['graceful-restart'] = {'config': {'enabled': True, 'restart-time': 20}}
for afi_safi in afi_safi_list:
afi_safi['mp-graceful-restart'] = {'config': {'enabled': True}}
if info['llgr'] is not None:
n['graceful-restart']['config']['restart-time'] = 1
n['graceful-restart']['config']['long-lived-enabled'] = True
for afi_safi in afi_safi_list:
afi_safi['long-lived-graceful-restart'] = {'config': {'enabled': True, 'restart-time': 30}}
if info['is_rr_client']:
cluster_id = self.router_id
if 'cluster_id' in info and info['cluster_id'] is not None:
cluster_id = info['cluster_id']
n['route-reflector'] = {'config': {'route-reflector-client': True,
'route-reflector-cluster-id': cluster_id}}
if len(info.get('default-policy', [])) + len(info.get('policies', [])) > 0:
n['apply-policy'] = {'config': {}}
for typ, p in info.get('policies', {}).iteritems():
n['apply-policy']['config']['{0}-policy-list'.format(typ)] = [p['name']]
def _f(v):
if v == 'reject':
return 'reject-route'
elif v == 'accept':
return 'accept-route'
raise Exception('invalid default policy type {0}'.format(v))
for typ, d in info.get('default-policy', {}).iteritems():
n['apply-policy']['config']['default-{0}-policy'.format(typ)] = _f(d)
config['neighbors'].append(n)
config['defined-sets'] = {}
if self.prefix_set:
config['defined-sets']['prefix-sets'] = self.prefix_set
if self.neighbor_set:
config['defined-sets']['neighbor-sets'] = self.neighbor_set
if self.bgp_set:
config['defined-sets']['bgp-defined-sets'] = self.bgp_set
policy_list = []
for p in self.policies.itervalues():
policy = {'name': p['name']}
if 'statements' in p:
policy['statements'] = p['statements']
policy_list.append(policy)
if len(policy_list) > 0:
config['policy-definitions'] = policy_list
if self.zebra:
config['zebra'] = {'config': {'enabled': True,
'redistribute-route-type-list': ['connect'],
'version': self.zapi_version}}
with open('{0}/gobgpd.conf'.format(self.config_dir), 'w') as f:
print colors.yellow('[{0}\'s new gobgpd.conf]'.format(self.name))
if self.config_format is 'toml':
raw = toml.dumps(config)
elif self.config_format is 'yaml':
raw = yaml.dump(config)
elif self.config_format is 'json':
raw = json.dumps(config)
else:
raise Exception('invalid config_format {0}'.format(self.config_format))
print colors.yellow(indent(raw))
f.write(raw)
    def _create_config_zebra(self):
        """Write a minimal zebra.conf with packet/kernel/rib debugging on."""
        c = CmdBuffer()
        c << 'hostname zebra'
        c << 'password zebra'
        c << 'log file {0}/zebra.log'.format(self.SHARED_VOLUME)
        c << 'debug zebra packet'
        c << 'debug zebra kernel'
        c << 'debug zebra rib'
        c << ''
        with open('{0}/zebra.conf'.format(self.config_dir), 'w') as f:
            print colors.yellow('[{0}\'s new zebra.conf]'.format(self.name))
            print colors.yellow(indent(str(c)))
            f.writelines(str(c))
    def _create_config_ospfd(self):
        """Write ospfd.conf from self.ospfd_config (redistributes/networks)."""
        c = CmdBuffer()
        c << 'hostname ospfd'
        c << 'password zebra'
        c << 'router ospf'
        for redistribute in self.ospfd_config.get('redistributes', []):
            c << ' redistribute {0}'.format(redistribute)
        for network, area in self.ospfd_config.get('networks', {}).items():
            c << ' network {0} area {1}'.format(network, area)
        c << 'log file {0}/ospfd.log'.format(self.SHARED_VOLUME)
        c << ''
        with open('{0}/ospfd.conf'.format(self.config_dir), 'w') as f:
            print colors.yellow('[{0}\'s new ospfd.conf]'.format(self.name))
            print colors.yellow(indent(str(c)))
            f.writelines(str(c))
def reload_config(self):
daemon = ['gobgpd']
if self.zebra:
daemon.append('zebra')
if self.ospfd_config:
daemon.append('ospfd')
for d in daemon:
cmd = '/usr/bin/pkill {0} -SIGHUP'.format(d)
self.local(cmd)
for v in self.routes.itervalues():
if v['rf'] == 'ipv4' or v['rf'] == 'ipv6':
r = CmdBuffer(' ')
r << 'gobgp global -a {0}'.format(v['rf'])
r << 'rib add {0}'.format(v['prefix'])
if v['next-hop']:
r << 'nexthop {0}'.format(v['next-hop'])
if v['local-pref']:
r << 'local-pref {0}'.format(v['local-pref'])
if v['med']:
r << 'med {0}'.format(v['med'])
cmd = str(r)
elif v['rf'] == 'ipv4-flowspec' or v['rf'] == 'ipv6-flowspec':
cmd = 'gobgp global '\
'rib add match {0} then {1} -a {2}'.format(' '.join(v['matchs']), ' '.join(v['thens']), v['rf'])
else:
raise Exception('unsupported route faily: {0}'.format(v['rf']))
self.local(cmd)
class RawGoBGPContainer(GoBGPContainer):
def __init__(self, name, config, ctn_image_name='osrg/gobgp',
log_level='debug', zebra=False, config_format='yaml'):
if config_format is 'toml':
d = toml.loads(config)
elif config_format is 'yaml':
d = yaml.load(config)
elif config_format is 'json':
d = json.loads(config)
else:
raise Exception('invalid config format {0}'.format(config_format))
asn = d['global']['config']['as']
router_id = d['global']['config']['router-id']
self.config = config
super(RawGoBGPContainer, self).__init__(name, asn, router_id,
ctn_image_name, log_level,
zebra, config_format)
def create_config(self):
with open('{0}/gobgpd.conf'.format(self.config_dir), 'w') as f:
print colors.yellow('[{0}\'s new gobgpd.conf]'.format(self.name))
print colors.yellow(indent(self.config))
f.write(self.config)
| |
# -*- coding: utf-8 -*-
"""
Fields for Discontinous Galerkin method
"""
import numpy as nm
import six
from numpy.lib.stride_tricks import as_strided
from six.moves import range
from sfepy.base.base import (output, assert_, Struct)
from sfepy.discrete import Integral, PolySpace
from sfepy.discrete.common.fields import parse_shape
from sfepy.discrete.fem.fields_base import FEField
from sfepy.discrete.fem.mappings import VolumeMapping
def get_unraveler(n_el_nod, n_cell):
    """Build a function that unpacks serialized DOF data, i.e. reshapes an
    array of shape (n_el_nod*n_cell, 1) into (n_cell, n_el_nod, 1).

    The returned callable yields a non-writeable, zero-copy view of its
    input.

    Parameters
    ----------
    n_el_nod : int
        Number of DOFs per element.
    n_cell : int
        Number of cells.

    Returns
    -------
    unravel : callable
    """
    def unravel(u):
        """Return a read-only (n_cell, n_el_nod, 1) view of the flat
        solution array *u* of shape (n_cell*n_el_nod, 1).
        """
        row_stride = u.strides[0]
        return as_strided(
            u,
            shape=(n_cell, n_el_nod, 1),
            strides=(row_stride * n_el_nod, row_stride, row_stride),
            writeable=False,
        )
    return unravel
def get_raveler(n_el_nod, n_cell):
    """Build a function that packs DOF data, i.e. reshapes an array of
    shape (n_cell, n_el_nod, 1) into (n_el_nod*n_cell, 1).

    The returned callable gives a view of the input when the layout allows
    it (ravel copies otherwise).

    Parameters
    ----------
    n_el_nod : int
        Number of DOFs per element.
    n_cell : int
        Number of cells.

    Returns
    -------
    ravel : callable
    """
    def ravel(u):
        """Return the (n_cell*n_el_nod, 1) packed form of *u* of shape
        (n_cell, n_el_nod, 1).
        """
        flat = nm.ravel(u)
        return flat.reshape(-1, 1)
    return ravel
# mapping between geometry element types
# and their facets types
# TODO move to sfepy/discrete/fem/geometry_element.py?
cell_facet_gel_name = {
    "1_2": "0_1",   # line -> point
    "2_3": "1_2",   # triangle -> line
    "2_4": "1_2",   # quadrilateral -> line
    "3_4": "2_3",   # tetrahedron -> triangle
    "3_8": "2_4"    # hexahedron -> quadrilateral
}
def get_gel(region):
    """Return the base geometry element shared by all cells of the region.

    Parameters
    ----------
    region : sfepy.discrete.common.region.Region

    Returns
    -------
    gel :
        base geometry element of the region

    Raises
    ------
    ValueError
        When the region's cells are not all of a single reference geometry.
    """
    cmesh = region.domain.cmesh
    cell_types = cmesh.cell_types
    for gel in region.domain.geom_els.values():
        if (cell_types[region.cells] == cmesh.key_to_index[gel.name]).all():
            return gel
    raise ValueError('Region {} contains multiple'
                     ' reference geometries!'.format(region))
class DGField(FEField):
    """Class for usage with DG terms, provides functionality for Discontinous
    Galerkin method like neighbour look up, projection to discontinuous basis
    and correct DOF treatment.
    """
    # Identifier used by sfepy's field registration machinery.
    family_name = 'volume_DG_legendre_discontinuous'
    # DG fields live on cells (volume), never on surfaces.
    is_surface = False
    def __init__(self, name, dtype, shape, region, space="H1",
                 poly_space_base="legendre", approx_order=1, integral=None):
        """
        Creates DGField, with Legendre polyspace and default integral
        corresponding to 2 * approx_order.
        Parameters
        ----------
        name : string
        dtype : type
        shape : string
            'vector', 'scalar' or something else
        region : sfepy.discrete.common.region.Region
        space : string
            default "H1"
        poly_space_base : PolySpace
            optionally force polyspace
        approx_order : 0 for FVM, default 1
        integral : Integral
            if None integral of order 2*approx_order is created
        """
        shape = parse_shape(shape, region.domain.shape.dim)
        Struct.__init__(self, name=name, dtype=dtype, shape=shape,
                        region=region)
        # approx_order may arrive as a (order, ...) tuple; only the order
        # itself is used.
        if isinstance(approx_order, tuple):
            self.approx_order = approx_order[0]
        else:
            self.approx_order = approx_order
        # geometry
        self.domain = region.domain
        self.region = region
        self.dim = region.tdim
        self._setup_geometry()
        self._setup_connectivity()
        # TODO treat domains embedded into higher dimensional spaces?
        # Simplices have dim+1 facets; tensor-product cells have 2*dim... the
        # 2**dim below matches quads (4) and hexes (8) for dim 2 and 3.
        self.n_el_facets = self.dim + 1 if self.gel.is_simplex else 2**self.dim
        # approximation space
        self.space = space
        self.poly_space_base = poly_space_base
        self.force_bubble = False
        self._create_interpolant()
        # DOFs
        self._setup_shape()
        self._setup_all_dofs()
        self.ravel_sol = get_raveler(self.n_el_nod, self.n_cell)
        self.unravel_sol = get_unraveler(self.n_el_nod, self.n_cell)
        # integral
        self.clear_qp_base()
        self.clear_facet_qp_base()
        if integral is None:
            self.integral = Integral("dg_fi", order = 2 * self.approx_order)
        else:
            self.integral = integral
        self.ori = None
        self.basis_transform = None
        # mapping
        self.mappings = {}
        self.mapping = self.create_mapping(self.region, self.integral, "volume",
                                           return_mapping=True)[1]
        self.mappings0 = {}
        # neighbour facet mapping and data caches
        # TODO use lru cache or different method?
        self.clear_facet_neighbour_idx_cache()
        self.clear_normals_cache()
        self.clear_facet_vols_cache()
        self.boundary_facet_local_idx = {}
    def _create_interpolant(self):
        """Create the Legendre PolySpace for this field's geometry/order."""
        name = self.gel.name + '_DG_legendre'
        ps = PolySpace.any_from_args(name, self.gel, self.approx_order,
                                     base=self.poly_space_base,
                                     force_bubble=False)
        self.poly_space = ps
        # 'legendre_simplex' is created for '1_2'.
        # Tensor-product geometries use the extended basis.
        if self.gel.name in ["2_4", "3_8"]:
            self.extended = True
        else:
            self.extended = False
    def _setup_all_dofs(self):
        """Sets up all the differet kinds of DOFs, for DG only bubble DOFs"""
        self.n_el_nod = self.poly_space.n_nod
        self.n_vertex_dof = 0  # in DG we will propably never need vertex DOFs
        self.n_edge_dof = 0  # use facets DOFS for AFS methods
        self.n_face_dof = 0  # use facet DOF for AFS methods
        (self.n_bubble_dof,
         self.bubble_remap,
         self.bubble_dofs) = self._setup_bubble_dofs()
        # Total is just the bubble count since all other kinds are zero.
        self.n_nod = self.n_vertex_dof + self.n_edge_dof \
                     + self.n_face_dof + self.n_bubble_dof
    def _setup_bubble_dofs(self):
        """Creates DOF information for so called element, cell or bubble DOFs
        - the only DOFs used in DG
        n_dof = n_cells * n_el_nod
        remap optional remapping between cells
        dofs is mapping between dofs and cells
        Returns
        -------
        n_dof : int
        remap : ndarray
        dofs : ndarray
        """
        self.n_cell = self.region.get_n_cells(self.is_surface)
        n_dof = self.n_cell * self.n_el_nod
        # DOFs are numbered consecutively, n_el_nod per cell.
        dofs = nm.arange(n_dof, dtype=nm.int32)\
                 .reshape(self.n_cell, self.n_el_nod)
        remap = nm.arange(self.n_cell)
        self.econn = dofs
        # Inverse map: flat DOF index -> owning cell index.
        self.dofs2cells = nm.repeat(nm.arange(self.n_cell), self.n_el_nod)
        return n_dof, remap, dofs
def _setup_shape(self):
"""What is shape used for and what it really means.
Does it represent shape of the problem?
"""
self.n_components = nm.prod(self.shape)
self.val_shape = self.shape
    def _setup_geometry(self):
        """Setup the field region geometry."""
        # get_gel extracts the highest dimension geometry from self.region
        self.gel = get_gel(self.region)
def _setup_connectivity(self):
"""Forces self.domain.mesh to build necessary conductivities
so they are available in self.get_nbrhd_dofs
"""
self.region.domain.mesh.cmesh.setup_connectivity(self.dim, self.dim)
self.region.domain.mesh.cmesh.setup_connectivity(self.dim - 1, self.dim)
self.region.domain.mesh.cmesh.setup_connectivity(self.dim, self.dim - 1)
    def get_coor(self, nods=None):
        """Return "coordinates" of the given DOFs.

        Each bubble DOF gets the centroid of its cell, shifted by a small
        DOF-dependent offset so that DOFs of the same cell are
        distinguishable (used by matching-based algorithms).

        TODO: revise DG_EPBC and EPBC matching?

        Parameters
        ----------
        nods : ndarray, optional
            DOF indices; if None, all bubble DOFs are used.
            (Default value = None)

        Returns
        -------
        coors : ndarray
            Coordinates, one row per DOF.
        """
        if nods is None:
            nods = self.bubble_dofs
        cells = self.dofs2cells[nods]
        coors = self.domain.mesh.cmesh.get_centroids(self.dim)[cells]
        # Offset magnitude small w.r.t. the smallest cell volume.
        eps = min(self.domain.cmesh.get_volumes(self.dim)) / (self.n_el_nod + 2)
        if self.dim == 1:
            # Pad 1D coordinates with a zero second component.
            extended_coors = nm.zeros(nm.shape(coors)[:-1] + (2,))
            extended_coors[:, 0] = coors[:, 0]
            coors = extended_coors
        # shift centroid coors to lie within cells but be different for each dof
        # use coors of facet QPs?
        # NOTE(review): the repeat length is len(unique(cells)), which matches
        # len(nods) only when nods cover whole cells - confirm for partial nods.
        coors += eps * nm.repeat(nm.arange(self.n_el_nod),
                                 len(nm.unique(cells)))[:, None]
        return coors
def clear_facet_qp_base(self):
"""Clears facet_qp_base cache"""
self.facet_bf = {}
self.facet_qp = None
self.facet_whs = None
def _transform_qps_to_facets(self, qps, geo_name):
"""Transforms points given in qps to all facets of the reference element
with geometry geo_name.
Parameters
----------
qps :
qps corresponding to facet dimension to be transformed
geo_name :
element type
Returns
-------
tqps : ndarray
tqps is of shape shape(qps) + (n_el_facets, geo dim)
"""
if geo_name == "1_2":
tqps = nm.zeros(nm.shape(qps) + (2, 1,))
tqps[..., 0, 0] = 0.
tqps[..., 1, 0] = 1.
elif geo_name == "2_3":
tqps = nm.zeros(nm.shape(qps) + (3, 2,))
# 0.
tqps[..., 0, 0] = qps # x = 0 + t
tqps[..., 0, 1] = 0. # y = 0
# 1.
tqps[..., 1, 0] = 1 - qps # x = 1 - t
tqps[..., 1, 1] = qps # y = t
# 2.
tqps[..., 2, 0] = 0 # x = 0
tqps[..., 2, 1] = 1 - qps # y = 1 - t
elif geo_name == "2_4":
tqps = nm.zeros(nm.shape(qps) + (4, 2,))
# 0.
tqps[..., 0, 0] = qps # x = t
tqps[..., 0, 1] = 0. # y = 0
# 1.
tqps[..., 1, 0] = 1 # x = 1
tqps[..., 1, 1] = qps # y = t
# 2.
tqps[..., 2, 0] = 1 - qps # x = 1 -t
tqps[..., 2, 1] = 1 # y = 1
# 3.
tqps[..., 3, 0] = 0 # x = 0
tqps[..., 3, 1] = 1 - qps # y = 1 - t
elif geo_name == "3_4":
# tqps = nm.zeros(nm.shape(qps) + (4, 3,))
raise NotImplementedError("Geometry {} not supported, yet"
.format(geo_name))
elif geo_name == "3_8":
# tqps = nm.zeros(nm.shape(qps) + (8, 3,))
raise NotImplementedError("Geometry {} not supported, yet"
.format(geo_name))
else:
raise NotImplementedError("Geometry {} not supported, yet"
.format(geo_name))
return tqps
def get_facet_qp(self):
"""Returns quadrature points on all facets of the reference element in
array of shape (n_qp, 1 , n_el_facets, dim)
Returns
-------
qps : ndarray
quadrature points
weights : ndarray
Still needs to be transformed to actual facets!
"""
if self.dim == 1:
facet_qps = self._transform_qps_to_facets(nm.zeros((1, 1)), "1_2")
weights = nm.ones((1, 1, 1))
else:
qps, weights = self.integral.get_qp(cell_facet_gel_name[self.gel.name])
weights = weights[None, :, None]
facet_qps = self._transform_qps_to_facets(qps, self.gel.name)
return facet_qps, weights
    def get_facet_base(self, derivative=False, base_only=False):
        """
        Return values of the basis in facet quadrature points.

        The data shape is a bit crazy right now:
            (number of qps, 1, n_el_facets, 1, n_el_nod)
        and for derivative:
            (1, number of qps, (dim,) * derivative, n_el_facets, 1, n_el_nod)

        Results are cached in ``self.facet_bf`` keyed by the derivative
        order; use ``clear_facet_qp_base`` to invalidate.

        Parameters
        ----------
        derivative : truthy or integer
            Derivative order; False/0 means plain basis values.
        base_only : bool
            If True, do not return weights.

        Returns
        -------
        facet_bf : ndarray
            Values of basis functions in facet qps.
        weights : ndarray, optionally
            Weights of qps, returned only when ``base_only`` is False.
        """
        if derivative:
            diff = int(derivative)
        else:
            diff = 0
        if diff in self.facet_bf:
            # Cache hit - reuse basis values and previously stored weights.
            facet_bf = self.facet_bf[diff]
            whs = self.facet_whs
        else:
            qps, whs = self.get_facet_qp()
            ps = self.poly_space
            self.facet_qp = qps
            self.facet_whs = whs
            if derivative:
                facet_bf = nm.zeros((1,) + nm.shape(qps)[:-1] +
                                    (self.dim,) * diff + (self.n_el_nod,))
            else:
                facet_bf = nm.zeros(nm.shape(qps)[:-1] + (1, self.n_el_nod,))
            # Evaluate the basis facet by facet.
            for i in range(self.n_el_facets):
                facet_bf[..., i, :, :] = \
                    ps.eval_base(qps[..., i, :], diff=diff,
                                 transform=self.basis_transform)
            self.facet_bf[diff] = facet_bf
        if base_only:
            return facet_bf
        else:
            return facet_bf, whs
def clear_facet_neighbour_idx_cache(self, region=None):
"""
If region is None clear all!
Parameters
----------
region : sfepy.discrete.common.region.Region
If None clear all.
"""
if region is None:
self.facet_neighbour_index = {}
else:
self.facet_neighbour_index.pop(region.name)
    def get_facet_neighbor_idx(self, region=None, eq_map=None):
        """
        Returns index of cell neighbours sharing facet, along with local index
        of the facet within neighbour; also treats periodic boundary
        conditions, i.e. plugs correct neighbours for cells on a periodic
        boundary. Where there is no neighbour, -1 is stored for both the
        neighbour and facet id.

        Caches the neighbour index in ``self.facet_neighbour_index``.

        Parameters
        ----------
        region : sfepy.discrete.common.region.Region
            Main region, must contain cells.
        eq_map :
            eq_map from state variable containing information on
            EPBC and DG EPBC. (Default value = None)

        Returns
        -------
        facet_neighbours : ndarray
            Shape is ``(n_cell, n_el_facet, 2)``; the first value is the
            index of the neighbouring cell, the second the index of the
            facet in that neighbour cell.
        """
        if region is None or eq_map is None:
            # HOTFIX enabling limiter to obtain connectivity data without
            # knowing eq_map or region
            if self.region.name in self.facet_neighbour_index:
                return self.facet_neighbour_index[self.region.name]
            else:
                raise ValueError("No facet neighbour mapping for main " +
                                 "region {}".format(self.region.name) +
                                 " cached yet, call with region and " +
                                 "eq_map first.")
        if region.name in self.facet_neighbour_index:
            return self.facet_neighbour_index[region.name]
        dim, n_cell, n_el_facets = self.get_region_info(region)
        cmesh = region.domain.mesh.cmesh
        cells = region.cells
        facet_neighbours = nm.zeros((n_cell, n_el_facets, 2), dtype=nm.int32)
        # Facets incident to the region cells, with per-cell offsets.
        c2fi, c2fo = cmesh.get_incident(dim - 1, cells, dim, ret_offsets=True)
        for ic, o1 in enumerate(c2fo[:-1]):  # loop over cells
            o2 = c2fo[ic + 1]
            # get neighbours per facet of the cell
            c2ci, c2co = cmesh.get_incident(dim, c2fi[o1:o2], dim - 1,
                                            ret_offsets=True)
            # Local facet ids of the incident cells.
            ii = cmesh.get_local_ids(c2fi[o1:o2], dim - 1, c2ci, c2co, dim)
            fis = nm.c_[c2ci, ii]
            nbrs = []
            for ifa, of1 in enumerate(c2co[:-1]):  # loop over facets
                of2 = c2co[ifa + 1]
                if of2 == (of1 + 1):  # facet has only one incident cell
                    # Surface facet.
                    nbrs.append([-1, -1])  # c2ci[of1]) # append no neighbours
                else:
                    if c2ci[of1] == cells[ic]:  # do not append the cell itself
                        nbrs.append(fis[of2 - 1])
                    else:
                        nbrs.append(fis[of1])
            facet_neighbours[ic, :, :] = nbrs
        # Apply periodic corrections: FEM EPBC first, DG EPBC may overwrite.
        facet_neighbours = \
            self._set_fem_periodic_facet_neighbours(facet_neighbours, eq_map)
        facet_neighbours = \
            self._set_dg_periodic_facet_neighbours(facet_neighbours, eq_map)
        # cache results
        self.facet_neighbour_index[region.name] = facet_neighbours
        return facet_neighbours
    def _set_dg_periodic_facet_neighbours(self, facet_neighbours, eq_map):
        """Plug DG-EPBC (periodic) neighbours into the incidence array.

        Parameters
        ----------
        facet_neighbours : array_like
            Shape is ``(n_cell, n_el_facet, 2)``; first value is the index
            of the neighbouring cell, the second the index of the facet in
            that neighbour cell.
        eq_map :
            Must contain ``dg_epbc``, a list of pairs of master and slave
            boundary cell/facet mappings.

        Returns
        -------
        facet_neighbours : ndarray
            Updated incidence array (modified in place as well).
        """
        # treat DG EPBC - these are definitely preferred
        # NOTE(review): "3_6" looks odd here - other code in this class uses
        # "3_8" for hexahedra; confirm the intended supported-geometry list.
        if eq_map.n_dg_epbc > 0 and self.gel.name not in ["1_2", "2_4", "3_6"]:
            raise ValueError(
                "Periodic boundary conditions not supported " +
                "for geometry {} elements.".format(self.gel.name))
        dg_epbc = eq_map.dg_epbc
        for master_bc2bfi, slave_bc2bfi in dg_epbc:
            # set neighbours of periodic cells to one another
            facet_neighbours[master_bc2bfi[:, 0], master_bc2bfi[:, 1], 0] = \
                slave_bc2bfi[:, 0]
            facet_neighbours[slave_bc2bfi[:, 0], slave_bc2bfi[:, 1], 0] = \
                master_bc2bfi[:, 0]
            # set neighbour local facet indices
            facet_neighbours[slave_bc2bfi[:, 0], slave_bc2bfi[:, 1], 1] = \
                master_bc2bfi[:, 1]
            facet_neighbours[master_bc2bfi[:, 0], master_bc2bfi[:, 1], 1] =\
                slave_bc2bfi[:, 1]
        return facet_neighbours
    def _set_fem_periodic_facet_neighbours(self, facet_neighbours, eq_map):
        """Plug classical FEM-EPBC neighbours into the incidence array.

        Maybe remove after DG EPBC revision in ``self.get_coor``.

        Parameters
        ----------
        facet_neighbours : array_like
            Shape is ``(n_cell, n_el_facet, 2)``; first value is the index
            of the neighbouring cell, the second the index of the facet in
            that neighbour cell.
        eq_map :
            eq_map from state variable containing information on
            EPBC and DG EPBC.

        Returns
        -------
        facet_neighbours : ndarray
            Updated incidence array (modified in place as well).
        """
        # treat classical FEM EPBCs - we need to correct neighbours
        if eq_map.n_epbc > 0:
            # set neighbours of periodic cells to one another
            mcells = nm.unique(self.dofs2cells[eq_map.master])
            scells = nm.unique(self.dofs2cells[eq_map.slave])
            # Facets of m/s cells that currently have no neighbour (-1).
            mcells_facets = nm.array(
                nm.where(facet_neighbours[mcells] == -1))[1, 0]  # facets mcells
            scells_facets = nm.array(
                nm.where(facet_neighbours[scells] == -1))[1, 0]  # facets scells
            # [1, 0] above: first we need the second axis to get the axis on
            # which facet indices are stored, second we drop the axis with the
            # neighbour local facet index.
            #
            # NOTE(review): for multiple s/mcells this will have to be
            # something like 1 + 2*nm.arange(len(mcells)) - to skip double
            # entries for -1 tags in neighbours and neighbour local facet idx.
            # set neighbours of mcells to scells
            facet_neighbours[mcells, mcells_facets, 0] = scells
            # set neighbour facets to facets of scell missing neighbour
            facet_neighbours[
                mcells, mcells_facets, 1] = scells_facets
            # we do not need to distinguish EBC and EPBC cells, EBC overwrite
            # EPBC, we only need to fix shapes
            # set neighbours of scells to mcells
            facet_neighbours[scells, scells_facets, 0] = mcells
            # set neighbour facets to facets of mcell missing neighbour
            facet_neighbours[
                scells, scells_facets, 1] = mcells_facets
        return facet_neighbours
@staticmethod
def get_region_info(region):
"""
Extracts information about region needed in various methods of DGField
Parameters
----------
region : sfepy.discrete.common.region.Region
Returns
-------
dim, n_cell, n_el_facets
"""
if not region.has_cells():
raise ValueError("Region {} has no cells".format(region.name))
n_cell = region.get_n_cells()
dim = region.tdim
gel = get_gel(region)
n_el_facets = dim + 1 if gel.is_simplex else 2 ** dim
return dim, n_cell, n_el_facets
    def get_both_facet_state_vals(self, state, region,
                                  derivative=None, reduce_nod=True):
        """Computes values of the variable represented by dofs in
        quadrature points located at facets, returns both values -
        inner and outer, along with weights.

        Parameters
        ----------
        state : state variable containing BC info
        region : sfepy.discrete.common.region.Region
        derivative : compute derivative if truthy,
            compute n-th derivative if a number (Default value = None)
        reduce_nod : if False DOES NOT sum nodes into values at QPs
            (Default value = True)

        Returns
        -------
        inner_facet_values (n_cell, n_el_facets, n_qp),
        outer facet values (n_cell, n_el_facets, n_qp),
        weights,
        if derivative is True:
            inner_facet_values (n_cell, n_el_facets, dim, n_qp),
            outer_facet values (n_cell, n_el_facets, dim, n_qp)
        """
        if derivative:
            diff = int(derivative)
        else:
            diff = 0
        unreduce_nod = int(not reduce_nod)
        inner_base_vals, outer_base_vals, whs = \
            self.get_both_facet_base_vals(state, region, derivative=derivative)
        dofs = self.unravel_sol(state.data[0])
        n_qp = whs.shape[-1]
        outputs_shape = (self.n_cell, self.n_el_facets) + \
                        (self.n_el_nod,) * unreduce_nod + \
                        (self.dim,) * diff + \
                        (n_qp,)
        inner_facet_vals = nm.zeros(outputs_shape)
        # Combine DOFs with basis values; keep the node axis when requested.
        if unreduce_nod:
            inner_facet_vals[:] = nm.einsum('id...,idf...->ifd...',
                                            dofs, inner_base_vals)
        else:
            inner_facet_vals[:] = nm.einsum('id...,id...->i...',
                                            dofs, inner_base_vals)
        per_facet_neighbours = self.get_facet_neighbor_idx(region, state.eq_map)
        outer_facet_vals = nm.zeros(outputs_shape)
        # Outer values come from the neighbour cell across each facet.
        for facet_n in range(self.n_el_facets):
            if unreduce_nod:
                outer_facet_vals[:, facet_n, :] = \
                    nm.einsum('id...,id...->id...',
                              dofs[per_facet_neighbours[:, facet_n, 0]],
                              outer_base_vals[:, :, facet_n])
            else:
                outer_facet_vals[:, facet_n, :] = \
                    nm.einsum('id...,id...->i...',
                              dofs[per_facet_neighbours[:, facet_n, 0]],
                              outer_base_vals[:, :, facet_n])
        # Facets without a neighbour (index < 0) are boundary facets;
        # zero them out before applying boundary conditions.
        boundary_cells = nm.array(nm.where(per_facet_neighbours[:, :, 0] < 0)).T
        outer_facet_vals[boundary_cells[:, 0], boundary_cells[:, 1]] = 0.0
        # TODO detect and print boundary cells without defined BCs?
        for ebc, ebc_vals in zip(state.eq_map.dg_ebc.get(diff, []),
                                 state.eq_map.dg_ebc_val.get(diff, [])):
            if unreduce_nod:
                raise NotImplementedError(
                    "Unreduced DOFs are not available for boundary " +
                    "outerfacets")
                # NOTE(review): unreachable after the raise above - kept as a
                # sketch of the intended implementation.
                outer_facet_vals[ebc[:, 0], ebc[:, 1], :] = \
                    nm.einsum("id,id...->id...",
                              ebc_vals, inner_base_vals[0, :, ebc[:, 1]])
            else:
                # fix flipping qp order to accomodate for
                # opposite facet orientation of neighbours
                outer_facet_vals[ebc[:, 0], ebc[:, 1], :] = ebc_vals[:, ::-1]
        # flip of outer_facet_vals moved to get_both_facet_base_vals
        return inner_facet_vals, outer_facet_vals, whs
    def get_both_facet_base_vals(self, state, region, derivative=None):
        """Returns values of the basis function in quadrature points on facets
        broadcasted to all cells - inner to the element as well as outer ones -
        along with weights for the qps broadcasted and transformed to elements.

        Contains a quick fix: the outer values are flipped along the QP axis
        to get the right integration order on neighbouring facets.

        Parameters
        ----------
        state : used to get EPBC info
        region : sfepy.discrete.common.region.Region for connectivity
        derivative : if u need derivative
            (Default value = None)

        Returns
        -------
        inner_facet_base_vals : ndarray
        outer_facet_base_vals : ndarray
            shape (n_cell, n_el_nod, n_el_facet, n_qp) or
            (n_cell, n_el_nod, n_el_facet, dim, n_qp)
            when derivative is True or 1
        whs : ndarray
            shape (n_cell, n_el_facet, n_qp)
        """
        if derivative:
            diff = int(derivative)
        else:
            diff = 0
        facet_bf, whs = self.get_facet_base(derivative=derivative)
        n_qp = nm.shape(whs)[1]
        facet_vols = self.get_facet_vols(region)
        # Scale reference weights by the physical facet volumes.
        whs = facet_vols * whs[None, :, :, 0]
        base_shape = (self.n_cell, self.n_el_nod, self.n_el_facets) + \
                     (self.dim,) * diff + \
                     (n_qp,)
        inner_facet_base_vals = nm.zeros(base_shape)
        outer_facet_base_vals = nm.zeros(base_shape)
        if derivative:
            inner_facet_base_vals[:] = facet_bf[0, :, 0, :, :, :]\
                                           .swapaxes(-2, -3).T
        else:
            inner_facet_base_vals[:] = facet_bf[:, 0, :, 0, :].T
        per_facet_neighbours = self.get_facet_neighbor_idx(region, state.eq_map)
        # numpy prepends shape resulting from multiple
        # indexing before remaining shape
        if derivative:
            outer_facet_base_vals[:] = \
                inner_facet_base_vals[0, :, per_facet_neighbours[:, :, 1]]\
                    .swapaxes(-3, -4)
        else:
            outer_facet_base_vals[:] = \
                inner_facet_base_vals[0, :, per_facet_neighbours[:, :, 1]]\
                    .swapaxes(-2, -3)
        # fix to flip facet QPs for right integration order
        return inner_facet_base_vals, outer_facet_base_vals[..., ::-1], whs
def clear_normals_cache(self, region=None):
"""Clears normals cache for given region or all regions.
Parameters
----------
region : sfepy.discrete.common.region.Region
region to clear cache or None to clear all
"""
if region is None:
self.normals_cache = {}
else:
if isinstance(region, str):
self.normals_cache.pop(region)
else:
self.normals_cache.pop(region.name)
    def get_cell_normals_per_facet(self, region):
        """Return facet normals arranged per cell.

        Caches results; use ``clear_normals_cache`` to clear the cache.

        Parameters
        ----------
        region : sfepy.discrete.common.region.Region
            Main region, must contain cells.

        Returns
        -------
        normals : ndarray
            Normals of facets in array of shape (n_cell, n_el_facets, dim).
        """
        if region.name in self.normals_cache:
            return self.normals_cache[region.name]
        dim, n_cell, n_el_facets = self.get_region_info(region)
        cmesh = region.domain.mesh.cmesh
        normals = cmesh.get_facet_normals()
        normals_out = nm.zeros((n_cell, n_el_facets, dim))
        c2f = cmesh.get_conn(dim, dim - 1)
        for ic, o1 in enumerate(c2f.offsets[:-1]):
            o2 = c2f.offsets[ic + 1]
            # NOTE(review): ``ifa`` is unused; normals are indexed by the
            # incidence position o1 + ifal, i.e. per (cell, facet) pair -
            # presumably get_facet_normals() is ordered the same way; confirm.
            for ifal, ifa in enumerate(c2f.indices[o1:o2]):
                normals_out[ic, ifal] = normals[o1 + ifal]
        self.normals_cache[region.name] = normals_out
        return normals_out
def clear_facet_vols_cache(self, region=None):
"""Clears facet volume cache for given region or all regions.
Parameters
----------
region : sfepy.discrete.common.region.Region
region to clear cache or None to clear all
"""
if region is None:
self.facet_vols_cache = {}
else:
if isinstance(region, str):
self.facet_vols_cache.pop(region)
else:
self.facet_vols_cache.pop(region.name)
    def get_facet_vols(self, region):
        """Return facet volumes arranged per cell.

        Caches results; use ``clear_facet_vols_cache`` to clear the cache.

        Parameters
        ----------
        region : sfepy.discrete.common.region.Region

        Returns
        -------
        vols_out : ndarray
            Volumes of the facets by cells, shape (n_cell, n_el_facets, 1).
        """
        if region.name in self.facet_vols_cache:
            return self.facet_vols_cache[region.name]
        dim, n_cell, n_el_facets = self.get_region_info(region)
        cmesh = region.domain.mesh.cmesh
        if dim == 1:
            # In 1D the facets are points - unit "volume" for each.
            vols = nm.ones((cmesh.num[0], 1))
        else:
            vols = cmesh.get_volumes(dim - 1)[:, None]
        vols_out = nm.zeros((n_cell, n_el_facets, 1))
        c2f = cmesh.get_conn(dim, dim - 1)
        # Gather each cell's facet volumes via the cell-facet connectivity.
        for ic, o1 in enumerate(c2f.offsets[:-1]):
            o2 = c2f.offsets[ic + 1]
            for ifal, ifa in enumerate(c2f.indices[o1:o2]):
                vols_out[ic, ifal] = vols[ifa]
        self.facet_vols_cache[region.name] = vols_out
        return vols_out
def get_data_shape(self, integral, integration='volume', region_name=None):
"""Returns data shape
(n_nod, n_qp, self.gel.dim, self.n_el_nod)
Parameters
----------
integral : integral used
integration :
'volume' is only supported value (Default value = 'volume')
region_name : not used
(Default value = None)
Returns
-------
data_shape : tuple
"""
if integration in ('volume',):
# from FEField.get_data_shape()
_, weights = integral.get_qp(self.gel.name)
n_qp = weights.shape[0]
data_shape = (self.n_cell, n_qp, self.gel.dim, self.n_el_nod)
# econn.shape[1] == n_el_nod i.e. number nod in element
else:
raise NotImplementedError('unsupported integration! (%s)'
% integration)
return data_shape
def get_econn(self, conn_type, region, is_trace=False, integration=None):
"""Getter for econn
Parameters
----------
conn_type : string or Struct
'volume' is only supported
region : sfepy.discrete.common.region.Region
is_trace : ignored
(Default value = False)
integration : ignored
(Default value = None)
Returns
-------
econn : ndarray
connectivity information
"""
ct = conn_type.type if isinstance(conn_type, Struct) else conn_type
if ct == 'volume':
if region.name == self.region.name:
conn = self.econn
else:
raise ValueError("Bad region for the field")
else:
raise ValueError('unknown connectivity type! (%s)' % ct)
return conn
def setup_extra_data(self, geometry, info, is_trace):
"""This is called in create_adof_conns(conn_info, var_indx=None,
active_only=True, verbose=True)
for each variable but has no effect.
Parameters
----------
geometry :
ignored
info :
set to self.info
is_trace :
set to self.trace
"""
# placeholder, what is this used for?
# dct = info.dc_type.type
self.info = info
self.is_trace = is_trace
    def get_dofs_in_region(self, region, merge=True):
        """Return indices of DOFs that belong to the given region.

        Not used in BC treatment.

        Parameters
        ----------
        region : sfepy.discrete.common.region.Region
        merge : bool
            Merge the DOF list into one numpy array, default True.

        Returns
        -------
        dofs : ndarray (when merge is True) or list of ndarrays
        """
        dofs = []
        if region.has_cells():  # main region or its part
            els = nm.ravel(self.bubble_remap[region.cells])
            # Negative remap entries mark cells outside the field.
            eldofs = self.bubble_dofs[els[els >= 0]]
            dofs.append(eldofs)
        else:
            # return DOFs of cells adjacent to the boundary facets
            dim = self.dim
            cmesh = region.domain.mesh.cmesh
            bc_cells = cmesh.get_incident(dim, region.facets, dim - 1)
            bc_dofs = self.bubble_dofs[bc_cells]
            dofs.append(bc_dofs)
        if merge:
            dofs = nm.concatenate(dofs)
        return dofs
def get_bc_facet_idx(self, region):
"""Caches results in self.boundary_facet_local_idx
Parameters
----------
region : sfepy.discrete.common.region.Region
surface region defining BCs
Returns
-------
bc2bfi : ndarray
index of cells on boundary along with corresponding facets
"""
if region.name in self.boundary_facet_local_idx:
return self.boundary_facet_local_idx[region.name]
bc2bfi = region.get_facet_indices()
self.boundary_facet_local_idx[region.name] = bc2bfi
return bc2bfi
    def create_mapping(self, region, integral, integration,
                       return_mapping=True):
        """Create and return the reference mapping of the field.

        Parameters
        ----------
        region : sfepy.discrete.common.region.Region
        integral : Integral
        integration : str
            'volume' is the only accepted option.
        return_mapping : bool
            Also return the VolumeMapping instance. (Default value = True)

        Returns
        -------
        out : CMapping-like object, or (out, mapping) tuple
            The evaluated mapping data; when ``return_mapping`` is True,
            a tuple with the VolumeMapping as the second item.

        Raises
        ------
        ValueError
            For unsupported ``integration`` values.
        """
        domain = self.domain
        coors = domain.get_mesh_coors(actual=True)
        dconn = domain.get_conn()
        # from FEField
        if integration == 'volume':
            qp = self.get_qp('v', integral)
            # qp = self.integral.get_qp(self.gel.name)
            iels = region.get_cells()
            geo_ps = self.gel.poly_space
            ps = self.poly_space
            bf = self.get_base('v', 0, integral, iels=iels)
            conn = nm.take(dconn, iels.astype(nm.int32), axis=0)
            mapping = VolumeMapping(coors, conn, poly_space=geo_ps)
            vg = mapping.get_mapping(qp.vals, qp.weights, poly_space=ps,
                                     ori=self.ori,
                                     transform=self.basis_transform)
            out = vg
        else:
            raise ValueError('unsupported integration geometry type: %s'
                             % integration)
        if out is not None:
            # Store the integral used.
            out.integral = integral
            out.qp = qp
            out.ps = ps
            # Update base.
            out.bf[:] = bf
        if return_mapping:
            out = (out, mapping)
        return out
def set_dofs(self, fun=0.0, region=None, dpn=None, warn=None):
"""Compute projection of fun into the basis, alternatively set DOFs
directly to provided value or values either in main volume region
or in boundary region.
Parameters
----------
fun : callable, scalar or array corresponding to dofs
(Default value = 0.0)
region : sfepy.discrete.common.region.Region
region to set DOFs on (Default value = None)
dpn : number of dofs per element
(Default value = None)
warn :
(Default value = None)
Returns
-------
nods : ndarray
vals : ndarray
"""
if region is None:
region = self.region
return self.set_cell_dofs(fun, region, dpn, warn)
elif region.has_cells():
return self.set_cell_dofs(fun, region, dpn, warn)
elif region.kind_tdim == self.dim - 1:
nods, vals = self.set_facet_dofs(fun, region, dpn, warn)
return nods, vals
def set_cell_dofs(self, fun=0.0, region=None, dpn=None, warn=None):
"""
Compute projection of fun onto the basis, in main region, alternatively
set DOFs directly to provided value or values
Parameters
----------
fun : callable, scallar or array corresponding to dofs
(Default value = 0.0)
region : sfepy.discrete.common.region.Region
region to set DOFs on (Default value = None)
dpn : number of dofs per element
(Default value = None)
warn : not used
(Default value = None)
Returns
-------
nods : ndarray
vals : ndarray
"""
aux = self.get_dofs_in_region(region)
nods = nm.unique(nm.hstack(aux))
if nm.isscalar(fun):
vals = nm.zeros(aux.shape)
vals[:, 0] = fun
vals = nm.hstack(vals)
elif isinstance(fun, nm.ndarray):
# useful for testing, allows to pass complete array of dofs as IC
if nm.shape(fun) == nm.shape(nods):
vals = fun
elif callable(fun):
qp, weights = self.integral.get_qp(self.gel.name)
coors = self.mapping.get_physical_qps(qp)
base_vals_qp = self.poly_space.eval_base(qp)[:, 0, :]
# this drops redundant axis that is returned by eval_base due to
# consistency with derivatives
# left hand, so far only orthogonal basis
# for legendre base this can be calculated exactly
# in 1D it is: 1 / (2 * nm.arange(self.n_el_nod) + 1)
lhs_diag = nm.einsum("q,q...->...", weights, base_vals_qp ** 2)
rhs_vec = nm.einsum("q,q...,iq...->i...",
weights, base_vals_qp, fun(coors))
vals = (rhs_vec / lhs_diag)
# plot for 1D
# from utils.visualizer import plot1D_legendre_dofs, reconstruct
# _legendre_dofs
# import matplotlib.pyplot as plt
# plot1D_legendre_dofs(self.domain.mesh.coors, (vals,), fun)
# ww, xx = reconstruct_legendre_dofs(self.domain.mesh.coors, 1,
# vals.T[..., None, None])
# plt.plot(xx, ww[:, 0], label="reconstructed dofs")
# plt.show()
return nods, vals
    def set_facet_dofs(self, fun, region, dpn, warn):
        """Compute projection of fun onto the basis on facets, alternatively
        set DOFs directly to provided value or values.

        Always raises: setting facet DOFs is not supported for a DGField.
        This is usually the result of using ``ebc`` where ``dgebc`` was
        intended.

        Parameters
        ----------
        fun : callable, scalar or array corresponding to dofs
        region : sfepy.discrete.common.region.Region
            region to set DOFs on
        dpn : int
            number of dofs per element
        warn :
            not used

        Returns
        -------
        nods : ndarray
        vals : ndarray

        Raises
        ------
        NotImplementedError
            Always.
        """
        raise NotImplementedError(
            "Setting facet DOFs is not supported with DGField, " +
            "use values at qp directly. " +
            "This is usually result of using ebc instead of dgebc")
        # NOTE(review): everything below is unreachable - kept as a sketch
        # of a possible future implementation.
        aux = self.get_dofs_in_region(region)
        nods = nm.unique(nm.hstack(aux))
        if nm.isscalar(fun):
            vals = nm.zeros(aux.shape)
            vals[:, 0] = fun
            vals = nm.hstack(vals)
        elif isinstance(fun, nm.ndarray):
            assert_(len(fun) == dpn)
            vals = nm.zeros(aux.shape)
            vals[:, 0] = nm.repeat(fun, vals.shape[0])
        elif callable(fun):
            vals = nm.zeros(aux.shape)
            # set zero DOF to value fun, set other DOFs to zero
            # get facets QPs
            qp, weights = self.get_facet_qp()
            weights = weights[0, :, 0]
            qp = qp[:, 0, :, :]
            # get facets weights ?
            # get coors
            bc2bfi = self.get_bc_facet_idx(region)
            coors = self.mapping.get_physical_qps(qp)
            # get_physical_qps returns data in strange format, swapping
            # some axis and flipping qps order
            bcoors = coors[bc2bfi[:, 1], ::-1, bc2bfi[:, 0], :]
            # get facet basis vals
            base_vals_qp = self.poly_space.eval_base(qp)[:, 0, 0, :]
            # solve for boundary cell DOFs
            bc_val = fun(bcoors)
            # this returns singular matrix - projection on the boundary should
            # be into facet dim space
            #lhs = nm.einsum("q,qd,qc->dc", weights, base_vals_qp, base_vals_qp)
            # inv_lhs = nm.linalg.inv(lhs)
            # rhs_vec = nm.einsum("q,q...,iq...->i...",
            #                    weights, base_vals_qp, bc_val)
        return nods, vals
    def get_bc_facet_values(self, fun, region, ret_coors=False, diff=0):
        """Return values of ``fun`` in the facet QPs of the region.

        Parameters
        ----------
        fun : scalar, ndarray or callable
            Function, value or values to set the qp values to.
        region : sfepy.discrete.common.region.Region
            Boundary region (must not contain cells).
        ret_coors : bool
            Also return physical coors of qps in shape (n_cell, n_qp, dim).
            (Default value = False)
        diff : int
            Derivative order, 0 or 1 supported. (Default value = 0)

        Returns
        -------
        vals : ndarray
            In shape ``(n_cell,) + (self.dim,) * diff + (n_qp,)``.

        Raises
        ------
        NotImplementedError
            If the region contains cells.
        ValueError
            If an ndarray ``fun`` cannot be broadcast to the output shape.
        """
        if region.has_cells():
            raise NotImplementedError(
                "Region {} has cells and can't be used as boundary region".
                format(region))
        # get facets QPs
        qp, weights = self.get_facet_qp()
        weights = weights[0, :, 0]
        qp = qp[:, 0, :, :]
        n_qp = qp.shape[0]
        # get facets weights ?
        # get physical coors
        bc2bfi = self.get_bc_facet_idx(region)
        n_cell = bc2bfi.shape[0]
        coors = self.mapping.get_physical_qps(qp)
        # get_physical_qps returns data in strange format,
        # swapping some axis and flipping qps order
        # to get coors in shape (n_facet, n_qp, n_cell, dim)
        if len(coors.shape) == 3:
            coors = coors[:, None, :, :]  # add axis for qps when it is missing
        coors = coors.swapaxes(0, 2)
        # Select boundary-facet coors; QP order is flipped to match the
        # neighbour facet orientation.
        bcoors = coors[bc2bfi[:, 1], ::-1, bc2bfi[:, 0], :]
        diff_shape = (self.dim,) * diff
        output_shape = (n_cell,) + diff_shape + (n_qp,)
        vals = nm.zeros(output_shape)
        # we do not need last axis of coors, values are scalars
        if nm.isscalar(fun):
            if sum(diff_shape) > 1:
                output(("Warning: Setting gradient of shape {} "
                        "in region {} with scalar value {}")
                       .format(diff_shape, region.name, fun))
            vals[:] = fun
        elif isinstance(fun, nm.ndarray):
            try:
                vals[:] = fun[:, None]
            except ValueError:
                raise ValueError(("Provided values of shape {} could not" +
                                  " be used to set BC qps of shape {} in " +
                                  "region {}")
                                 .format(fun.shape, vals.shape, region.name))
        elif callable(fun):
            # get boundary values
            vals[:] = fun(bcoors)
        if ret_coors:
            return bcoors, vals
        return vals
    def get_nodal_values(self, dofs, region, ref_nodes=None):
        """Compute nodal representation of the DOFs.

        Parameters
        ----------
        dofs : array_like
            DOFs to transform to nodes.
        region : Region
            Ignored.
        ref_nodes : array_like, optional
            Reference nodes to use instead of the default quadrature
            points. (Default value = None)

        Returns
        -------
        nodes : ndarray
            Physical coordinates of the nodes.
        nodal_vals : ndarray
            Values at the nodes.
        """
        if ref_nodes is None:
            # poly_space could provide special nodes
            ref_nodes = self.get_qp('v', self.integral).vals
        base_vals_node = self.poly_space.eval_base(ref_nodes)[:, 0, :]
        # Only the first DOF component is used.
        dofs = self.unravel_sol(dofs[:, 0])
        nodal_vals = nm.sum(dofs * base_vals_node.T, axis=1)
        nodes = self.mapping.get_physical_qps(ref_nodes)
        # import matplotlib.pyplot as plt
        # plt.plot(nodes[:, 0], nodal_vals)
        # plt.show()
        return nodes, nodal_vals
    def create_output(self, dofs, var_name, dof_names=None,
                      key=None, extend=True, fill_value=None,
                      linearization=None):
        """Convert the DOFs corresponding to the field to a dictionary of
        output data usable by Mesh.write().

        For 1D, puts DOFs into variables u_modal{0} ... u_modal{n}, where
        n = approx_order, and marks them for writing as cell data.

        For 2+D, puts DOFs into name_cell_nodes and creates a Struct with:
        mode = "cell_nodes", data and the interpolation scheme.

        Parameters
        ----------
        dofs : ndarray, shape (n_nod, n_component)
            The array of DOFs reshaped so that each column corresponds
            to one component.
        var_name : str
            The variable name corresponding to `dofs`.
        dof_names : tuple of str
            The names of DOF components. (Default value = None)
        key : str, optional
            The key to be used in the output dictionary instead of the
            variable name. (Default value = None)
        extend : bool, not used
            Extend the DOF values to cover the whole domain.
            (Default value = True)
        fill_value : float or complex, not used
            The value used to fill the missing DOF values if `extend` is True.
            (Default value = None)
        linearization : Struct or None, not used
            The linearization configuration for higher order approximations.
            (Default value = None)

        Returns
        -------
        out : dict
        """
        out = {}
        udofs = self.unravel_sol(dofs)
        name = var_name if key is None else key
        if self.dim == 1:
            # One cell-data entry per modal coefficient.
            for i in range(self.n_el_nod):
                out[name + "_modal{}".format(i)] = \
                    Struct(mode="cell", data=udofs[:, i, None, None])
        else:
            interpolation_scheme = self.poly_space.get_interpol_scheme()
            unravel = get_unraveler(self.n_el_nod, self.n_cell)
            out[name + "_cell_nodes"] = Struct(mode="cell_nodes",
                                               data=unravel(dofs)[..., 0],
                                               scheme=interpolation_scheme)
        return out
#
# Copyright (c) 2009, Novartis Institutes for BioMedical Research Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Novartis Institutes for BioMedical Research Inc.
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Created by Greg Landrum, Nov 2008
import random
import copy
""" Implementation of the BRICS algorithm from Degen et al. ChemMedChem *3* 1503-7 (2008)
"""
import sys
import re
import random
from rdkit import Chem
from rdkit.Chem import rdChemReactions as Reactions
# These are the definitions that will be applied to fragment molecules:
# each entry maps a BRICS link-point label ("L1".."L16") to the SMARTS
# environment that an atom must match to carry that label.  Keys with a
# leading '#' (and the commented-out entries below) are disabled.
environs = {
    'L1': '[C;D3]([#0,#6,#7,#8])(=O)',
    #
    # After some discussion, the L2 definitions ("N.pl3" in the original
    # paper) have been removed and incorporated into a (almost) general
    # purpose amine definition in L5 ("N.sp3" in the paper).
    #
    # The problem is one of consistency.
    #    Based on the original definitions you should get the following
    #    fragmentations:
    #      C1CCCCC1NC(=O)C -> C1CCCCC1N[2*].[1*]C(=O)C
    #      c1ccccc1NC(=O)C -> c1ccccc1[16*].[2*]N[2*].[1*]C(=O)C
    #    This difference just didn't make sense to us. By switching to
    #    the unified definition we end up with:
    #      C1CCCCC1NC(=O)C -> C1CCCCC1[15*].[5*]N[5*].[1*]C(=O)C
    #      c1ccccc1NC(=O)C -> c1ccccc1[16*].[5*]N[5*].[1*]C(=O)C
    #
    # 'L2':'[N;!R;!D1;!$(N=*)]-;!@[#0,#6]',
    # this one turned out to be too tricky to define above, so we set it off
    # in its own definition:
    # 'L2a':'[N;D3;R;$(N(@[C;!$(C=*)])@[C;!$(C=*)])]',
    'L3': '[O;D2]-;!@[#0,#6,#1]',
    'L4': '[C;!D1;!$(C=*)]-;!@[#6]',
    # 'L5':'[N;!D1;!$(N*!-*);!$(N=*);!$(N-[!C;!#0])]-[#0,C]',
    'L5': '[N;!D1;!$(N=*);!$(N-[!#6;!#16;!#0;!#1]);!$([N;R]@[C;R]=O)]',
    'L6': '[C;D3;!R](=O)-;!@[#0,#6,#7,#8]',
    'L7a': '[C;D2,D3]-[#6]',
    'L7b': '[C;D2,D3]-[#6]',
    '#L8': '[C;!R;!D1]-;!@[#6]',
    'L8': '[C;!R;!D1;!$(C!-*)]',
    'L9': '[n;+0;$(n(:[c,n,o,s]):[c,n,o,s])]',
    'L10': '[N;R;$(N(@C(=O))@[C,N,O,S])]',
    'L11': '[S;D2](-;!@[#0,#6])',
    'L12': '[S;D4]([#6,#0])(=O)(=O)',
    'L13': '[C;$(C(-;@[C,N,O,S])-;@[N,O,S])]',
    'L14': '[c;$(c(:[c,n,o,s]):[n,o,s])]',
    'L14b': '[c;$(c(:[c,n,o,s]):[n,o,s])]',
    'L15': '[C;$(C(-;@C)-;@C)]',
    'L16': '[c;$(c(:c):c)]',
    'L16b': '[c;$(c(:c):c)]',
}
# reactionDefs: one group per link-point label; each entry is a
# (label1, label2, bondOrder) triple describing a bond that BRICS is
# allowed to cleave (and, for the reverse reactions, to form).  The
# labels index into `environs` above; '-' is a single bond, '=' double.
reactionDefs = (
    # L1
    [
        ('1', '3', '-'),
        ('1', '5', '-'),
        ('1', '10', '-'),
    ],
    # L3
    [
        ('3', '4', '-'),
        ('3', '13', '-'),
        ('3', '14', '-'),
        ('3', '15', '-'),
        ('3', '16', '-'),
    ],
    # L4
    [
        ('4', '5', '-'),
        ('4', '11', '-'),
    ],
    # L5
    [
        ('5', '12', '-'),
        ('5', '14', '-'),
        ('5', '16', '-'),
        ('5', '13', '-'),
        ('5', '15', '-'),
    ],
    # L6
    [
        ('6', '13', '-'),
        ('6', '14', '-'),
        ('6', '15', '-'),
        ('6', '16', '-'),
    ],
    # L7
    [
        ('7a', '7b', '='),
    ],
    # L8
    [
        ('8', '9', '-'),
        ('8', '10', '-'),
        ('8', '13', '-'),
        ('8', '14', '-'),
        ('8', '15', '-'),
        ('8', '16', '-'),
    ],
    # L9
    [
        ('9', '13', '-'),  # not in original paper
        ('9', '14', '-'),  # not in original paper
        ('9', '15', '-'),
        ('9', '16', '-'),
    ],
    # L10
    [
        ('10', '13', '-'),
        ('10', '14', '-'),
        ('10', '15', '-'),
        ('10', '16', '-'),
    ],
    # L11
    [
        ('11', '13', '-'),
        ('11', '14', '-'),
        ('11', '15', '-'),
        ('11', '16', '-'),
    ],
    # L12
    # none left
    # L13
    [
        ('13', '14', '-'),
        ('13', '15', '-'),
        ('13', '16', '-'),
    ],
    # L14
    [
        ('14', '14', '-'),  # not in original paper
        ('14', '15', '-'),
        ('14', '16', '-'),
    ],
    # L15
    [
        ('15', '16', '-'),
    ],
    # L16
    [
        ('16', '16', '-'),  # not in original paper
    ], )
# ---------------------------------------------------------------------------
# Module-level setup: compile the abstract reactionDefs into concrete
# RDKit objects used by the functions below.
# ---------------------------------------------------------------------------

# Build reaction SMARTS for each cleavable bond: match the two labelled
# environments joined by a non-ring bond, break it, and cap both ends
# with the corresponding isotope-labelled dummy atoms.
smartsGps = copy.deepcopy(reactionDefs)
for gp in smartsGps:
    for j, defn in enumerate(gp):
        g1, g2, bnd = defn
        r1 = environs['L' + g1]
        r2 = environs['L' + g2]
        # strip the letter suffix ('7a' -> '7') to get the numeric label
        g1 = re.sub('[a-z,A-Z]', '', g1)
        g2 = re.sub('[a-z,A-Z]', '', g2)
        sma = '[$(%s):1]%s;!@[$(%s):2]>>[%s*]-[*:1].[%s*]-[*:2]' % (r1, bnd, r2, g1, g2)
        gp[j] = sma

# Sanity check: every generated SMARTS must parse into a valid reaction.
for gp in smartsGps:
    for defn in gp:
        try:
            t = Reactions.ReactionFromSmarts(defn)
            t.Initialize()
        except Exception:
            print(defn)
            raise

# Pre-compiled substructure queries for each environment, used as a cheap
# pre-filter before running the per-bond matchers.
environMatchers = {}
for env, sma in environs.items():
    environMatchers[env] = Chem.MolFromSmarts(sma)

# bondMatchers[i] lists (label1, label2, bondType, queryMol) for every bond
# type in reaction group i.
bondMatchers = []
for i, compats in enumerate(reactionDefs):
    tmp = []
    for i1, i2, bType in compats:
        e1 = environs['L%s' % i1]
        e2 = environs['L%s' % i2]
        patt = '[$(%s)]%s;!@[$(%s)]' % (e1, bType, e2)
        patt = Chem.MolFromSmarts(patt)
        tmp.append((i1, i2, bType, patt))
    bondMatchers.append(tmp)

# Forward (cleaving) reactions, grouped as in reactionDefs ...
reactions = tuple([[Reactions.ReactionFromSmarts(y) for y in x] for x in smartsGps])
# ... and their reverses (joining two capped fragments), used by BRICSBuild.
reverseReactions = []
for i, rxnSet in enumerate(smartsGps):
    for j, sma in enumerate(rxnSet):
        rs, ps = sma.split('>>')
        sma = '%s>>%s' % (ps, rs)
        rxn = Reactions.ReactionFromSmarts(sma)
        labels = re.findall(r'\[([0-9]+?)\*\]', ps)
        # _matchers: dummy-atom queries identifying which reactant slot a
        # fragment can occupy in this reverse reaction.
        rxn._matchers = [Chem.MolFromSmiles('[%s*]' % x) for x in labels]
        reverseReactions.append(rxn)
def FindBRICSBonds(mol, randomizeOrder=False, silent=True):
    """ returns the bonds in a molecule that BRICS would cleave

    >>> from rdkit import Chem
    >>> m = Chem.MolFromSmiles('CCCOCC')
    >>> res = list(FindBRICSBonds(m))
    >>> res
    [((3, 2), ('3', '4')), ((3, 4), ('3', '4'))]

    a more complicated case:

    >>> m = Chem.MolFromSmiles('CCCOCCC(=O)c1ccccc1')
    >>> res = list(FindBRICSBonds(m))
    >>> res
    [((3, 2), ('3', '4')), ((3, 4), ('3', '4')), ((6, 8), ('6', '16'))]

    we can also randomize the order of the results:

    >>> random.seed(23)
    >>> res = list(FindBRICSBonds(m,randomizeOrder=True))
    >>> sorted(res)
    [((3, 2), ('3', '4')), ((3, 4), ('3', '4')), ((6, 8), ('6', '16'))]

    Note that this is a generator function :

    >>> res = FindBRICSBonds(m)
    >>> res
    <generator object ...>
    >>> next(res)
    ((3, 2), ('3', '4'))

    >>> m = Chem.MolFromSmiles('CC=CC')
    >>> res = list(FindBRICSBonds(m))
    >>> sorted(res)
    [((1, 2), ('7', '7'))]

    make sure we don't match ring bonds:

    >>> m = Chem.MolFromSmiles('O=C1NCCC1')
    >>> list(FindBRICSBonds(m))
    []

    another nice one, make sure environment 8 doesn't match something connected
    to a ring atom:

    >>> m = Chem.MolFromSmiles('CC1(C)CCCCC1')
    >>> list(FindBRICSBonds(m))
    []

    """
    # Yields ((beginAtomIdx, endAtomIdx), (label1, label2)) tuples; each bond
    # is reported at most once regardless of atom order.
    letter = re.compile('[a-z,A-Z]')
    indices = list(range(len(bondMatchers)))
    bondsDone = set()
    if randomizeOrder:
        # FIX: random.shuffle's 'random' keyword argument was deprecated in
        # Python 3.9 and removed in 3.11; calling shuffle(x, random=...) raises
        # a TypeError on modern interpreters.  The default shuffle is used
        # instead (sequences differ, but callers that care sort the output).
        random.shuffle(indices)

    # Pre-compute which environments occur anywhere in the molecule so that
    # the (more expensive) per-bond matchers can be skipped early.
    envMatches = {}
    for env, patt in environMatchers.items():
        envMatches[env] = mol.HasSubstructMatch(patt)
    for gpIdx in indices:
        if randomizeOrder:
            compats = bondMatchers[gpIdx][:]
            random.shuffle(compats)  # see FIX note above
        else:
            compats = bondMatchers[gpIdx]
        for i1, i2, bType, patt in compats:
            if not envMatches['L' + i1] or not envMatches['L' + i2]:
                continue
            matches = mol.GetSubstructMatches(patt)
            # report numeric labels only ('7a' -> '7')
            i1 = letter.sub('', i1)
            i2 = letter.sub('', i2)
            for match in matches:
                if match not in bondsDone and (match[1], match[0]) not in bondsDone:
                    bondsDone.add(match)
                    yield (((match[0], match[1]), (i1, i2)))
def BreakBRICSBonds(mol, bonds=None, sanitize=True, silent=True):
    """ breaks the BRICS bonds in a molecule and returns the results

    >>> from rdkit import Chem
    >>> m = Chem.MolFromSmiles('CCCOCC')
    >>> m2=BreakBRICSBonds(m)
    >>> Chem.MolToSmiles(m2,True)
    '[3*]O[3*].[4*]CC.[4*]CCC'

    a more complicated case:

    >>> m = Chem.MolFromSmiles('CCCOCCC(=O)c1ccccc1')
    >>> m2=BreakBRICSBonds(m)
    >>> Chem.MolToSmiles(m2,True)
    '[16*]c1ccccc1.[3*]O[3*].[4*]CCC.[4*]CCC([6*])=O'

    can also specify a limited set of bonds to work with:

    >>> m = Chem.MolFromSmiles('CCCOCC')
    >>> m2 = BreakBRICSBonds(m,[((3, 2), ('3', '4'))])
    >>> Chem.MolToSmiles(m2,True)
    '[3*]OCC.[4*]CCC'

    this can be used as an alternate approach for doing a BRICS decomposition by
    following BreakBRICSBonds with a call to Chem.GetMolFrags:

    >>> m = Chem.MolFromSmiles('CCCOCC')
    >>> m2=BreakBRICSBonds(m)
    >>> frags = Chem.GetMolFrags(m2,asMols=True)
    >>> [Chem.MolToSmiles(x,True) for x in frags]
    ['[4*]CCC', '[3*]O[3*]', '[4*]CC']

    """
    if not bonds:
        # No explicit bond list: let RDKit's C++ implementation find and break
        # all BRICS bonds in one shot.
        #bonds = FindBRICSBonds(mol)
        res = Chem.FragmentOnBRICSBonds(mol)
        if sanitize:
            Chem.SanitizeMol(res)
        return res
    eMol = Chem.EditableMol(mol)
    nAts = mol.GetNumAtoms()

    # (dummyAtomIdx, sourceAtomIdx) pairs used below to place each new dummy
    # atom at the coordinates of the atom it replaces across the bond.
    dummyPositions = []
    for indices, dummyTypes in bonds:
        ia, ib = indices
        obond = mol.GetBondBetweenAtoms(ia, ib)
        bondType = obond.GetBondType()
        eMol.RemoveBond(ia, ib)
        # Cap each side of the broken bond with an isotope-labelled dummy
        # atom carrying the BRICS link-point label.
        da, db = dummyTypes
        atoma = Chem.Atom(0)
        atoma.SetIsotope(int(da))
        atoma.SetNoImplicit(True)
        idxa = nAts
        nAts += 1
        eMol.AddAtom(atoma)
        eMol.AddBond(ia, idxa, bondType)

        atomb = Chem.Atom(0)
        atomb.SetIsotope(int(db))
        atomb.SetNoImplicit(True)
        idxb = nAts
        nAts += 1
        eMol.AddAtom(atomb)
        eMol.AddBond(ib, idxb, bondType)
        if mol.GetNumConformers():
            # each dummy sits where the former bond partner was
            dummyPositions.append((idxa, ib))
            dummyPositions.append((idxb, ia))
    res = eMol.GetMol()
    if sanitize:
        Chem.SanitizeMol(res)
    if mol.GetNumConformers():
        # copy coordinates for the new dummy atoms into every conformer
        for conf in mol.GetConformers():
            resConf = res.GetConformer(conf.GetId())
            for ia, pa in dummyPositions:
                resConf.SetAtomPosition(ia, conf.GetAtomPosition(pa))
    return res
def BRICSDecompose(mol, allNodes=None, minFragmentSize=1, onlyUseReactions=None, silent=True,
                   keepNonLeafNodes=False, singlePass=False, returnMols=False):
    """ returns the BRICS decomposition for a molecule

    >>> from rdkit import Chem
    >>> m = Chem.MolFromSmiles('CCCOCc1cc(c2ncccc2)ccc1')
    >>> res = list(BRICSDecompose(m))
    >>> sorted(res)
    ['[14*]c1ccccn1', '[16*]c1cccc([16*])c1', '[3*]O[3*]', '[4*]CCC', '[4*]C[8*]']

    >>> res = list(BRICSDecompose(m,returnMols=True))
    >>> res[0]
    <rdkit.Chem.rdchem.Mol object ...>
    >>> smis = [Chem.MolToSmiles(x,True) for x in res]
    >>> sorted(smis)
    ['[14*]c1ccccn1', '[16*]c1cccc([16*])c1', '[3*]O[3*]', '[4*]CCC', '[4*]C[8*]']

    nexavar, an example from the paper (corrected):

    >>> m = Chem.MolFromSmiles('CNC(=O)C1=NC=CC(OC2=CC=C(NC(=O)NC3=CC(=C(Cl)C=C3)C(F)(F)F)C=C2)=C1')
    >>> res = list(BRICSDecompose(m))
    >>> sorted(res)
    ['[1*]C([1*])=O', '[1*]C([6*])=O', '[14*]c1cc([16*])ccn1', '[16*]c1ccc(Cl)c([16*])c1', '[16*]c1ccc([16*])cc1', '[3*]O[3*]', '[5*]NC', '[5*]N[5*]', '[8*]C(F)(F)F']

    it's also possible to keep pieces that haven't been fully decomposed:

    >>> m = Chem.MolFromSmiles('CCCOCC')
    >>> res = list(BRICSDecompose(m,keepNonLeafNodes=True))
    >>> sorted(res)
    ['CCCOCC', '[3*]OCC', '[3*]OCCC', '[3*]O[3*]', '[4*]CC', '[4*]CCC']

    >>> m = Chem.MolFromSmiles('CCCOCc1cc(c2ncccc2)ccc1')
    >>> res = list(BRICSDecompose(m,keepNonLeafNodes=True))
    >>> sorted(res)
    ['CCCOCc1cccc(-c2ccccn2)c1', '[14*]c1ccccn1', '[16*]c1cccc(-c2ccccn2)c1', '[16*]c1cccc(COCCC)c1', '[16*]c1cccc([16*])c1', '[3*]OCCC', '[3*]OC[8*]', '[3*]OCc1cccc(-c2ccccn2)c1', '[3*]OCc1cccc([16*])c1', '[3*]O[3*]', '[4*]CCC', '[4*]C[8*]', '[4*]Cc1cccc(-c2ccccn2)c1', '[4*]Cc1cccc([16*])c1', '[8*]COCCC']

    or to only do a single pass of decomposition:

    >>> m = Chem.MolFromSmiles('CCCOCc1cc(c2ncccc2)ccc1')
    >>> res = list(BRICSDecompose(m,singlePass=True))
    >>> sorted(res)
    ['CCCOCc1cccc(-c2ccccn2)c1', '[14*]c1ccccn1', '[16*]c1cccc(-c2ccccn2)c1', '[16*]c1cccc(COCCC)c1', '[3*]OCCC', '[3*]OCc1cccc(-c2ccccn2)c1', '[4*]CCC', '[4*]Cc1cccc(-c2ccccn2)c1', '[8*]COCCC']

    setting a minimum size for the fragments:

    >>> m = Chem.MolFromSmiles('CCCOCC')
    >>> res = list(BRICSDecompose(m,keepNonLeafNodes=True,minFragmentSize=2))
    >>> sorted(res)
    ['CCCOCC', '[3*]OCC', '[3*]OCCC', '[4*]CC', '[4*]CCC']
    >>> m = Chem.MolFromSmiles('CCCOCC')
    >>> res = list(BRICSDecompose(m,keepNonLeafNodes=True,minFragmentSize=3))
    >>> sorted(res)
    ['CCCOCC', '[3*]OCC', '[4*]CCC']
    >>> res = list(BRICSDecompose(m,minFragmentSize=2))
    >>> sorted(res)
    ['[3*]OCC', '[3*]OCCC', '[4*]CC', '[4*]CCC']

    """
    # `reactions` is only read here; the global declaration is kept for
    # backwards compatibility with code that rebinds it.
    global reactions
    mSmi = Chem.MolToSmiles(mol, 1)

    if allNodes is None:
        allNodes = set()

    if mSmi in allNodes:
        # molecule was already fully processed in an earlier call sharing
        # the same allNodes cache
        return set()

    # activePool: SMILES -> mol for fragments still to be cleaved by the
    # current (and later) reaction groups
    activePool = {mSmi: mol}
    allNodes.add(mSmi)
    foundMols = {mSmi: mol}
    for gpIdx, reactionGp in enumerate(reactions):
        newPool = {}
        while activePool:
            matched = False
            nSmi = next(iter(activePool))
            mol = activePool.pop(nSmi)
            for rxnIdx, reaction in enumerate(reactionGp):
                if onlyUseReactions and (gpIdx, rxnIdx) not in onlyUseReactions:
                    continue
                if not silent:
                    print('--------')
                    print(smartsGps[gpIdx][rxnIdx])
                ps = reaction.RunReactants((mol, ))
                if ps:
                    if not silent:
                        print(nSmi, '->', len(ps), 'products')
                    for prodSeq in ps:
                        seqOk = True
                        # we want to disqualify small fragments, so sort the product sequence by size
                        tSeq = [(prod.GetNumAtoms(onlyExplicit=True), idx)
                                for idx, prod in enumerate(prodSeq)]
                        tSeq.sort()
                        for nats, idx in tSeq:
                            prod = prodSeq[idx]
                            try:
                                Chem.SanitizeMol(prod)
                            except Exception:
                                # unsanitizable product: skip it but keep
                                # checking the rest of the sequence
                                continue
                            pSmi = Chem.MolToSmiles(prod, 1)
                            if minFragmentSize > 0:
                                # dummy atoms don't count towards fragment size
                                nDummies = pSmi.count('*')
                                if nats - nDummies < minFragmentSize:
                                    seqOk = False
                                    break
                            # stash the canonical SMILES on the product so the
                            # accepted-sequence loop below can reuse it
                            prod.pSmi = pSmi
                        ts = [(x, prodSeq[y]) for x, y in tSeq]
                        prodSeq = ts
                        if seqOk:
                            matched = True
                            for nats, prod in prodSeq:
                                pSmi = prod.pSmi
                                # print('\t',nats,pSmi)
                                if pSmi not in allNodes:
                                    if not singlePass:
                                        # feed the fragment back for further cleavage
                                        activePool[pSmi] = prod
                                    allNodes.add(pSmi)
                                    foundMols[pSmi] = prod
            if singlePass or keepNonLeafNodes or not matched:
                # keep uncleaved (or deliberately retained) fragments around
                newPool[nSmi] = mol
        activePool = newPool
    if not (singlePass or keepNonLeafNodes):
        if not returnMols:
            res = set(activePool.keys())
        else:
            res = activePool.values()
    else:
        if not returnMols:
            res = allNodes
        else:
            res = foundMols.values()
    return res
dummyPattern = Chem.MolFromSmiles('[*]')
def BRICSBuild(fragments, onlyCompleteMols=True, seeds=None, uniquify=True, scrambleReagents=True,
               maxDepth=3):
    """Generator combinatorially reassembling BRICS fragments.

    Arguments:
      - fragments: sequence of Mols carrying BRICS dummy-atom link points
      - onlyCompleteMols: if True, only yield products without remaining
        dummy atoms; otherwise also yield partially-built intermediates
      - seeds: optional subset of fragments to start from (defaults to all)
      - uniquify: suppress products whose canonical SMILES was already seen
      - scrambleReagents: randomize seed and reaction order
      - maxDepth: maximum recursion depth for multi-step assembly
    """
    seen = set()
    if not seeds:
        seeds = list(fragments)
    if scrambleReagents:
        seeds = list(seeds)
        # FIX: random.shuffle's 'random' keyword argument was deprecated in
        # Python 3.9 and removed in 3.11; the plain call works everywhere.
        # (The two identical `if scrambleReagents:` blocks were also merged.)
        random.shuffle(seeds)
        tempReactions = list(reverseReactions)
        random.shuffle(tempReactions)
    else:
        tempReactions = reverseReactions
    for seed in seeds:
        # NOTE: these flags accumulate across reactions for a given seed
        # (matching the original behavior).
        seedIsR1 = False
        seedIsR2 = False
        nextSteps = []
        for rxn in tempReactions:
            if seed.HasSubstructMatch(rxn._matchers[0]):
                seedIsR1 = True
            if seed.HasSubstructMatch(rxn._matchers[1]):
                seedIsR2 = True
            for fragment in fragments:
                ps = None
                if fragment.HasSubstructMatch(rxn._matchers[0]):
                    if seedIsR2:
                        ps = rxn.RunReactants((fragment, seed))
                if fragment.HasSubstructMatch(rxn._matchers[1]):
                    if seedIsR1:
                        ps = rxn.RunReactants((seed, fragment))
                if ps:
                    for p in ps:
                        if uniquify:
                            pSmi = Chem.MolToSmiles(p[0], True)
                            if pSmi in seen:
                                continue
                            else:
                                seen.add(pSmi)
                        if p[0].HasSubstructMatch(dummyPattern):
                            # still has open link points: recurse on it later
                            nextSteps.append(p[0])
                            if not onlyCompleteMols:
                                yield p[0]
                        else:
                            yield p[0]
        if nextSteps and maxDepth > 0:
            for p in BRICSBuild(fragments, onlyCompleteMols=onlyCompleteMols, seeds=nextSteps,
                                uniquify=uniquify, maxDepth=maxDepth - 1,
                                scrambleReagents=scrambleReagents):
                if uniquify:
                    pSmi = Chem.MolToSmiles(p, True)
                    if pSmi in seen:
                        continue
                    else:
                        seen.add(pSmi)
                yield p
# ------- ------- ------- ------- ------- ------- ------- -------
# Begin testing code
# ------------------------------------
#
# doctest boilerplate
#
def _test():
    """Run this module's doctests and return doctest's (failed, tried) result."""
    import doctest
    import sys

    flags = doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE
    return doctest.testmod(sys.modules["__main__"], optionflags=flags)
if __name__ == '__main__':
    import unittest

    class TestCase(unittest.TestCase):
        # Regression tests for BRICSDecompose / BRICSBuild / BreakBRICSBonds;
        # run together with the module doctests (see _test below).

        def test1(self):
            # basic two-way decompositions over several bond environments
            m = Chem.MolFromSmiles('CC(=O)OC')
            res = BRICSDecompose(m)
            self.assertTrue(res)
            self.assertTrue(len(res) == 2)
            m = Chem.MolFromSmiles('CC(=O)N1CCC1=O')
            res = BRICSDecompose(m)
            self.assertTrue(res)
            self.assertTrue(len(res) == 2, res)
            m = Chem.MolFromSmiles('c1ccccc1N(C)C')
            res = BRICSDecompose(m)
            self.assertTrue(res)
            self.assertTrue(len(res) == 2, res)
            m = Chem.MolFromSmiles('c1cccnc1N(C)C')
            res = BRICSDecompose(m)
            self.assertTrue(res)
            self.assertTrue(len(res) == 2, res)
            m = Chem.MolFromSmiles('o1ccnc1N(C)C')
            res = BRICSDecompose(m)
            self.assertTrue(res)
            self.assertTrue(len(res) == 2)
            m = Chem.MolFromSmiles('c1ccccc1OC')
            res = BRICSDecompose(m)
            self.assertTrue(res)
            self.assertTrue(len(res) == 2)
            m = Chem.MolFromSmiles('o1ccnc1OC')
            res = BRICSDecompose(m)
            self.assertTrue(res)
            self.assertTrue(len(res) == 2)
            m = Chem.MolFromSmiles('O1CCNC1OC')
            res = BRICSDecompose(m)
            self.assertTrue(res)
            self.assertTrue(len(res) == 2)
            m = Chem.MolFromSmiles('CCCSCC')
            res = BRICSDecompose(m)
            self.assertTrue(res)
            self.assertTrue(len(res) == 3, res)
            self.assertTrue('[11*]S[11*]' in res, res)
            m = Chem.MolFromSmiles('CCNC(=O)C1CC1')
            res = BRICSDecompose(m)
            self.assertTrue(res)
            self.assertTrue(len(res) == 4, res)
            self.assertTrue('[5*]N[5*]' in res, res)

        def test2(self):
            # example from the paper, nexavar:
            m = Chem.MolFromSmiles(
                'CNC(=O)C1=NC=CC(OC2=CC=C(NC(=O)NC3=CC(=C(Cl)C=C3)C(F)(F)F)C=C2)=C1')
            res = BRICSDecompose(m)
            self.assertTrue(res)
            self.assertTrue(len(res) == 9, res)

        def test3(self):
            # urea-containing molecule: check specific fragments survive
            m = Chem.MolFromSmiles('FC(F)(F)C1=C(Cl)C=CC(NC(=O)NC2=CC=CC=C2)=C1')
            res = BRICSDecompose(m)
            self.assertTrue(res)
            self.assertTrue(len(res) == 5, res)
            self.assertTrue('[5*]N[5*]' in res, res)
            self.assertTrue('[16*]c1ccccc1' in res, res)
            self.assertTrue('[8*]C(F)(F)F' in res, res)

        def test4(self):
            # the allNodes cache accumulates across calls and short-circuits
            # repeated decompositions of the same molecule
            allNodes = set()
            m = Chem.MolFromSmiles('c1ccccc1OCCC')
            res = BRICSDecompose(m, allNodes=allNodes)
            self.assertTrue(res)
            leaves = res
            self.assertTrue(len(leaves) == 3, leaves)
            self.assertTrue(len(allNodes) == 6, allNodes)
            res = BRICSDecompose(m, allNodes=allNodes)
            self.assertFalse(res)
            self.assertTrue(len(allNodes) == 6, allNodes)

            m = Chem.MolFromSmiles('c1ccccc1OCCCC')
            res = BRICSDecompose(m, allNodes=allNodes)
            self.assertTrue(res)
            leaves.update(res)
            self.assertTrue(len(allNodes) == 9, allNodes)
            self.assertTrue(len(leaves) == 4, leaves)

            m = Chem.MolFromSmiles('c1cc(C(=O)NCC)ccc1OCCC')
            res = BRICSDecompose(m, allNodes=allNodes)
            self.assertTrue(res)
            leaves.update(res)
            self.assertTrue(len(leaves) == 8, leaves)
            self.assertTrue(len(allNodes) == 18, allNodes)

        def test5(self):
            # BRICSBuild joins aromatic link points
            allNodes = set()
            frags = [
                '[14*]c1ncncn1',
                '[16*]c1ccccc1',
                '[14*]c1ncccc1',
            ]
            frags = [Chem.MolFromSmiles(x) for x in frags]
            res = BRICSBuild(frags)
            self.assertTrue(res)
            res = list(res)
            self.assertTrue(len(res) == 6)
            smis = [Chem.MolToSmiles(x, True) for x in res]
            self.assertTrue('c1ccc(-c2ccccc2)cc1' in smis)
            self.assertTrue('c1ccc(-c2ccccn2)cc1' in smis)

        def test5a(self):
            allNodes = set()
            frags = [
                '[3*]O[3*]',
                '[16*]c1ccccc1',
            ]
            frags = [Chem.MolFromSmiles(x) for x in frags]
            res = BRICSBuild(frags)
            self.assertTrue(res)
            res = list(res)
            smis = [Chem.MolToSmiles(x, True) for x in res]
            self.assertTrue(len(smis) == 2, smis)
            self.assertTrue('c1ccc(Oc2ccccc2)cc1' in smis)
            self.assertTrue('c1ccc(-c2ccccc2)cc1' in smis)

        def test6(self):
            allNodes = set()
            frags = [
                '[16*]c1ccccc1',
                '[3*]OC',
                '[9*]n1cccc1',
            ]
            frags = [Chem.MolFromSmiles(x) for x in frags]
            res = BRICSBuild(frags)
            self.assertTrue(res)
            res = list(res)
            self.assertTrue(len(res) == 3)
            smis = [Chem.MolToSmiles(x, True) for x in res]
            self.assertTrue('c1ccc(-c2ccccc2)cc1' in smis)
            self.assertTrue('COc1ccccc1' in smis)
            self.assertTrue('c1ccc(-n2cccc2)cc1' in smis, smis)

        def test7(self):
            allNodes = set()
            frags = [
                '[16*]c1ccccc1',
                '[3*]OC',
                '[3*]OCC(=O)[6*]',
            ]
            frags = [Chem.MolFromSmiles(x) for x in frags]
            res = BRICSBuild(frags)
            self.assertTrue(res)
            res = list(res)
            smis = [Chem.MolToSmiles(x, True) for x in res]
            self.assertTrue(len(res) == 3)
            self.assertTrue('c1ccc(-c2ccccc2)cc1' in smis)
            self.assertTrue('COc1ccccc1' in smis)
            self.assertTrue('O=C(COc1ccccc1)c1ccccc1' in smis)

        def test8(self):
            # decompose-then-rebuild roundtrip; also checks that
            # scrambleReagents actually changes the enumeration order
            random.seed(23)
            base = Chem.MolFromSmiles("n1cncnc1OCC(C1CC1)OC1CNC1")
            catalog = BRICSDecompose(base)
            self.assertTrue(len(catalog) == 5, catalog)
            catalog = [Chem.MolFromSmiles(x) for x in catalog]
            ms = list(BRICSBuild(catalog, maxDepth=4, scrambleReagents=False))
            for m in ms:
                Chem.SanitizeMol(m)
            ms = [Chem.MolToSmiles(x) for x in ms]
            self.assertEqual(len(ms), 36)
            ts = ['n1cnc(C2CNC2)nc1', 'n1cnc(-c2ncncn2)nc1', 'C(OC1CNC1)C(C1CC1)OC1CNC1',
                  'n1cnc(OC(COC2CNC2)C2CC2)nc1', 'n1cnc(OCC(OC2CNC2)C2CNC2)nc1']
            ts = [Chem.MolToSmiles(Chem.MolFromSmiles(x), True) for x in ts]
            for t in ts:
                self.assertTrue(t in ms, (t, ms))

            ms2 = list(BRICSBuild(catalog, maxDepth=4, scrambleReagents=False))
            for m in ms2:
                Chem.SanitizeMol(m)
            ms2 = [Chem.MolToSmiles(x) for x in ms2]
            self.assertEqual(ms, ms2)

            ms2 = list(BRICSBuild(catalog, maxDepth=4, scrambleReagents=True))
            for m in ms2:
                Chem.SanitizeMol(m)
            ms2 = [Chem.MolToSmiles(x) for x in ms2]
            self.assertNotEqual(ms, ms2)

        def test9(self):
            # singlePass keeps partially-decomposed intermediates
            m = Chem.MolFromSmiles('CCOc1ccccc1c1ncc(c2nc(NCCCC)ncn2)cc1')
            res = BRICSDecompose(m)
            self.assertEqual(len(res), 7)
            self.assertTrue('[3*]O[3*]' in res)
            self.assertFalse('[14*]c1ncnc(NCCCC)n1' in res)
            res = BRICSDecompose(m, singlePass=True)
            self.assertEqual(len(res), 13)
            self.assertTrue('[3*]OCC' in res)
            self.assertTrue('[14*]c1ncnc(NCCCC)n1' in res)

        def test10(self):
            m = Chem.MolFromSmiles('C1CCCCN1c1ccccc1')
            res = BRICSDecompose(m)
            self.assertEqual(len(res), 2, res)

        def test11(self):
            # test coordinate preservation:
            molblock = """
     RDKit          3D

 13 14  0  0  0  0  0  0  0  0999 V2000
   -1.2004    0.5900    0.6110 C   0  0  0  0  0  0  0  0  0  0  0  0
   -2.2328    1.3173    0.0343 C   0  0  0  0  0  0  0  0  0  0  0  0
   -3.4299    0.6533   -0.1500 C   0  0  0  0  0  0  0  0  0  0  0  0
   -3.3633   -0.7217   -0.3299 C   0  0  0  0  0  0  0  0  0  0  0  0
   -2.1552   -1.3791   -0.2207 C   0  0  0  0  0  0  0  0  0  0  0  0
   -1.1425   -0.7969    0.5335 C   0  0  0  0  0  0  0  0  0  0  0  0
    0.1458   -1.4244    0.4108 O   0  0  0  0  0  0  0  0  0  0  0  0
    1.2976   -0.7398   -0.1026 C   0  0  0  0  0  0  0  0  0  0  0  0
    2.4889   -0.7939    0.5501 N   0  0  0  0  0  0  0  0  0  0  0  0
    3.4615    0.1460    0.3535 C   0  0  0  0  0  0  0  0  0  0  0  0
    3.0116    1.4034   -0.0296 C   0  0  0  0  0  0  0  0  0  0  0  0
    1.9786    1.4264   -0.9435 C   0  0  0  0  0  0  0  0  0  0  0  0
    1.1399    0.3193   -0.9885 C   0  0  0  0  0  0  0  0  0  0  0  0
  1  2  2  0
  2  3  1  0
  3  4  2  0
  4  5  1  0
  5  6  2  0
  6  7  1  0
  7  8  1  0
  8  9  2  0
  9 10  1  0
 10 11  2  0
 11 12  1  0
 12 13  2  0
  6  1  1  0
 13  8  1  0
M  END
"""
            m = Chem.MolFromMolBlock(molblock)
            pieces = BreakBRICSBonds(m)
            frags = Chem.GetMolFrags(pieces, asMols=True)
            self.assertEqual(len(frags), 3)
            self.assertEqual(frags[0].GetNumAtoms(), 7)
            self.assertEqual(frags[1].GetNumAtoms(), 3)
            self.assertEqual(frags[2].GetNumAtoms(), 7)

            # the original atoms keep their coordinates in each fragment, and
            # every dummy atom sits exactly where its former bond partner was
            c1 = m.GetConformer()
            c2 = frags[0].GetConformer()
            for i in range(6):
                p1 = c1.GetAtomPosition(i)
                p2 = c2.GetAtomPosition(i)
                self.assertEqual((p1 - p2).Length(), 0.0)
            p1 = c1.GetAtomPosition(6)
            p2 = c2.GetAtomPosition(6)
            self.assertEqual((p1 - p2).Length(), 0.0)

            c2 = frags[2].GetConformer()
            for i in range(6):
                p1 = c1.GetAtomPosition(i + 7)
                p2 = c2.GetAtomPosition(i)
                self.assertEqual((p1 - p2).Length(), 0.0)
            p1 = c1.GetAtomPosition(6)
            p2 = c2.GetAtomPosition(6)
            self.assertEqual((p1 - p2).Length(), 0.0)

            c2 = frags[1].GetConformer()
            for i in range(1):
                p1 = c1.GetAtomPosition(i + 6)
                p2 = c2.GetAtomPosition(i)
                self.assertEqual((p1 - p2).Length(), 0.0)
            p1 = c1.GetAtomPosition(5)
            p2 = c2.GetAtomPosition(1)
            self.assertEqual((p1 - p2).Length(), 0.0)
            p1 = c1.GetAtomPosition(6)
            p2 = c2.GetAtomPosition(0)
            self.assertEqual((p1 - p2).Length(), 0.0)

            # make sure multiple conformations (include 2D) also work:
            molblock = """
     RDKit          2D

 13 14  0  0  0  0  0  0  0  0999 V2000
   -1.2990   -0.8654    0.0000 C   0  0  0  0  0  0  0  0  0  0  0  0
   -2.5981   -1.6154    0.0000 C   0  0  0  0  0  0  0  0  0  0  0  0
   -3.8971   -0.8654    0.0000 C   0  0  0  0  0  0  0  0  0  0  0  0
   -3.8971    0.6346    0.0000 C   0  0  0  0  0  0  0  0  0  0  0  0
   -2.5981    1.3846    0.0000 C   0  0  0  0  0  0  0  0  0  0  0  0
   -1.2990    0.6346    0.0000 C   0  0  0  0  0  0  0  0  0  0  0  0
   -0.0000    1.3846    0.0000 O   0  0  0  0  0  0  0  0  0  0  0  0
    1.2990    0.6346    0.0000 C   0  0  0  0  0  0  0  0  0  0  0  0
    1.2990   -0.8654    0.0000 N   0  0  0  0  0  0  0  0  0  0  0  0
    2.5981   -1.6154    0.0000 C   0  0  0  0  0  0  0  0  0  0  0  0
    3.8971   -0.8654    0.0000 C   0  0  0  0  0  0  0  0  0  0  0  0
    3.8971    0.6346    0.0000 C   0  0  0  0  0  0  0  0  0  0  0  0
    2.5981    1.3846    0.0000 C   0  0  0  0  0  0  0  0  0  0  0  0
  1  2  2  0
  2  3  1  0
  3  4  2  0
  4  5  1  0
  5  6  2  0
  6  7  1  0
  7  8  1  0
  8  9  2  0
  9 10  1  0
 10 11  2  0
 11 12  1  0
 12 13  2  0
  6  1  1  0
 13  8  1  0
M  END
"""
            m2 = Chem.MolFromMolBlock(molblock)
            m.AddConformer(m2.GetConformer(), assignId=True)
            self.assertEqual(m.GetNumConformers(), 2)

            pieces = BreakBRICSBonds(m)
            frags = Chem.GetMolFrags(pieces, asMols=True)
            self.assertEqual(len(frags), 3)
            self.assertEqual(frags[0].GetNumAtoms(), 7)
            self.assertEqual(frags[1].GetNumAtoms(), 3)
            self.assertEqual(frags[2].GetNumAtoms(), 7)
            self.assertEqual(frags[0].GetNumConformers(), 2)
            self.assertEqual(frags[1].GetNumConformers(), 2)
            self.assertEqual(frags[2].GetNumConformers(), 2)

            c1 = m.GetConformer(0)
            c2 = frags[0].GetConformer(0)
            for i in range(6):
                p1 = c1.GetAtomPosition(i)
                p2 = c2.GetAtomPosition(i)
                self.assertEqual((p1 - p2).Length(), 0.0)
            p1 = c1.GetAtomPosition(6)
            p2 = c2.GetAtomPosition(6)
            self.assertEqual((p1 - p2).Length(), 0.0)

            c2 = frags[2].GetConformer(0)
            for i in range(6):
                p1 = c1.GetAtomPosition(i + 7)
                p2 = c2.GetAtomPosition(i)
                self.assertEqual((p1 - p2).Length(), 0.0)
            p1 = c1.GetAtomPosition(6)
            p2 = c2.GetAtomPosition(6)
            self.assertEqual((p1 - p2).Length(), 0.0)

            c2 = frags[1].GetConformer(0)
            for i in range(1):
                p1 = c1.GetAtomPosition(i + 6)
                p2 = c2.GetAtomPosition(i)
                self.assertEqual((p1 - p2).Length(), 0.0)
            p1 = c1.GetAtomPosition(5)
            p2 = c2.GetAtomPosition(1)
            self.assertEqual((p1 - p2).Length(), 0.0)
            p1 = c1.GetAtomPosition(6)
            p2 = c2.GetAtomPosition(0)
            self.assertEqual((p1 - p2).Length(), 0.0)

            c1 = m.GetConformer(1)
            c2 = frags[0].GetConformer(1)
            for i in range(6):
                p1 = c1.GetAtomPosition(i)
                p2 = c2.GetAtomPosition(i)
                self.assertEqual((p1 - p2).Length(), 0.0)
            p1 = c1.GetAtomPosition(6)
            p2 = c2.GetAtomPosition(6)
            self.assertEqual((p1 - p2).Length(), 0.0)

            c2 = frags[2].GetConformer(1)
            for i in range(6):
                p1 = c1.GetAtomPosition(i + 7)
                p2 = c2.GetAtomPosition(i)
                self.assertEqual((p1 - p2).Length(), 0.0)
            p1 = c1.GetAtomPosition(6)
            p2 = c2.GetAtomPosition(6)
            self.assertEqual((p1 - p2).Length(), 0.0)

            c2 = frags[1].GetConformer(1)
            for i in range(1):
                p1 = c1.GetAtomPosition(i + 6)
                p2 = c2.GetAtomPosition(i)
                self.assertEqual((p1 - p2).Length(), 0.0)
            p1 = c1.GetAtomPosition(5)
            p2 = c2.GetAtomPosition(1)
            self.assertEqual((p1 - p2).Length(), 0.0)
            p1 = c1.GetAtomPosition(6)
            p2 = c2.GetAtomPosition(0)
            self.assertEqual((p1 - p2).Length(), 0.0)

        def test12(self):
            # sulfonamide cleavage sites
            m = Chem.MolFromSmiles('CCS(=O)(=O)NCC')
            res = list(FindBRICSBonds(m))
            self.assertEqual(len(res), 2, res)
            atIds = [x[0] for x in res]
            atIds.sort()
            self.assertEqual(atIds, [(5, 2), (6, 5)])

        def testGithub1734(self):
            # stereochemistry must survive decomposition and bond breaking
            m = Chem.MolFromSmiles('c1ccccc1[C@H](C)NC')
            res = BRICSDecompose(m)
            self.assertEqual(len(res), 3)
            self.assertTrue('[4*][C@H]([8*])C' in res)
            res = BreakBRICSBonds(m)
            self.assertEqual(Chem.MolToSmiles(res, isomericSmiles=True),
                             '[16*]c1ccccc1.[4*][C@H]([8*])C.[5*]NC')

    # run the doctests first; abort before unittest if any failed
    failed, tried = _test()
    if failed:
        sys.exit(failed)

    unittest.main()
| |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
from pyecs import *
from pyecs.components import *
from collections import defaultdict
from testing import *
class TestEntity():
    def test__reset_global(self):
        # _reset_global must restore the class-level uid counter and tag
        # registry even after they have been clobbered with bogus values.
        Entity.__uid__ = "foo"
        Entity.__tags__ = "bar"
        assert hasattr(Entity, "__uid__")
        assert hasattr(Entity, "__tags__")
        assert type(Entity.__uid__) != int
        assert type(Entity.__tags__) != defaultdict
        # after the reset the pristine types and values are back
        Entity._reset_global()
        assert hasattr(Entity, "__uid__")
        assert hasattr(Entity, "__tags__")
        assert type(Entity.__uid__) == int
        assert type(Entity.__tags__) == defaultdict
        assert Entity.__uid__ == 0
        assert Entity.__tags__ == defaultdict(set)
def test_entity_static_members(self):
Entity._reset_global()
e = Entity()
assert hasattr(Entity, "__uid__")
assert type(Entity.__uid__) == int
assert hasattr(Entity, "__tags__")
assert type(Entity.__tags__) == defaultdict
def test_initial_state(self):
Entity._reset_global()
e = Entity()
assert hasattr(e, "uid")
assert hasattr(e, "parent")
assert hasattr(e, "children")
assert hasattr(e, "tags")
assert hasattr(e, "components")
assert type(e.uid) == int
assert type(e.components) == defaultdict
assert type(e.children) == list
assert type(e.tags) == set
assert e.parent == None
assert len(list(e.components.iterkeys())) == 0
assert len(e.children) == 0
assert len(e.tags) == 0
def test_add_tag(self):
Entity._reset_global()
e = Entity()
assert "foo" not in e.tags
assert "foo" not in Entity.__tags__
assert e not in Entity.__tags__["foo"]
e.add_tag("foo")
assert "foo" in e.tags
assert "foo" in Entity.__tags__
assert e in Entity.__tags__["foo"]
def test_remove_tag(self):
Entity._reset_global()
e = Entity()
assert "foo" not in e.tags
assert "foo" not in Entity.__tags__
assert e not in Entity.__tags__["foo"]
e.add_tag("foo")
assert "foo" in e.tags
assert "foo" in Entity.__tags__
assert e in Entity.__tags__["foo"]
e.remove_tag("foo")
assert "foo" not in e.tags
assert "foo" in Entity.__tags__ # because __tags__ is a defaultdict(set) we only removed from the set
assert e not in Entity.__tags__["foo"]
def test_has_tag(self):
Entity._reset_global()
e = Entity()
assert e.has_tag("foo") == False
e.add_tag("foo")
assert e.has_tag("foo") == True
def test_add_component_when_added_does_nothing_and_returns_None(self):
Entity._reset_global()
e = Entity()
c = Component()
e.add_component(c)
i = len(e.components[type(c)])
assert e.add_component(c) == None
assert len(e.components[type(c)]) == i
def test_add_component_returns_component(self):
Entity._reset_global()
e = Entity()
c = Component()
assert e.add_component(c) == c
def test_add_component(self):
Entity._reset_global()
Component._reset_global()
e = Entity()
c = Component()
assert c not in e.components[type(c)]
assert c not in Component.__added_components__[type(c)]
e.add_component(c)
assert c in e.components[type(c)]
assert c in Component.__added_components__[type(c)]
def test_add_component_callbacks(self):
Entity._reset_global()
e = Entity()
c = Component()
def component_added(component,entity):
assert e == entity
assert c == component
component_added.called = True
def component_attached():
component_attached.called = True
e.register_callback("component_added", component_added)
c.register_callback("component_attached", component_attached)
component_added.called = False
component_attached.called = False
e.add_component(c)
assert component_added.called
assert component_attached.called
def test_remove_component(self):
Entity._reset_global()
Component._reset_global()
e = Entity()
c = Component()
e.add_component(c)
assert c in e.components[type(c)]
assert c in Component.__added_components__[type(c)]
e.remove_component(c)
assert c not in e.components[type(c)]
assert c not in Component.__added_components__[type(c)]
def test_remove_component_callbacks(self):
Entity._reset_global()
e = Entity()
c = Component()
def component_removed(component,entity):
assert e == entity
assert c == component
component_removed.called = True
def component_detached(entity):
assert e == entity
component_detached.called = True
e.register_callback("component_removed", component_removed)
c.register_callback("component_detached", component_detached)
e.add_component(c)
component_removed.called = False
component_detached.called = False
e.remove_component(c)
assert component_removed.called
assert component_detached.called
def test_find_parent_entity_with_component(self):
e0 = Entity()
e1 = Entity()
e2 = Entity()
e3 = Entity()
e4 = Entity()
c = Component()
e0.add_entity(e1)
e1.add_entity(e2)
e2.add_entity(e3)
e3.add_entity(e4)
e2.add_component(c)
assert e0.find_parent_entity_with_component(type(c)) == None
assert e1.find_parent_entity_with_component(type(c)) == None
assert e2.find_parent_entity_with_component(type(c)) == None
assert e3.find_parent_entity_with_component(type(c)) == e2
assert e4.find_parent_entity_with_component(type(c)) == e2
def test_add_entity(self):
e_parent = Entity()
e_child = Entity()
def entity_added(parent,entity):
assert parent == e_parent
assert entity == e_child
entity_added.called = True
assert e_child not in e_parent.children
assert e_child.parent != e_parent
entity_added.called = False
e_child.register_callback("entity_added",entity_added)
e_parent.add_entity(e_child)
assert e_child in e_parent.children
assert e_child.parent == e_parent
assert entity_added.called
def test_remove_entity_result(self):
e_parent = Entity()
e_child = Entity()
assert e_parent.remove_entity(e_child) == False
e_parent.add_entity(e_child)
assert e_parent.remove_entity(e_child) == True
assert e_parent.remove_entity(e_child) == False
def test_remove_entity(self):
e_parent = Entity()
e_child = Entity()
def entity_removed(parent,entity):
assert parent == e_parent
assert entity == e_child
entity_removed.called = True
e_child.register_callback("entity_removed",entity_removed)
e_parent.add_entity(e_child)
assert e_child.parent is not None
assert e_child in e_parent.children
entity_removed.called = False
e_parent.remove_entity(e_child)
assert entity_removed.called
assert e_child.parent is None
assert e_child not in e_parent.children
def test_remove_from_parent(self):
e_parent = Entity()
e_child = Entity()
def entity_removed(parent,entity):
assert parent == e_parent
assert entity == e_child
entity_removed.called = True
e_child.register_callback("entity_removed",entity_removed)
e_parent.add_entity(e_child)
assert e_child.parent is not None
assert e_child in e_parent.children
entity_removed.called = False
e_child.remove_from_parent()
assert entity_removed.called
assert e_child.parent is None
assert e_child not in e_parent.children
def test_remove_entity_result_from_parent(self):
e_parent = Entity()
e_child = Entity()
assert e_child.remove_from_parent() == False
e_parent.add_entity(e_child)
assert e_child.remove_from_parent() == True
assert e_child.remove_from_parent() == False
def test_has_component(self):
e = Entity()
c = Component()
assert not e.has_component(type(c))
e.add_component(c)
assert e.has_component(type(c))
e.remove_component(c)
assert not e.has_component(type(c))
    def test_get_component(self):
        """get_component returns the oldest still-attached component of a type."""
        e = Entity()
        c0 = Component()
        c1 = Component()
        assert e.get_component(type(c0)) == None
        assert e.get_component(Component) == None
        e.add_component(c0)
        assert e.get_component(Component) == c0
        e.add_component(c1)
        # The first component added keeps priority over later ones.
        assert e.get_component(Component) == c0
        e.remove_component(c0)
        assert e.get_component(Component) == c1
        e.add_component(c0)
        # Re-adding c0 does not displace c1, which is now the older one.
        assert e.get_component(Component) == c1
        e.remove_component(c0)
        e.remove_component(c1)
        assert e.get_component(Component) == None
        e.add_component(c0)
        e.add_component(c1)
        assert e.get_component(Component) == c0
        e.remove_component(c1)
        assert e.get_component(Component) == c0
def test_find_root(self):
e0 = Entity()
e1 = Entity()
e2 = Entity()
e3 = Entity()
e4 = Entity()
e0.add_entity(e1)
e1.add_entity(e2)
e2.add_entity(e3)
e2.add_entity(e4)
assert e0.find_root() == e0
assert e1.find_root() == e0
assert e2.find_root() == e0
assert e3.find_root() == e0
assert e4.find_root() == e0
def test_entity_path(self):
e0 = Entity()
e1 = Entity()
e2 = Entity()
e3 = Entity()
e4 = Entity()
e0.add_entity(e1)
e1.add_entity(e2)
e2.add_entity(e3)
e2.add_entity(e4)
assert list(e0.entity_path()) == [e0]
assert list(e1.entity_path()) == [e0,e1]
assert list(e2.entity_path()) == [e0,e1,e2]
assert list(e3.entity_path()) == [e0,e1,e2,e3]
assert list(e4.entity_path()) == [e0,e1,e2,e4]
def test_uid(self):
Entity._reset_global()
e0 = Entity()
e1 = Entity()
e2 = Entity()
assert e0.uid == 0
assert e1.uid == 1
assert e2.uid == 2
def test_uid_path(self):
Entity._reset_global()
e0 = Entity()
e1 = Entity()
e2 = Entity()
e3 = Entity()
e4 = Entity()
e0.add_entity(e1)
e1.add_entity(e2)
e2.add_entity(e3)
e2.add_entity(e4)
assert e0.uid_path() == "0"
assert e1.uid_path() == "0.1"
assert e2.uid_path() == "0.1.2"
assert e3.uid_path() == "0.1.2.3"
assert e4.uid_path() == "0.1.2.4"
@forEach("i",generateNaturalIntegers,2**5)
def test_find_all_entities_with_component(self,i):
Entity._reset_global()
e0 = Entity()
e1 = Entity()
e2 = Entity()
e3 = Entity()
e4 = Entity()
c = Component()
e0.add_entity(e1)
e1.add_entity(e2)
e2.add_entity(e3)
e2.add_entity(e4)
es = []
# assert find_all_entities_with_component(..) - es == []
for e in [e0,e1,e2,e3,e4]:
assert [item for item in e.find_all_entities_with_component(Component) if item not in es] == []
for k,e in enumerate([e0,e1,e2,e3,e4]):
# if k'th bit is set in i
if (1 << k) & i == (1 << k):
e.add_component((c))
es.append(e)
# assert find_all_entities_with_component(..) - es == []
for e in [e0,e1,e2,e3,e4]:
assert [item for item in e.find_all_entities_with_component(Component) if item not in es] == []
def test_find_entities_with_component(self):
Entity._reset_global()
e0 = Entity()
e1 = Entity()
e2 = Entity()
e3 = Entity()
e4 = Entity()
c = Component()
e0.add_entity(e1)
e1.add_entity(e2)
e2.add_entity(e3)
e2.add_entity(e4)
e1.add_component(c)
e2.add_component(c)
es = [e1,e2]
assert [item for item in e0.find_entities_with_component(Component) if item not in [e1,e2]] == []
assert e1 in e0.find_entities_with_component(Component)
assert e2 in e0.find_entities_with_component(Component)
assert [item for item in e1.find_entities_with_component(Component) if item not in [e1,e2]] == []
assert e1 in e1.find_entities_with_component(Component)
assert e2 in e1.find_entities_with_component(Component)
assert [item for item in e2.find_entities_with_component(Component) if item not in [e2]] == []
assert e2 in e2.find_entities_with_component(Component)
assert e3.find_entities_with_component(Component) == []
assert e4.find_entities_with_component(Component) == []
def test_find_entity_with_component(self):
Entity._reset_global()
e0 = Entity()
e1 = Entity()
e2 = Entity()
e3 = Entity()
e4 = Entity()
c = Component()
e0.add_entity(e1)
e1.add_entity(e2)
e2.add_entity(e3)
e2.add_entity(e4)
e1.add_component(c)
e2.add_component(c)
assert e0.find_entity_with_component(Component) == e1
assert e1.find_entity_with_component(Component) == e1
assert e2.find_entity_with_component(Component) == e2
assert e3.find_entity_with_component(Component) == None
assert e4.find_entity_with_component(Component) == None
def test_first_or_none(self):
assert Entity.first_or_none([]) == None
assert Entity.first_or_none([1]) == 1
assert Entity.first_or_none([1,2]) == 1
assert Entity.first_or_none([4,3,2,1]) == 4
assert Entity.first_or_none([None,3,2,1]) == None # note this!
    def test_find_entities_with_tag(self):
        """find_entities_with_tag returns the set of all entities carrying a tag."""
        Entity._reset_global()
        e0 = Entity()
        e1 = Entity()
        e2 = Entity()
        e0.add_tag("foo")
        assert type(Entity.find_entities_with_tag("foo")) == set
        assert Entity.find_entities_with_tag("foo") == set([e0])
        e1.add_tag("foo")
        assert Entity.find_entities_with_tag("foo") == set([e0,e1])
    @forEach("i",lambda:iter(range(10)))
    @discardParameter("i")
    def test_find_entity_with_tag(self):
        """find_entity_with_tag returns some tagged entity, or None when none match.

        Repeated 10x via forEach, presumably because the pick among multiple
        matches is arbitrary.
        """
        Entity._reset_global()
        e0 = Entity()
        e1 = Entity()
        e2 = Entity()
        assert Entity.find_entity_with_tag("foo") == None
        e0.add_tag("foo")
        assert Entity.find_entity_with_tag("foo") == e0
        e1.add_tag("foo")
        # With two candidates either may be returned.
        assert Entity.find_entity_with_tag("foo") in [e0,e1]
        e0.remove_tag("foo")
        assert Entity.find_entity_with_tag("foo") == e1
        e1.remove_tag("foo")
        assert Entity.find_entity_with_tag("foo") == None
    def test_find_entities_with_tags(self):
        """find_entities_with_tags returns entities carrying *all* listed tags."""
        Entity._reset_global()
        e0 = Entity()
        e1 = Entity()
        e2 = Entity()
        e0.add_tag("foo")
        assert type(Entity.find_entities_with_tags(["foo"])) == set
        assert Entity.find_entities_with_tags(["foo"]) == set([e0])
        e1.add_tag("foo")
        assert Entity.find_entities_with_tags(["foo"]) == set([e0,e1])
        # Intersection semantics: every listed tag must be present.
        assert Entity.find_entities_with_tags(["foo","bar"]) == set([])
        e0.add_tag("bar")
        assert Entity.find_entities_with_tags(["foo","bar"]) == set([e0])
        e1.add_tag("bar")
        assert Entity.find_entities_with_tags(["foo","bar"]) == set([e0,e1])
    @forEach("i",lambda:iter(range(10)))
    @discardParameter("i")
    def test_find_entity_with_tags(self):
        """find_entity_with_tags returns one entity carrying all the tags, or None."""
        Entity._reset_global()
        e0 = Entity()
        e1 = Entity()
        e2 = Entity()
        e0.add_tag("foo")
        assert Entity.find_entity_with_tags(["foo"]) == e0
        e1.add_tag("foo")
        # Any of the matching entities may be returned.
        assert Entity.find_entity_with_tags(["foo"]) in set([e0,e1])
        assert Entity.find_entity_with_tags(["foo","bar"]) == None
        e0.add_tag("bar")
        assert Entity.find_entity_with_tags(["foo","bar"]) == e0
        e1.add_tag("bar")
        assert Entity.find_entity_with_tags(["foo","bar"]) in set([e0,e1])
def test_find_entities(self):
Entity._reset_global()
e0 = Entity()
e1 = Entity()
e2 = Entity()
e3 = Entity()
e4 = Entity()
e0.add_entity(e1)
e1.add_entity(e2)
e2.add_entity(e3)
e2.add_entity(e4)
assert e0.find_entities(lambda e:True) == [e0,e1,e2,e3,e4]
assert e1.find_entities(lambda e:True) == [e1,e2,e3,e4]
assert e2.find_entities(lambda e:True) == [e2,e3,e4]
assert e3.find_entities(lambda e:True) == [e3]
assert e4.find_entities(lambda e:True) == [e4]
assert e0.find_entities(lambda e:False) == []
assert e1.find_entities(lambda e:False) == []
assert e2.find_entities(lambda e:False) == []
assert e3.find_entities(lambda e:False) == []
assert e4.find_entities(lambda e:False) == []
e0.flag = False
e1.flag = True
e2.flag = True
e3.flag = False
e4.flag = False
assert e0.find_entities(lambda e:e.flag) == [e1,e2]
assert e1.find_entities(lambda e:e.flag) == [e1,e2]
assert e2.find_entities(lambda e:e.flag) == [e2]
assert e3.find_entities(lambda e:e.flag) == []
assert e4.find_entities(lambda e:e.flag) == []
def test_traverse_entities(self):
Entity._reset_global()
e0 = Entity()
e1 = Entity()
e2 = Entity()
e3 = Entity()
e4 = Entity()
e0.add_entity(e1)
e1.add_entity(e2)
e2.add_entity(e3)
e2.add_entity(e4)
def traverse_entities(e):
traverse_entities.es.append(e)
traverse_entities.es = []
e0.traverse_entities(traverse_entities)
assert traverse_entities.es == [e0,e1,e2,e3,e4]
traverse_entities.es = []
e1.traverse_entities(traverse_entities)
assert traverse_entities.es == [e1,e2,e3,e4]
traverse_entities.es = []
e2.traverse_entities(traverse_entities)
assert traverse_entities.es == [e2,e3,e4]
traverse_entities.es = []
e3.traverse_entities(traverse_entities)
assert traverse_entities.es == [e3]
traverse_entities.es = []
e4.traverse_entities(traverse_entities)
assert traverse_entities.es == [e4]
def test_traverse_entities_accum(self):
Entity._reset_global()
e0 = Entity()
e1 = Entity()
e2 = Entity()
e3 = Entity()
e4 = Entity()
e0.add_entity(e1)
e1.add_entity(e2)
e2.add_entity(e3)
e2.add_entity(e4)
def traverse_entities_accum(e,accum):
accum.append(e)
return accum
assert e0.traverse_entities_accum(traverse_entities_accum,[]) == [e0,e1,e2,e3,e4]
assert e1.traverse_entities_accum(traverse_entities_accum,[]) == [e1,e2,e3,e4]
assert e2.traverse_entities_accum(traverse_entities_accum,[]) == [e2,e3,e4]
assert e3.traverse_entities_accum(traverse_entities_accum,[]) == [e3]
assert e4.traverse_entities_accum(traverse_entities_accum,[]) == [e4]
    def test_all_components(self):
        """all_components lists every attached component across all types."""
        Entity._reset_global()
        e = Entity()
        class Component1(Component):
            def __init__(self, *args, **kwargs):
                super(Component1, self).__init__(*args, **kwargs)
        class Component2(Component):
            def __init__(self, *args, **kwargs):
                super(Component2, self).__init__(*args, **kwargs)
        assert set(e.all_components()) == set([])
        # add_component returns the component it attached.
        c1 = e.add_component(Component1())
        assert set(e.all_components()) == set([c1])
        c2 = e.add_component(Component2())
        assert set(e.all_components()) == set([c1,c2])
        c3 = e.add_component(Component2())
        assert set(e.all_components()) == set([c1,c2,c3])
        e.remove_component(c1)
        assert set(e.all_components()) == set([c2,c3])
        e.remove_component(c2)
        assert set(e.all_components()) == set([c3])
        e.remove_component(c3)
        assert set(e.all_components()) == set([])
    def test_print_components(self,capfd):
        """print_components(True) returns the listing; print_components() prints it.

        Stdout is captured via pytest's capfd fixture.
        """
        Entity._reset_global()
        e = Entity()
        class Component1(Component):
            def __init__(self, *args, **kwargs):
                super(Component1, self).__init__(*args, **kwargs)
            def __str__(self):
                return str(("Component1", id(self)))
        class Component2(Component):
            def __init__(self, *args, **kwargs):
                super(Component2, self).__init__(*args, **kwargs)
            def __str__(self):
                return str(("Component2", id(self)))
        c1 = e.add_component(Component1())
        c2 = e.add_component(Component2())
        c3 = e.add_component(Component2())
        res = e.print_components(True)
        # Every component's str() (class name + id) appears in the listing.
        assert "Component1" in res
        assert str(id(c1)) in res
        assert "Component2" in res
        assert str(id(c2)) in res
        assert "Component2" in res
        assert str(id(c3)) in res
        e.print_components()
        out, err = capfd.readouterr()
        # Printing emits the same text plus a trailing newline.
        assert out == res + "\n"
    def test___str__(self):
        """str(entity) shows its uid path plus the str() of each component."""
        Entity._reset_global()
        e0 = Entity()
        e1 = Entity()
        e2 = Entity()
        e0.add_entity(e1)
        e1.add_entity(e2)
        class Component1(Component):
            def __init__(self, *args, **kwargs):
                super(Component1, self).__init__(*args, **kwargs)
            def __str__(self):
                return str("Component1")
        e2.add_component(Component1())
        e2.add_component(Component1())
        assert str(e0) == "Entity 0"
        assert str(e1) == "Entity 0.1"
        assert str(e2) == "Entity 0.1.2 Component1, Component1"
    def test_print_structure(self,capfd):
        """print_structure(True) returns one line per entity (uid path + components);
        print_structure() prints the same text (captured via capfd)."""
        Entity._reset_global()
        e0 = Entity()
        e1 = Entity()
        e2 = Entity()
        e0.add_entity(e1)
        e1.add_entity(e2)
        class Component1(Component):
            def __init__(self, *args, **kwargs):
                super(Component1, self).__init__(*args, **kwargs)
            def __str__(self):
                return str("Component1")
        e2.add_component(Component1())
        e2.add_component(Component1())
        res = e0.print_structure(True)
        assert res == "0 \n" + \
            "0.1 \n" + \
            "0.1.2 Component1, Component1"
        e0.print_structure()
        out, err = capfd.readouterr()
        assert out == res + "\n"
    def test_remove_component_removes_callbacks(self):
        """@callback handlers stop firing on the entity once the component is removed."""
        e=Entity()
        class Component1(Component):
            def __init__(self, *args, **kwargs):
                super(Component1, self).__init__(*args, **kwargs)
                self.foobar_called = False
            @callback
            def foobar(self):
                self.foobar_called = True
        c = Component1()
        e.add_component(c)
        e.fire_callbacks("foobar")
        assert c.foobar_called == True
        e.remove_component(c)
        c.foobar_called = False
        e.fire_callbacks("foobar")
        # The detached component no longer receives entity callbacks.
        assert c.foobar_called == False
    def test_readded_component_callbacks(self):
        """Re-adding a removed component re-registers its @callback handlers."""
        e=Entity()
        class Component1(Component):
            def __init__(self, *args, **kwargs):
                super(Component1, self).__init__(*args, **kwargs)
                self.foobar_called = False
            @callback
            def foobar(self):
                self.foobar_called = True
        c = Component1()
        e.add_component(c)
        c.foobar_called = False
        e.fire_callbacks("foobar")
        assert c.foobar_called == True
        e.remove_component(c)
        c.foobar_called = False
        e.fire_callbacks("foobar")
        assert c.foobar_called == False
        e.add_component(c)
        c.foobar_called = False
        e.fire_callbacks("foobar")
        # Callbacks work again after re-attachment.
        assert c.foobar_called == True
    def test_remove_component_doesnt_remove_component_callbacks(self):
        """@component_callback handlers stay bound to the component itself,
        even after it has been removed from the entity."""
        e=Entity()
        class Component1(Component):
            def __init__(self, *args, **kwargs):
                super(Component1, self).__init__(*args, **kwargs)
                self.foobar_called = False
            @component_callback
            def foobar(self):
                self.foobar_called = True
        c = Component1()
        e.add_component(c)
        c.foobar_called = False
        c.fire_callbacks("foobar")
        assert c.foobar_called == True
        e.remove_component(c)
        c.foobar_called = False
        c.fire_callbacks("foobar")
        # Firing on the component (not the entity) still works when detached.
        assert c.foobar_called == True
| |
import os
import subprocess
import sys
from collections import defaultdict
from typing import Any, ClassVar, Dict, Type
from urllib.parse import urljoin
from .wptmanifest.parser import atoms
# Sentinel atom used in expectation metadata to reset values inherited from
# less specific metadata levels.
atom_reset = atoms["Reset"]
# Test types this module knows how to represent.
enabled_tests = {"testharness", "reftest", "wdspec", "crashtest", "print-reftest"}
class Result(object):
    """Overall result of a single test.

    Subclasses must define a class-level ``statuses`` set; construction
    rejects any status outside it.
    """
    def __init__(self, status, message, expected=None, extra=None, stack=None,
                 known_intermittent=None):
        if status not in self.statuses:
            raise ValueError("Unrecognised status %s" % status)
        if known_intermittent is None:
            known_intermittent = []
        if extra is None:
            extra = {}
        self.status = status
        self.message = message
        self.expected = expected
        self.known_intermittent = known_intermittent
        self.extra = extra
        self.stack = stack

    def __repr__(self):
        return "<%s.%s %s>" % (self.__module__, self.__class__.__name__, self.status)
class SubtestResult(object):
    """Result of a single named subtest.

    Subclasses must define a class-level ``statuses`` set of recognised
    status strings.
    """
    def __init__(self, name, status, message, stack=None, expected=None, known_intermittent=None):
        self.name = name
        if status not in self.statuses:
            raise ValueError("Unrecognised status %s" % status)
        if known_intermittent is None:
            known_intermittent = []
        self.status = status
        self.message = message
        self.stack = stack
        self.expected = expected
        self.known_intermittent = known_intermittent

    def __repr__(self):
        return "<%s.%s %s %s>" % (self.__module__, self.__class__.__name__, self.name, self.status)
class TestharnessResult(Result):
    # Harness-level (overall) result of a testharness.js test.
    default_expected = "OK"
    statuses = {"OK", "ERROR", "INTERNAL-ERROR", "TIMEOUT", "EXTERNAL-TIMEOUT", "CRASH", "PRECONDITION_FAILED"}
class TestharnessSubtestResult(SubtestResult):
    # Result of one subtest inside a testharness.js test.
    default_expected = "PASS"
    statuses = {"PASS", "FAIL", "TIMEOUT", "NOTRUN", "PRECONDITION_FAILED"}
class ReftestResult(Result):
    # Result of a reftest; reftests have no subtests.
    default_expected = "PASS"
    statuses = {"PASS", "FAIL", "ERROR", "INTERNAL-ERROR", "TIMEOUT", "EXTERNAL-TIMEOUT",
                "CRASH"}
class WdspecResult(Result):
    # Overall result of a wdspec test.
    default_expected = "OK"
    statuses = {"OK", "ERROR", "INTERNAL-ERROR", "TIMEOUT", "EXTERNAL-TIMEOUT", "CRASH"}
class WdspecSubtestResult(SubtestResult):
    # Result of one subtest inside a wdspec test.
    default_expected = "PASS"
    statuses = {"PASS", "FAIL", "ERROR"}
class CrashtestResult(Result):
    # Result of a crashtest; no subtests.
    default_expected = "PASS"
    statuses = {"PASS", "ERROR", "INTERNAL-ERROR", "TIMEOUT", "EXTERNAL-TIMEOUT",
                "CRASH"}
def get_run_info(metadata_root, product, **kwargs):
    """Construct a RunInfo describing the current environment for *product*."""
    return RunInfo(metadata_root, product, **kwargs)
class RunInfo(Dict[str, Any]):
    """Dict of properties (os, product, debug, ...) describing the environment
    a test run executes in; populated from mozinfo, git, and (optionally) an
    Android device queried over adb."""
    def __init__(self, metadata_root, product, debug,
                 browser_version=None,
                 browser_channel=None,
                 verify=None,
                 extras=None,
                 enable_webrender=False,
                 device_serials=None,
                 adb_binary=None):
        import mozinfo
        self._update_mozinfo(metadata_root)
        self.update(mozinfo.info)
        from .update.tree import GitTree
        try:
            # GitTree.__init__ throws if we are not in a git tree.
            rev = GitTree(log_error=False).rev
        except (OSError, subprocess.CalledProcessError):
            rev = None
        if rev:
            self["revision"] = rev.decode("utf-8")
        self["python_version"] = sys.version_info.major
        self["product"] = product
        if debug is not None:
            self["debug"] = debug
        elif "debug" not in self:
            # Default to release
            self["debug"] = False
        if browser_version:
            self["browser_version"] = browser_version
        if browser_channel:
            self["browser_channel"] = browser_channel
        self["verify"] = verify
        if "wasm" not in self:
            self["wasm"] = False
        if extras is not None:
            self.update(extras)
        if "headless" not in self:
            self["headless"] = False
        self["webrender"] = enable_webrender
        if adb_binary:
            self["adb_binary"] = adb_binary
        if device_serials:
            # Assume all emulators are identical, so query an arbitrary one.
            self._update_with_emulator_info(device_serials[0])
            # Drop the host's distro when targeting a device.
            self.pop("linux_distro", None)
    def _adb_run(self, device_serial, args, **kwargs):
        """Run an adb command against one device and return its output."""
        adb_binary = self.get("adb_binary", "adb")
        cmd = [adb_binary, "-s", device_serial, *args]
        return subprocess.check_output(cmd, **kwargs)
    def _adb_get_property(self, device_serial, prop, **kwargs):
        """Read one Android system property via `adb shell getprop`."""
        args = ["shell", "getprop", prop]
        value = self._adb_run(device_serial, args, **kwargs)
        return value.strip()
    def _update_with_emulator_info(self, device_serial):
        """Override system info taken from the host if using an Android
        emulator."""
        try:
            self._adb_run(device_serial, ["wait-for-device"])
            emulator_info = {
                "os": "android",
                "os_version": self._adb_get_property(
                    device_serial,
                    "ro.build.version.release",
                    encoding="utf-8",
                ),
            }
            emulator_info["version"] = emulator_info["os_version"]
            # Detect CPU info (https://developer.android.com/ndk/guides/abis#sa)
            abi64, *_ = self._adb_get_property(
                device_serial,
                "ro.product.cpu.abilist64",
                encoding="utf-8",
            ).split(',')
            if abi64:
                emulator_info["processor"] = abi64
                emulator_info["bits"] = 64
            else:
                # No 64-bit ABI advertised; fall back to the 32-bit list.
                emulator_info["processor"], *_ = self._adb_get_property(
                    device_serial,
                    "ro.product.cpu.abilist32",
                    encoding="utf-8",
                ).split(',')
                emulator_info["bits"] = 32
            self.update(emulator_info)
        except (OSError, subprocess.CalledProcessError):
            # Best-effort: keep host-derived info if adb is unavailable/fails.
            pass
    def _update_mozinfo(self, metadata_root):
        """Add extra build information from a mozinfo.json file in a parent
        directory"""
        import mozinfo
        path = metadata_root
        dirs = set()
        while path != os.path.expanduser('~'):
            if path in dirs:
                # dirname() reached a fixed point (e.g. the root); stop.
                break
            dirs.add(str(path))
            path = os.path.dirname(path)
        mozinfo.find_and_update_from_json(*dirs)
def server_protocol(manifest_item):
    """Return the protocol ("h2", "https" or "http") a manifest item requires."""
    if getattr(manifest_item, "h2", False):
        return "h2"
    if getattr(manifest_item, "https", False):
        return "https"
    return "http"
class Test(object):
    """Base class for a single runnable test, combining its manifest entry
    with layered expectation metadata.

    Subclasses set ``result_cls``/``subtest_result_cls``/``test_type``.
    Metadata lookups walk from the most specific level (subtest, then test)
    out to inherited directory-level metadata.
    """
    result_cls = None # type: ClassVar[Type[Result]]
    subtest_result_cls = None # type: ClassVar[Type[SubtestResult]]
    test_type = None # type: ClassVar[str]
    default_timeout = 10 # seconds
    long_timeout = 60 # seconds
    def __init__(self, url_base, tests_root, url, inherit_metadata, test_metadata,
                 timeout=None, path=None, protocol="http", subdomain=False):
        self.url_base = url_base
        self.tests_root = tests_root
        self.url = url
        self._inherit_metadata = inherit_metadata
        self._test_metadata = test_metadata
        self.timeout = timeout if timeout is not None else self.default_timeout
        self.path = path
        self.subdomain = subdomain
        self.environment = {"url_base": url_base,
                            "protocol": protocol,
                            "prefs": self.prefs}
    def __eq__(self, other):
        # Tests compare equal iff their ids match.
        # NOTE(review): no __hash__ is defined, so defining __eq__ makes
        # instances unhashable on Python 3 — confirm callers never put Test
        # objects in sets or use them as dict keys.
        if not isinstance(other, Test):
            return False
        return self.id == other.id
    # Python 2 does not have this delegation, while Python 3 does.
    def __ne__(self, other):
        return not self.__eq__(other)
    def update_metadata(self, metadata=None):
        """Return *metadata*, initialising it to an empty dict when None."""
        if metadata is None:
            metadata = {}
        return metadata
    @classmethod
    def from_manifest(cls, manifest_file, manifest_item, inherit_metadata, test_metadata):
        """Build a Test from a manifest item, honouring its "long" timeout flag."""
        timeout = cls.long_timeout if manifest_item.timeout == "long" else cls.default_timeout
        return cls(manifest_file.url_base,
                   manifest_file.tests_root,
                   manifest_item.url,
                   inherit_metadata,
                   test_metadata,
                   timeout=timeout,
                   path=os.path.join(manifest_file.tests_root, manifest_item.path),
                   protocol=server_protocol(manifest_item),
                   subdomain=manifest_item.subdomain)
    @property
    def id(self):
        # The test URL doubles as its unique id.
        return self.url
    @property
    def keys(self):
        # Extra per-subclass keys; the base class has none.
        return tuple()
    @property
    def abs_path(self):
        return os.path.join(self.tests_root, self.path)
    def _get_metadata(self, subtest=None):
        # Metadata for one subtest if requested (and available), else the test's own.
        if self._test_metadata is not None and subtest is not None:
            return self._test_metadata.get_subtest(subtest)
        else:
            return self._test_metadata
    def itermeta(self, subtest=None):
        """Yield metadata nodes from most to least specific (subtest, test, ancestors)."""
        if self._test_metadata is not None:
            if subtest is not None:
                subtest_meta = self._get_metadata(subtest)
                if subtest_meta is not None:
                    yield subtest_meta
            yield self._get_metadata()
        for metadata in reversed(self._inherit_metadata):
            yield metadata
    def disabled(self, subtest=None):
        """Return the most specific disabled reason, or None when enabled."""
        for meta in self.itermeta(subtest):
            disabled = meta.disabled
            if disabled is not None:
                return disabled
        return None
    @property
    def restart_after(self):
        # True if any metadata level requests a restart after this test.
        for meta in self.itermeta(None):
            restart_after = meta.restart_after
            if restart_after is not None:
                return True
        return False
    @property
    def leaks(self):
        # Most specific leak-checking setting wins; default False.
        for meta in self.itermeta(None):
            leaks = meta.leaks
            if leaks is not None:
                return leaks
        return False
    @property
    def min_assertion_count(self):
        for meta in self.itermeta(None):
            count = meta.min_assertion_count
            if count is not None:
                return count
        return 0
    @property
    def max_assertion_count(self):
        for meta in self.itermeta(None):
            count = meta.max_assertion_count
            if count is not None:
                return count
        return 0
    @property
    def lsan_disabled(self):
        for meta in self.itermeta():
            if meta.lsan_disabled is not None:
                return meta.lsan_disabled
        return False
    @property
    def lsan_allowed(self):
        # Union of allowed frames across levels; the Reset atom stops inheriting.
        lsan_allowed = set()
        for meta in self.itermeta():
            lsan_allowed |= meta.lsan_allowed
            if atom_reset in lsan_allowed:
                lsan_allowed.remove(atom_reset)
                break
        return lsan_allowed
    @property
    def lsan_max_stack_depth(self):
        for meta in self.itermeta(None):
            depth = meta.lsan_max_stack_depth
            if depth is not None:
                return depth
        return None
    @property
    def mozleak_allowed(self):
        mozleak_allowed = set()
        for meta in self.itermeta():
            mozleak_allowed |= meta.leak_allowed
            if atom_reset in mozleak_allowed:
                mozleak_allowed.remove(atom_reset)
                break
        return mozleak_allowed
    @property
    def mozleak_threshold(self):
        # The most specific value wins for each threshold key.
        rv = {}
        for meta in self.itermeta(None):
            threshold = meta.leak_threshold
            for key, value in threshold.items():
                if key not in rv:
                    rv[key] = value
        return rv
    @property
    def tags(self):
        tags = set()
        for meta in self.itermeta():
            meta_tags = meta.tags
            tags |= meta_tags
            if atom_reset in meta_tags:
                tags.remove(atom_reset)
                break
        # Synthetic tag naming the top-level directory the test lives in.
        tags.add("dir:%s" % self.id.lstrip("/").split("/")[0])
        return tags
    @property
    def prefs(self):
        # Apply prefs least-specific first; the Reset atom clears inherited prefs.
        prefs = {}
        for meta in reversed(list(self.itermeta())):
            meta_prefs = meta.prefs
            if atom_reset in meta_prefs:
                del meta_prefs[atom_reset]
                prefs = {}
            prefs.update(meta_prefs)
        return prefs
    def expected(self, subtest=None):
        """Return the expected status string for the test (or one subtest)."""
        if subtest is None:
            default = self.result_cls.default_expected
        else:
            default = self.subtest_result_cls.default_expected
        metadata = self._get_metadata(subtest)
        if metadata is None:
            return default
        try:
            expected = metadata.get("expected")
            if isinstance(expected, str):
                return expected
            elif isinstance(expected, list):
                # First entry is the primary expectation; the rest are intermittents.
                return expected[0]
            elif expected is None:
                return default
        except KeyError:
            return default
    def implementation_status(self):
        implementation_status = None
        for meta in self.itermeta():
            implementation_status = meta.implementation_status
            if implementation_status:
                return implementation_status
        # assuming no specific case, we are implementing it
        return "implementing"
    def known_intermittent(self, subtest=None):
        """Return additionally-accepted intermittent statuses (list tail of "expected")."""
        metadata = self._get_metadata(subtest)
        if metadata is None:
            return []
        try:
            expected = metadata.get("expected")
            if isinstance(expected, list):
                return expected[1:]
            return []
        except KeyError:
            return []
    def expect_any_subtest_status(self):
        """True when metadata carries the key telling the runner to ignore subtest statuses."""
        metadata = self._get_metadata()
        if metadata is None:
            return False
        try:
            # This key is used by the Blink CI to ignore subtest statuses
            metadata.get("blink_expect_any_subtest_status")
            return True
        except KeyError:
            return False
    def __repr__(self):
        return "<%s.%s %s>" % (self.__module__, self.__class__.__name__, self.id)
class TestharnessTest(Test):
    """A testharness.js test, optionally driven by testdriver or jsshell."""
    result_cls = TestharnessResult
    subtest_result_cls = TestharnessSubtestResult
    test_type = "testharness"
    def __init__(self, url_base, tests_root, url, inherit_metadata, test_metadata,
                 timeout=None, path=None, protocol="http", testdriver=False,
                 jsshell=False, scripts=None, subdomain=False):
        Test.__init__(self, url_base, tests_root, url, inherit_metadata, test_metadata, timeout,
                      path, protocol, subdomain)
        self.testdriver = testdriver
        self.jsshell = jsshell
        self.scripts = scripts or []
    @classmethod
    def from_manifest(cls, manifest_file, manifest_item, inherit_metadata, test_metadata):
        """Build the test, pulling testdriver/jsshell flags and helper scripts
        from the manifest item (all optional)."""
        timeout = cls.long_timeout if manifest_item.timeout == "long" else cls.default_timeout
        testdriver = manifest_item.testdriver if hasattr(manifest_item, "testdriver") else False
        jsshell = manifest_item.jsshell if hasattr(manifest_item, "jsshell") else False
        script_metadata = manifest_item.script_metadata or []
        # Only "script" entries in the script metadata are helper scripts.
        scripts = [v for (k, v) in script_metadata
                   if k == "script"]
        return cls(manifest_file.url_base,
                   manifest_file.tests_root,
                   manifest_item.url,
                   inherit_metadata,
                   test_metadata,
                   timeout=timeout,
                   path=os.path.join(manifest_file.tests_root, manifest_item.path),
                   protocol=server_protocol(manifest_item),
                   testdriver=testdriver,
                   jsshell=jsshell,
                   scripts=scripts,
                   subdomain=manifest_item.subdomain)
    @property
    def id(self):
        return self.url
class ManualTest(Test):
    # Manual tests define no result classes; they are not run automatically.
    test_type = "manual"
    @property
    def id(self):
        return self.url
class ReftestTest(Test):
    """A reftest

    A reftest should be considered to pass if one of its references matches
    (see below) *and* the reference passes if it has any references
    recursively.

    Attributes:
        references (List[Tuple[str, str]]): a list of alternate references,
            where one must match for the test to pass
        viewport_size (Optional[Tuple[int, int]]): size of the viewport for
            this test, if not default
        dpi (Optional[int]): dpi to use when rendering this test, if not
            default
    """
    result_cls = ReftestResult
    test_type = "reftest"
    def __init__(self, url_base, tests_root, url, inherit_metadata, test_metadata, references,
                 timeout=None, path=None, viewport_size=None, dpi=None, fuzzy=None,
                 protocol="http", subdomain=False):
        Test.__init__(self, url_base, tests_root, url, inherit_metadata, test_metadata, timeout,
                      path, protocol, subdomain)
        for _, ref_type in references:
            # Only equality ("==") and inequality ("!=") comparisons exist.
            if ref_type not in ("==", "!="):
                raise ValueError
        self.references = references
        self.viewport_size = self.get_viewport_size(viewport_size)
        self.dpi = dpi
        self._fuzzy = fuzzy or {}
    @classmethod
    def cls_kwargs(cls, manifest_test):
        # Subclass hook: extra constructor kwargs derived from the manifest item.
        return {"viewport_size": manifest_test.viewport_size,
                "dpi": manifest_test.dpi,
                "protocol": server_protocol(manifest_test),
                "fuzzy": manifest_test.fuzzy}
    @classmethod
    def from_manifest(cls,
                      manifest_file,
                      manifest_test,
                      inherit_metadata,
                      test_metadata):
        """Build the reftest node graph (match alternates plus a chained
        mismatch walk) for one manifest item."""
        timeout = cls.long_timeout if manifest_test.timeout == "long" else cls.default_timeout
        url = manifest_test.url
        node = cls(manifest_file.url_base,
                   manifest_file.tests_root,
                   manifest_test.url,
                   inherit_metadata,
                   test_metadata,
                   [],
                   timeout=timeout,
                   path=manifest_test.path,
                   subdomain=manifest_test.subdomain,
                   **cls.cls_kwargs(manifest_test))
        refs_by_type = defaultdict(list)
        for ref_url, ref_type in manifest_test.references:
            refs_by_type[ref_type].append(ref_url)
        # Construct a list of all the mismatches, where we end up with mismatch_1 != url !=
        # mismatch_2 != url != mismatch_3 etc.
        #
        # Per the logic documented above, this means that none of the mismatches provided match,
        mismatch_walk = None
        if refs_by_type["!="]:
            mismatch_walk = ReftestTest(manifest_file.url_base,
                                        manifest_file.tests_root,
                                        refs_by_type["!="][0],
                                        [],
                                        None,
                                        [])
            cmp_ref = mismatch_walk
            for ref_url in refs_by_type["!="][1:]:
                # Interleave the test's own url between successive mismatches.
                cmp_self = ReftestTest(manifest_file.url_base,
                                       manifest_file.tests_root,
                                       url,
                                       [],
                                       None,
                                       [])
                cmp_ref.references.append((cmp_self, "!="))
                cmp_ref = ReftestTest(manifest_file.url_base,
                                      manifest_file.tests_root,
                                      ref_url,
                                      [],
                                      None,
                                      [])
                cmp_self.references.append((cmp_ref, "!="))
        if mismatch_walk is None:
            mismatch_refs = []
        else:
            mismatch_refs = [(mismatch_walk, "!=")]
        if refs_by_type["=="]:
            # For each == ref, add a reference to this node whose tail is the mismatch list.
            # Per the logic documented above, this means any one of the matches must pass plus all the mismatches.
            for ref_url in refs_by_type["=="]:
                ref = ReftestTest(manifest_file.url_base,
                                  manifest_file.tests_root,
                                  ref_url,
                                  [],
                                  None,
                                  mismatch_refs)
                node.references.append((ref, "=="))
        else:
            # Otherwise, we just add the mismatches directly as we are immediately into the
            # mismatch chain with no alternates.
            node.references.extend(mismatch_refs)
        return node
    def update_metadata(self, metadata):
        """Accumulate, per (protocol, url), how many screenshots a run will need."""
        if "url_count" not in metadata:
            metadata["url_count"] = defaultdict(int)
        for reference, _ in self.references:
            # We assume a naive implementation in which a url with multiple
            # possible screenshots will need to take both the lhs and rhs screenshots
            # for each possible match
            metadata["url_count"][(self.environment["protocol"], reference.url)] += 1
            reference.update_metadata(metadata)
        return metadata
    def get_viewport_size(self, override):
        # Plain reftests simply honour any explicit override.
        return override
    @property
    def id(self):
        return self.url
    @property
    def keys(self):
        return ("reftype", "refurl")
    @property
    def fuzzy(self):
        return self._fuzzy
    @property
    def fuzzy_override(self):
        """Fuzzy-match ranges from metadata, keyed by absolutised ref URL(s);
        the Reset atom discards values inherited from outer levels."""
        values = {}
        for meta in reversed(list(self.itermeta(None))):
            value = meta.fuzzy
            if not value:
                continue
            if atom_reset in value:
                value.remove(atom_reset)
                values = {}
            for key, data in value:
                if isinstance(key, (tuple, list)):
                    key = list(key)
                    key[0] = urljoin(self.url, key[0])
                    key[1] = urljoin(self.url, key[1])
                    key = tuple(key)
                elif key:
                    # Key is just a relative url to a ref
                    key = urljoin(self.url, key)
                values[key] = data
        return values
    @property
    def page_ranges(self):
        # Only print-reftests have page ranges.
        return {}
class PrintReftestTest(ReftestTest):
    """A reftest rendered through the print pipeline with a fixed page size."""
    test_type = "print-reftest"
    def __init__(self, url_base, tests_root, url, inherit_metadata, test_metadata, references,
                 timeout=None, path=None, viewport_size=None, dpi=None, fuzzy=None,
                 page_ranges=None, protocol="http", subdomain=False):
        super(PrintReftestTest, self).__init__(url_base, tests_root, url, inherit_metadata, test_metadata,
                                               references, timeout, path, viewport_size, dpi,
                                               fuzzy, protocol, subdomain=subdomain)
        self._page_ranges = page_ranges
    @classmethod
    def cls_kwargs(cls, manifest_test):
        rv = super(PrintReftestTest, cls).cls_kwargs(manifest_test)
        rv["page_ranges"] = manifest_test.page_ranges
        return rv
    def get_viewport_size(self, override):
        # Print reftests always use a fixed 5x3 inch page, expressed in cm;
        # an explicit override is a programming error.
        assert override is None
        return (5*2.54, 3*2.54)
    @property
    def page_ranges(self):
        return self._page_ranges
class WdspecTest(Test):
    # wdspec tests use longer timeouts than the Test defaults.
    result_cls = WdspecResult
    subtest_result_cls = WdspecSubtestResult
    test_type = "wdspec"
    default_timeout = 25
    long_timeout = 180 # 3 minutes
class CrashTest(Test):
    # Crashtests only have a top-level result; no subtest result class.
    result_cls = CrashtestResult
    test_type = "crashtest"
# Maps a manifest item type to the Test subclass that represents it.
manifest_test_cls = {"reftest": ReftestTest,
                     "print-reftest": PrintReftestTest,
                     "testharness": TestharnessTest,
                     "manual": ManualTest,
                     "wdspec": WdspecTest,
                     "crashtest": CrashTest}
def from_manifest(manifest_file, manifest_test, inherit_metadata, test_metadata):
    """Instantiate the Test subclass matching the manifest item's type."""
    cls = manifest_test_cls[manifest_test.item_type]
    return cls.from_manifest(manifest_file, manifest_test, inherit_metadata, test_metadata)
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import logging
import os
import random
import re
import time
from watsononlinestore.tests.fake_discovery import FAKE_DISCOVERY
# Module-wide DEBUG logging, configured at import time.
logging.basicConfig(level=logging.DEBUG)
LOG = logging.getLogger(__name__)
# Limit the result count when calling Discovery query.
DISCOVERY_QUERY_COUNT = 10
# Limit more when formatting and filtering out "weak" results.
# Also useful for allowing us to log more results for dev/test even
# though we return fewer to the client.
DISCOVERY_KEEP_COUNT = 5
# Truncate the Discovery 'text'. It can be a lot. We'll add "..." if truncated.
DISCOVERY_TRUNCATE = 500
# Available data sources for Discovery
DISCOVERY_AMAZON_STORE = "AMAZON"
DISCOVERY_IBM_STORE = "IBM_STORE"
class SlackSender:
    """Thin wrapper around a Slack client, bound to a single channel."""
    def __init__(self, slack_client, channel):
        # Slack API client and the channel all messages are posted to.
        self.slack_client = slack_client
        self.channel = channel
    def send_message(self, message):
        """Sends message via Slack API.
        :param str message: The message to be sent to slack
        """
        self.slack_client.api_call("chat.postMessage",
                                   channel=self.channel,
                                   text=message,
                                   as_user=True)
    def get_user_json(self, user_id):
        """Get slack user information from user_id.
        Errors are returned (not raised) as ``{'error': ...}`` so callers
        can degrade gracefully.
        :param str user_id: user ID to look up
        """
        try:
            # Get the authenticated user profile from Slack
            user_json = self.slack_client.api_call("users.info", user=user_id)
        except Exception as e:
            LOG.exception("Slack client call exception:")
            user_json = {'error': repr(e)}
        return user_json
class OnlineStoreCustomer:
    """In-memory record of a store customer.

    Mirrors the document layout stored in the Cloudant online-store DB.
    """
    def __init__(self, email=None, first_name=None, last_name=None,
                 shopping_cart=None):
        self.email = email
        self.first_name = first_name
        self.last_name = last_name
        self.shopping_cart = shopping_cart

    def get_customer_dict(self):
        """Return this customer as a noSQL-ready document dict.

        :returns: customer data keyed for the cloudant_online_store DB
        :rtype: dict
        """
        return {
            'type': 'customer',
            'email': self.email,
            'first_name': self.first_name,
            'last_name': self.last_name,
            'shopping_cart': self.shopping_cart,
        }
class WatsonOnlineStore:
    def __init__(self, bot_id, slack_client,
                 assistant_client, discovery_client,
                 cloudant_online_store):
        """Wire up the UI client, Watson services, and Cloudant DB.

        Workspace and Discovery environment/collection IDs are resolved
        (or created) up front from the runtime environment.
        """
        # specific for Slack as UI
        self.slack_client = slack_client
        self.bot_id = bot_id or 'unknown bot_id'
        # Mention token used to detect messages addressed to the bot.
        self.at_bot = "<@" + self.bot_id + ">"
        # IBM Watson Assistant
        self.assistant_client = assistant_client
        self.workspace_id = self.setup_assistant_workspace(
            assistant_client, os.environ)
        # IBM Cloudant noSQL database
        self.cloudant_online_store = cloudant_online_store
        # IBM Watson Discovery Service
        self.discovery_client = discovery_client
        self.discovery_data_source = os.environ.get(
            'DISCOVERY_DATA_SOURCE', DISCOVERY_IBM_STORE)
        try:
            # Optional per-data-source minimum relevance score (0.0-1.0).
            self.discovery_score_filter = float(
                os.environ.get(self.discovery_data_source +
                               '_DISCO_SCORE_FILTER', 0))
        except ValueError:
            LOG.debug(self.discovery_data_source +
                      "_DISCO_SCORE_FILTER must " +
                      "be a number between 0.0 and 1.0. " +
                      "Using default value of 0.0")
            self.discovery_score_filter = 0.0
            pass
        self.discovery_environment_id, self.discovery_collection_id = (
            self.setup_discovery_collection(discovery_client,
                                            self.discovery_data_source,
                                            os.environ))
        # Conversation state shared with Watson Assistant.
        self.context = {}
        self.customer = None
        # Cached structured Discovery results (for add-to-cart lookup).
        self.response_tuple = None
        self.delay = 0.5  # second
    @staticmethod
    def setup_assistant_workspace(assistant_client, environ):
        """Verify and/or initialize the Assistant workspace.
        If a WORKSPACE_ID is specified in the runtime environment,
        make sure that workspace exists. If no WORKSPACE_ID is
        specified then try to find it using a lookup by name.
        Name will be 'watson-online-store' unless overridden
        using the WORKSPACE_NAME environment variable.
        If a workspace is not found by ID or name, then try to
        create one from the JSON in data/workspace.json. Use the
        name as mentioned above so future lookup will find what
        was created.
        :param assistant_client: Assistant service client
        :param object environ: runtime environment variables
        :return: ID of Assistant workspace to use
        :rtype: str
        :raise Exception: When workspace is not found and cannot be created
        """
        # Get the actual workspaces
        workspaces = assistant_client.list_workspaces().get_result()[
            'workspaces']
        env_workspace_id = environ.get('WORKSPACE_ID')
        if env_workspace_id:
            # Optionally, we have an env var to give us a WORKSPACE_ID.
            # If one was set in the env, require that it can be found.
            LOG.debug("Using WORKSPACE_ID=%s" % env_workspace_id)
            for workspace in workspaces:
                if workspace['workspace_id'] == env_workspace_id:
                    ret = env_workspace_id
                    break
            else:
                # for/else: only reached when the loop found no match.
                raise Exception("WORKSPACE_ID=%s is specified in a runtime "
                                "environment variable, but that workspace "
                                "does not exist." % env_workspace_id)
        else:
            # Find it by name. We may have already created it.
            name = environ.get('WORKSPACE_NAME', 'watson-online-store')
            for workspace in workspaces:
                if workspace['name'] == name:
                    ret = workspace['workspace_id']
                    LOG.debug("Found WORKSPACE_ID=%(id)s using lookup by "
                              "name=%(name)s" % {'id': ret, 'name': name})
                    break
            else:
                # for/else: no workspace with that name.
                # Not found, so create it.
                LOG.debug("Creating workspace from data/workspace.json...")
                workspace = WatsonOnlineStore.get_workspace_json()
                created = assistant_client.create_workspace(
                    name=name,
                    description="Assistant workspace "
                                "created by watson-online-store.",
                    language=workspace['language'],
                    intents=workspace['intents'],
                    entities=workspace['entities'],
                    dialog_nodes=workspace['dialog_nodes'],
                    counterexamples=workspace['counterexamples'],
                    metadata=workspace['metadata']).get_result()
                ret = created['workspace_id']
                LOG.debug("Created WORKSPACE_ID=%(id)s with "
                          "name=%(name)s" % {'id': ret, 'name': name})
        return ret
@staticmethod
def setup_discovery_collection(discovery_client,
data_source,
environ):
""" Ensure that the collection exists in the Watson Discovery service.
:param discovery_client: discovery service client
:param str data_source: name of the discovery data source
:param object environ: runtime environment variables
:return: ID of discovery environment and collection to use
:rtype: str
:raise Exception: When collection is not found and cannot be created
"""
# If environment id exists, ensure it is valid.
environment_id = environ.get('DISCOVERY_ENVIRONMENT_ID')
if environment_id:
try:
LOG.debug("Using DISCOVERY_ENVIRONMENT_ID=%s" % environment_id)
discovery_client.get_environment(environment_id)
except Exception as e:
print(e)
raise Exception("Environment with DISCOVERY_ENVIRONMENT_ID=%s "
"not found." % environment_id)
else:
# Try to find the environment by name.
name = environ.get('DISCOVERY_ENVIRONMENT_NAME',
'watson-online-store')
environments = discovery_client.list_environments().get_result()
for environment in environments['environments']:
if environment['name'] == name:
environment_id = environment['environment_id']
LOG.debug("Found DISCOVERY_ENVIRONMENT_ID=%(id)s using "
"lookup by name=%(name)s" %
{'id': environment_id, 'name': name})
break
elif not environment['read_only']:
# Last resort will be to use an available one, but
# cannot use/modify a read-only environment.
environment_id = environment['environment_id']
if not environment_id:
# No existing environment found, so create it.
# NOTE that the number of environments that can be created
# under a trial Bluemix account is limited to one environment
# per organization.
try:
LOG.debug("Creating discovery environment...")
created = discovery_client.create_environment(
name=name,
description="Discovery environment created by "
"watson-online-store.").get_result()
environment_id = created['environment_id']
LOG.debug("Created DISCOVERY_ENVIRONMENT_ID=%(id)s with "
"name=%(name)s" %
{'id': environment_id, 'name': name})
except Exception as e:
raise Exception("Error creating Discovery "
"Error: %s" % repr(e))
# Determine if collection exists.
collection_id = environ.get('DISCOVERY_COLLECTION_ID')
if collection_id:
try:
LOG.debug("Using DISCOVERY_COLLECTION_ID=%s" % collection_id)
discovery_client.get_collection(environment_id,
collection_id)
return environment_id, collection_id
except Exception:
raise Exception("Collection with DISCOVERY_COLLECTION_ID=%s "
"does not exist." % collection_id)
else:
# Try to find collection by name. Search all collections
# that exist in the discovery environment.
# Discovery collection names
amazon_collection_name = "amazon-shopping"
ibm_collection_name = "ibm-logo-store"
# File path location to discovery files
amazon_data_path = "data/amazon_data_html/"
ibm_data_path = "data/ibm_store/"
collections = discovery_client.list_collections(
environment_id).get_result()['collections']
for coll in collections:
if ((data_source == DISCOVERY_AMAZON_STORE and
coll['name'] == amazon_collection_name) or
(data_source == DISCOVERY_IBM_STORE and
coll['name'] == ibm_collection_name)):
return environment_id, coll['collection_id']
# Doesn't exist, so create it.
LOG.debug("Creating collection from data files...")
try:
if data_source == DISCOVERY_AMAZON_STORE:
name = amazon_collection_name
path = amazon_data_path
elif data_source == DISCOVERY_IBM_STORE:
name = ibm_collection_name
path = ibm_data_path
if name:
collection = discovery_client.create_collection(
environment_id,
name).get_result()
# Add documents to collection
if collection:
collection_id = collection['collection_id']
for _, _, files in os.walk(path):
for fname in files:
if fname.endswith('.html') or \
fname.endswith('.json'):
with open(os.path.join(path, fname), 'r') as f:
data = f.read()
discovery_client.add_document(environment_id,
collection_id,
file=data,
filename=fname)
except Exception as e:
raise Exception("Discovery Collection could not be created. "
"Error: %s" % repr(e))
if not collection_id:
raise Exception("Discovery Collection could not be found "
"or created.")
return environment_id, collection_id
    @staticmethod
    def get_workspace_json():
        """Load the canned Assistant workspace definition.

        Reads ``data/workspace.json`` relative to the current working
        directory.

        :returns: parsed workspace definition
        :rtype: dict
        """
        with open('data/workspace.json') as workspace_file:
            workspace = json.load(workspace_file)
        return workspace
def context_merge(self, dict1, dict2):
"""Combine 2 dicts into one for Watson Assistant context.
Common data in dict2 will override data in dict1
:param dict dict1: original context dictionary
:param dict dict2: new context dictionary - will override fields
:returns: new_dict for context
:rtype: dict
"""
new_dict = dict1.copy()
if dict2:
new_dict.update(dict2)
return new_dict
def parse_slack_output(self, output_dict):
"""Prepare output when using Slack as UI.
:param dict output_dict: text, channel, user, etc from slack posting
:returns: text, channel, user
:rtype: str, str, str
"""
if output_dict and len(output_dict) > 0:
for output in output_dict:
if (output and 'text' in output and 'user' in output and
('bot_id' not in output) and
('user_profile' not in output)):
if self.at_bot in output['text']:
return (
''.join(output['text'].split(self.at_bot
)).strip().lower(),
output['channel'],
output['user'])
elif (output['channel'].startswith('D') and
output['user'] != self.bot_id):
# Direct message!
return (output['text'].strip().lower(),
output['channel'],
output['user'])
return None, None, None
    def add_customer_to_context(self):
        """Send Customer info to Watson using context.
        The customer data from the UI is in the Cloudant DB, or has
        been added. Now add it to the context and pass back to Watson.
        """
        # Customer fields become top-level context keys (email,
        # first_name, last_name, shopping_cart, type).
        self.context = self.context_merge(self.context,
                                          self.customer.get_customer_dict())
def customer_from_db(self, user_data):
"""Set the customer using data from Cloudant DB.
We have the Customer in the Cloudant DB. Create a Customer object from
this data and set for this instance of WatsonOnlineStore
:param dict user_data: email, first_name, and last_name
"""
email_addr = user_data['email']
first = user_data['first_name']
last = user_data['last_name']
self.customer = OnlineStoreCustomer(email=email_addr,
first_name=first,
last_name=last,
shopping_cart=[])
def create_user_from_ui(self, user_json):
"""Set the customer using data from Slack.
Authenticated user in slack will have email, First, and Last
names. Create a user in the DB for this. Note that a different
UI will require different code here.
json info in ['user']['profile']
:param dict user_json: email, first_name, and last_name
"""
email_addr = user_json['user']['profile']['email']
try:
first = user_json['user']['profile']['first_name']
except KeyError:
first = email_addr
try:
last = user_json['user']['profile']['last_name']
except KeyError:
last = email_addr
self.customer = OnlineStoreCustomer(email=email_addr,
first_name=first,
last_name=last,
shopping_cart=[])
    def init_customer(self, sender, user_id):
        """Get user from DB, or create entry for user.
        :param object sender: Client-specific implementation
        :param str user_id: User ID
        """
        # NOTE(review): assert is stripped under ``python -O``; user_id
        # is required here.
        assert user_id
        user_json = sender.get_user_json(user_id)
        # Not found returns json with error.
        LOG.debug("user_from_slack:\n{}\n".format(user_json))
        if user_json and 'user' in user_json:
            cust = user_json['user'].get('profile', {}).get('email')
            if cust:
                user_data = self.cloudant_online_store.find_customer(cust)
                if user_data:
                    # We found this Slack user in our Cloudant DB
                    LOG.debug("user_from_DB\n{}\n".format(user_data))
                    self.customer_from_db(user_data)
                else:
                    # Didn't find Slack user in DB, so add them
                    self.create_user_from_ui(user_json)
                    self.cloudant_online_store.add_customer_obj(self.customer)
        if self.customer:
            # Now Watson will have customer info
            self.add_customer_to_context()
    def get_fake_discovery_response(self):
        """Returns fake response from IBM Discovery for testing purposes.
        :returns: dict with a single 'discovery_result' entry
        :rtype: dict
        """
        # Pick one canned result at random from the test fixtures.
        index = random.randint(0, len(FAKE_DISCOVERY)-1)
        ret_string = {'discovery_result': FAKE_DISCOVERY[index]}
        return ret_string
    def handle_discovery_query(self):
        """Take query string from Watson Context and send to Discovery.
        Discovery response will be merged into context in order to allow it to
        be returned to Watson. In the case where there is no discovery client,
        a fake response will be returned, for testing purposes.
        :returns: False indicating no need for UI input, just return to Watson
        :rtype: Bool
        """
        query_string = self.context['discovery_string']
        if self.discovery_client:
            try:
                response = self.get_discovery_response(query_string)
            except Exception as e:
                # Surface the failure to the dialog instead of crashing.
                response = {'discovery_result': repr(e)}
        else:
            response = self.get_fake_discovery_response()
        self.context = self.context_merge(self.context, response)
        LOG.debug("watson_discovery:\n{}\ncontext:\n{}".format(response,
                                                               self.context))
        # no need for user input, return to Watson Dialogue
        return False
    def get_watson_response(self, message):
        """Sends text and context to Watson and gets reply.
        Message input is text, self.context is also added and sent to Watson.
        :param str message: text to send to Watson
        :returns: json dict from Watson
        :rtype: dict
        """
        response = self.assistant_client.message(
            workspace_id=self.workspace_id,
            input={'text': message},
            context=self.context)
        if self.context is None:
            # NOTE(review): __init__ sets context to {} and
            # context_merge would raise on a None dict1, so this branch
            # looks unreachable -- confirm before relying on it.
            LOG.debug("Initializing context")
        self.context = self.context_merge(self.context,
                                          response['context'])
        return response
@staticmethod
def format_discovery_response(response, data_source):
"""Format data for Slack based on discovery data source.
This method handles the different data source data and formats
it specifically for Slack.
The following functions are specific to the data source that has
been fed into the Watson Discovery service. This example has two
data sources to choose from: "IBM_STORE" and "AMAZON'. Which data
source is being used is specified in the ".env" file by setting
the following key values:
<data_source>_DISCO_COLLECTION_ID=<collection id of data source>
<data_source>_DISCO_SCORE_FILTER=<float value betweem 0.0. and 1.0>
DISCOVERY_DATA_SOURCE="<data source string name>"
This pattern should be followed if additional data sources are
added.
:param dict response: output from Discovery
:param string data_source: name of the discovery data source
:returns: cart_number, name, url, image for each item returned
:rtype: dict
"""
output = []
if not response.get('results'):
return output
def get_product_name(entry):
""" Pull product name from entry data for nice user display.
:param dict entry: output from Discovery
:returns: name of product
:rtype: str
"""
product_name = ""
if data_source == DISCOVERY_AMAZON_STORE:
# For amazon data, Watson Discovery has pulled the
# product name from the html page and stored it as
# "title" in its enriched metadata that it generates.
if 'extracted_metadata' in entry:
metadata = entry['extracted_metadata']
if 'title' in metadata:
product_name = metadata['title']
elif data_source == DISCOVERY_IBM_STORE:
# For IBM store data, the product name was placed in
# in the 'title' field.
if 'title' in entry:
product_name = entry['title']
return product_name
def get_product_price(entry):
""" Pull product price from entry data for nice user display.
:param dict entry: output from Discovery
:returns: name of product
:rtype: str
"""
product_price = ""
if data_source == DISCOVERY_AMAZON_STORE:
# For amazon data, Watson Discovery has pulled the
# product name from the html page and stored it as
# "title" in its enriched metadata that it generates.
if 'extracted_metadata' in entry:
metadata = entry['extracted_metadata']
if 'price' in metadata:
product_price = metadata['price']
elif data_source == DISCOVERY_IBM_STORE:
# For IBM store data, the product name was placed in
# in the 'title' field.
if 'price' in entry:
product_price = entry['price']
return product_price
def get_product_url(entry):
""" Pull product url from entry data so user can navigate
to product page.
:param dict entry: output from Discovery
:returns: url link to product description
:rtype: str
"""
product_url = ""
if data_source == DISCOVERY_AMAZON_STORE:
if 'html' in entry:
html = entry['html']
# For amazon data, the product URL is stored in an
# href tag located at the end of the html doc.
href_tag = "<a href="
# Search from bottom of the doc.
sidx = html.rfind(href_tag)
if sidx > 0:
sidx += len(href_tag)
eidx = html.find('>', sidx, len(html))
if eidx > 0:
product_url = html[sidx+1:eidx-1]
elif data_source == DISCOVERY_IBM_STORE:
if 'product_page' in entry:
# For IBM store data, the product URL is located in the
# 'product_page' field.
product_url = entry['product_page']
return product_url
def get_image_url(entry):
"""Pull product image url from entry data to allow
pictures in slack.
:param dict entry: output from Discovery
:returns: url link to product image
:rtype: str
"""
image_url = ""
if data_source == DISCOVERY_AMAZON_STORE:
# There is no image url for Amazon data,
# so use the product url.
return get_product_url(entry)
elif data_source == DISCOVERY_IBM_STORE:
# For IBM store data, the image url is located in the
# 'image_url' field.
if 'image_url' in entry:
image_url = re.sub(
r'scale\[[0-9]+\]', 'scale[50]', entry['image_url'])
return image_url
def slack_encode(input_text):
"""Remove chars <, &, > for Slack.
:param str input_text: text to be cleaned for Slack
:returns: text without undesirable chars
:rtype: str
"""
if not input_text:
return input_text
args = [('&', '&'), ('<', '<'), ('>', '>')]
for from_to in args:
input_text = input_text.replace(*from_to)
return input_text
results = response['results']
cart_number = 1
for i in range(min(len(results), DISCOVERY_KEEP_COUNT)):
result = results[i]
product_data = {
"cart_number": str(cart_number),
"name": slack_encode(get_product_name(result)),
"price": slack_encode(get_product_price(result)),
"url": slack_encode(get_product_url(result)),
"image": slack_encode(get_image_url(result)),
}
cart_number += 1
output.append(product_data)
return output
    def get_discovery_response(self, input_text):
        """Call discovery with input_text and return formatted response.
        Formatted response_tuple is saved for WatsonOnlineStore to allow item
        to be easily added to shopping cart.
        Response is then further formatted to be passed to UI.
        :param str input_text: query to be used with Watson Discovery Service
        :returns: Discovery response in format for Watson Assistant
        :rtype: dict
        """
        discovery_response = self.discovery_client.query(
            environment_id=self.discovery_environment_id,
            collection_id=self.discovery_collection_id,
            query=input_text,
            count=DISCOVERY_QUERY_COUNT
        ).get_result()
        # Watson discovery assigns a confidence level to each result.
        # Based on data mix, we can assign a minimum tolerance value in an
        # attempt to filter out the "weakest" results.
        if self.discovery_score_filter and 'results' in discovery_response:
            fr = [x for x in discovery_response['results'] if 'score' in x and
                  x['score'] > self.discovery_score_filter]
            discovery_response['matching_results'] = len(fr)
            discovery_response['results'] = fr
        response = self.format_discovery_response(discovery_response,
                                                  self.discovery_data_source)
        # Cache the structured results so handle_add_to_cart can map a
        # cart number back to an item later.
        self.response_tuple = response
        fmt = "{cart_number}) {name} {price}\n{image}"
        formatted_response = "\n".join(fmt.format(**item) for item in response)
        return {'discovery_result': formatted_response}
    def handle_list_shopping_cart(self):
        """Load the cart from the DB and place a formatted listing in context.
        Sets context['shopping_cart'] (display text) and
        context['grand_total'] (formatted sum).
        :returns: False -- no UI input needed, return to Watson dialog
        :rtype: Bool
        """
        cust = self.customer.email
        shopping_list = self.cloudant_online_store.list_shopping_cart(cust)
        fmt = "\n{}) {name} - `{price}`\n{url}\n"
        formatted_out = "\n".join(fmt.format(i + 1, **item)
                                  for i, item in enumerate(shopping_list))
        grand_total = 0.0
        for item in shopping_list:
            # assumes price strings look like "$12.34" -- TODO confirm
            priceStr = item['price']
            grand_total = grand_total + float(priceStr.replace('$', ''))
        self.context['shopping_cart'] = formatted_out
        self.context['grand_total'] = "%.2f" % grand_total
        # no need for user input, return to Watson Dialogue
        return False
def clear_shopping_cart(self):
"""Clear shopping_cart and cart_item fields in context
"""
self.context['shopping_cart'] = ''
self.context['cart_item'] = ''
    def handle_delete_from_cart(self):
        """Pulls cart_item from Watson context and deletes from Cloudant DB
        cart_item in context must be an int or delete will silently fail.
        """
        email = self.customer.email
        shopping_list = self.cloudant_online_store.list_shopping_cart(email)
        try:
            item_num = int(self.context['cart_item'])
        except ValueError:
            LOG.exception("cart_item must be a number")
            return False
        # Cart numbers shown to the user are 1-based; out-of-range
        # numbers simply match nothing and delete nothing.
        for index, item in enumerate(shopping_list):
            if index+1 == item_num:
                self.cloudant_online_store.delete_item_shopping_cart(email,
                                                                     item)
        self.clear_shopping_cart()
        # no need for user input, return to Watson Dialogue
        return False
    def handle_delete_all_from_cart(self):
        """Remove every item from the customer's cart in the Cloudant DB
        and reset the cart fields in context (used at checkout).
        """
        email = self.customer.email
        shopping_list = self.cloudant_online_store.list_shopping_cart(email)
        for item in shopping_list:
            self.cloudant_online_store.delete_item_shopping_cart(email, item)
        self.clear_shopping_cart()
        # no need for user input, return to Watson Dialogue
        return False
    def handle_add_to_cart(self):
        """Adds cart_item from Watson context and saves in Cloudant DB
        cart_item in context must be an int or add/save will silently fail.
        """
        try:
            cart_item = int(self.context['cart_item'])
        except ValueError:
            LOG.exception("cart_item must be a number")
            return False
        email = self.customer.email
        # response_tuple was cached by the most recent Discovery query;
        # its 1-based cart numbers are what the user picked from.
        for index, entry in enumerate(self.response_tuple):
            if index+1 == cart_item:
                item = {
                    "item_id": str(cart_item),
                    "name": entry['name'],
                    "price": entry['price'],
                    "url": entry['url']
                }
                self.cloudant_online_store.add_to_shopping_cart(email, item)
        self.clear_shopping_cart()
        # no need for user input, return to Watson Dialogue
        return False
    def handle_message(self, message, sender):
        """Handler for messages coming from Watson Assistant using context.
        Fields in context will trigger various actions in this application.
        :param str message: text from UI
        :param object sender: used for client's send_message implementation
        :returns: True if UI input is required, False if we want app
                  processing and no input
        :rtype: Bool
        """
        watson_response = self.get_watson_response(message).get_result()
        if 'context' in watson_response:
            self.context = watson_response['context']
        sender.send_message("\n".join(watson_response['output']['text']) +
                            "\n")
        # A discovery_string in context means the dialog asked for a
        # product search.
        if (self.context.get('discovery_string') and self.discovery_client):
            return self.handle_discovery_query()
        # Shopping-cart actions are set by the Assistant dialog nodes.
        cart_action = self.context.get('shopping_cart')
        if cart_action == 'list':
            return self.handle_list_shopping_cart()
        elif cart_action == 'add':
            if ('cart_item' in self.context and
                    self.context['cart_item'] != ''):
                return self.handle_add_to_cart()
        elif cart_action == 'checkout':
            return self.handle_delete_all_from_cart()
        elif cart_action == 'delete':
            if ('cart_item' in self.context.keys() and
                    self.context['cart_item'] != ''):
                return self.handle_delete_from_cart()
        # This is a context variable defined in Assistant dialogs
        # to control if there is a need to wait for user input.
        # returning false means there is no user input needed.
        if self.context.get('get_input') == 'no':
            return False
        return True
    def handle_conversation(self, message, sender, user):
        """Handler for messages coming from user.
        Loops when additional input is needed.
        :param str message: text from UI
        :param sender: a sender impl used for send_message
        :param str user: user ID
        """
        if user and not self.customer:
            self.init_customer(sender, user)
        get_input = self.handle_message(message, sender)
        while not get_input:
            # NOTE(review): re-sends the same message until the dialog
            # asks for input again; a dialog that never sets get_input
            # would loop forever here -- confirm dialogs guarantee exit.
            get_input = self.handle_message(message, sender)
    def run(self):
        """Main run loop of the application with a Slack client
        Blocks forever, polling the Slack RTM connection.
        """
        # make sure DB exists
        self.cloudant_online_store.init()
        if self.slack_client and self.slack_client.rtm_connect():
            LOG.info("Watson Online Store bot is connected and running!")
            while True:
                slack_output = self.slack_client.rtm_read()
                if slack_output:
                    LOG.debug("slack output\n:{}\n".format(slack_output))
                message, channel, user = self.parse_slack_output(slack_output)
                if message:
                    LOG.debug("message:\n %s\n channel:\n %s\n" %
                              (message, channel))
                # Ignore Slack link-unfurl chatter.
                if message and channel and 'unfurl' not in message:
                    sender = SlackSender(self.slack_client, channel)
                    self.handle_conversation(message, sender, user)
                # Polling delay between RTM reads.
                time.sleep(self.delay)
        else:
            LOG.warning("Connection failed. Invalid Slack token or bot ID?")
| |
# coding=utf-8
#
# Copyright 2012 Guokai (benben.cc)
# Do have a faith in what you're doing.
# Make your life a story worth telling.
import re
# Maps the public clause names used by Query's fluent methods to the
# internal keys stored in Query.__protected.
do_dict = {
    "where": "__condition",
    "table": "__table_name",
    "limit": "__limit",
    "order": "__order",
    "field": "__field",
    "data": "__data",
    "group": "__group",
    "having": "__having",
    "join": "__join",
}
class Query(object):
def __init__(self, table_name=None, db=None):
if not table_name == None:
self.table_name = table_name
if not db == None:
self.db = db
self.__reset()
    def __reset(self):
        # Clear accumulated clauses; the field list defaults to "*" and
        # the table defaults to the builder's table_name.
        self.__cluster = []
        self.__protected = {}
        self.__protected["__field"] = "*"
        self.__protected["__table_name"] = self.table_name
    def __close(self):
        # Called after each statement is built so the next one starts clean.
        self.__reset()
def __tracker(self, name):
if (not name in self.__cluster): self.__cluster.append(name)
def __check(self, name):
return True if (name in self.__cluster) else False
def __do(self, name, value):
value = value.strip() if type(value) == type('string') else value
self.__protected[do_dict[name]] = value
self.__tracker(name)
    def __sqlfix(self, sql):
        # Escape lone % as %% and lone backslashes as doubled
        # backslashes for the DB layer; the lookaround guards leave
        # already-doubled characters untouched.
        sql = re.sub(r"(?<!%)%(?!%)", "%%", sql)
        sql = re.sub(r"(?<!\\)\\(?!\\)", r"\\\\", sql)
        return sql
def __valuefix(self, value):
value = re.sub(r"\'", "''", value) if type(value) == type("string") or type(value) == type("unicode") else value
return value
def __sqlbuild(self, sql='', queue=[]):
for statement in queue:
if (self.__check("join") and statement == "join"):
sql = sql + " %s" % self.__protected["__join"]
if (self.__check("where") and statement == "where"):
sql = sql + " WHERE %s" % self.__protected["__condition"]
if (self.__check("order") and statement == "order"):
sql = sql + " ORDER BY %s" % self.__protected["__order"]
if (self.__check("limit") and statement == "limit"):
sql = sql + " LIMIT %s" % self.__protected["__limit"]
if (self.__check("group") and statement == "group"):
sql = sql + " GROUP BY %s" % self.__protected["__group"]
if (self.__check("having") and statement == "having"):
sql = sql + " HAVING %s" % self.__protected["__having"]
if (self.__check("data") and statement == "data:save"):
sets = ""
for data in self.__protected["__data"]:
sets = sets + "%s = '%s', " % (data, self.__valuefix(self.__protected["__data"][data]))
sets = sets.strip().rstrip(",")
sql = sql + " SET %s" % sets
if (self.__check("data") and statement == "data:add"):
sets = ""
values = ""
for data in self.__protected["__data"]:
sets = sets + "%s, " % data
values = values + "'%s', " % self.__valuefix(self.__protected["__data"][data])
sets = sets.strip().rstrip(",")
values = values.strip().rstrip(",")
sql = sql + " (%s)" % sets
sql = sql + " VALUES (%s)" % values
return sql
    def prepend(self, name, value):
        """AND-prepend *value* onto an already-recorded clause.

        Assumes the clause was previously set via its fluent method;
        raises KeyError otherwise. Returns self for chaining.
        """
        self.__protected[do_dict[name]] = "%s AND %s" % (value, self.__protected[do_dict[name]])
        return self
    def table(self, table_name):
        """Set the target table; returns self for chaining."""
        self.__do("table", table_name)
        return self
    def where(self, condition):
        """Set the WHERE clause; returns self for chaining."""
        self.__do("where", condition)
        return self
    def limit(self, start, end=None):
        """Set LIMIT start[, end]; returns self for chaining."""
        limit = start if not end else "%s, %s" % (start, end)
        self.__do("limit", limit)
        return self
    def order(self, type):
        """Set ORDER BY; returns self for chaining.

        NOTE(review): the parameter name shadows the ``type`` builtin;
        kept as-is so keyword callers are unaffected.
        """
        self.__do("order", type)
        return self
    def field(self, field):
        """Set the SELECT field list; returns self for chaining."""
        self.__do("field", field)
        return self
    def data(self, data):
        """Set the column/value mapping for INSERT/UPDATE; returns self."""
        self.__do("data", data)
        return self
    def group(self, type):
        """Set GROUP BY; returns self for chaining."""
        self.__do("group", type)
        return self
    def having(self, condition):
        """Set HAVING; returns self for chaining."""
        self.__do("having", condition)
        return self
    def join(self, condition):
        """Set a raw JOIN fragment; returns self for chaining."""
        self.__do("join", condition)
        return self
    def query(self, sql):
        """Run a raw SQL string through the db adapter.

        Builder state is reset first; the SQL gets the same %%/backslash
        escaping as generated statements.
        """
        self.__close()
        sql = self.__sqlfix(sql)
        return self.db.query(sql)
def grasp(self, sql):
select_regx = re.compile(
"SELECT (COUNT\()?(?P<field>[\w\*\s\.,]+)\)? FROM (?P<table_name>.*?)(LIMIT|ORDER|GROUP|HAVING|WHERE|LEFT|RIGHT|INNER|$)",
re.I)
where_complex_regx = re.compile("WHERE (?P<condition>.*?)(LIMIT|ORDER|GROUP|HAVING|LEFT|RIGHT|INNER)", re.I)
where_regx = re.compile("WHERE (?P<condition>.*)", re.I)
limit_regx = re.compile("LIMIT (?P<start>\d+),?\s*(?P<end>\d+)?", re.I)
group_regx = re.compile("GROUP BY (?P<group_by>[\w\.]+)", re.I)
having_regx = re.compile("HAVING (?P<having>\w+)", re.I)
order_regx = re.compile(
"ORDER BY (?P<order_by>[\w\.\,\s]+\s+(ASC|DESC|\(\)|\s))\s*(LIMIT|GROUP|HAVING|WHERE|LEFT|RIGHT|INNER|$)",
re.I)
insert_regx = re.compile(
"INSERT INTO (?P<table_name>\w+) \(((\w+,?\s?)+)\) VALUES \((([\"']?\w+[\"']?,?\s?)+)\)", re.I)
update_complex_regx = re.compile(
"UPDATE (?P<table_name>\w+) SET (.*?)(LIMIT|ORDER|GROUP|HAVING|WHERE|LEFT|RIGHT|INNER)", re.I)
update_regx = re.compile("UPDATE (?P<table_name>\w+) SET (.*)", re.I)
table_regx = re.compile("FROM (?P<table_name>.*?)(LIMIT|ORDER|GROUP|HAVING|WHERE|LEFT|RIGHT|INNER|$)", re.I)
join_regx = re.compile(
"(?P<join_condition>(?P<join_dir>LEFT|RIGHT)?\s*(?P<join_type>INNER|OUTER)? JOIN (?P<table_name>\w+) (AS \w+\s+)?ON (.*?))(LIMIT|ORDER|GROUP|HAVING|WHERE)",
re.I)
select = select_regx.search(sql)
where_complex = where_complex_regx.search(sql)
where = where_regx.search(sql)
limit = limit_regx.search(sql)
group = group_regx.search(sql)
having = having_regx.search(sql)
order = order_regx.search(sql)
insert = insert_regx.search(sql)
update_complex = update_complex_regx.search(sql)
update = update_regx.search(sql)
table = table_regx.search(sql)
join = join_regx.search(sql)
if select:
_field = select.groupdict()["field"]
_table_name = select.groupdict()["table_name"]
self.__do("field", _field)
self.__do("table", _table_name)
if where_complex:
_condition = where_complex.groupdict()["condition"]
self.__do("where", _condition)
elif where:
_condition = where.groupdict()["condition"]
self.__do("where", _condition)
if limit:
start = limit.groupdict()["start"]
end = limit.groupdict()["end"]
_limit = start if not end else "%s, %s" % (start, end)
self.__do("limit", _limit)
if group:
_group_by = group.groupdict()["group_by"]
self.__do("group", _group_by)
if having:
_having = group.groupdict()["having"]
self.__do("having", _having)
if order:
_order_by = order.groupdict()["order_by"]
self.__do("order", _order_by)
if table:
_table_name = table.groupdict()["table_name"]
self.__do("table", _table_name)
if join:
_join = join.groupdict()["join_condition"]
self.__do("join", _join)
if insert:
_table_name = insert.groupdict()["table_name"]
fields = insert.groups()[1].split(",")
values = insert.groups()[3].split(",")
_data = {}
for index, field in enumerate(fields):
field = field.strip()
value = values[index].strip()
_data[field] = value
self.__do("data", _data)
self.__do("table", _table_name)
if update_complex:
_table_name = update_complex.groupdict()["table_name"]
pairs = update_complex.groups()[1].split(",")
_data = {}
for index, pair in enumerate(pairs):
pair = pair.split("=")
field = pair[0].strip()
value = pair[1].strip()
_data[field] = value
self.__do("data", _data)
self.__do("table", _table_name)
elif update:
_table_name = update.groupdict()["table_name"]
pairs = update.groups()[1].split(",")
_data = {}
for index, pair in enumerate(pairs):
pair = pair.split("=")
field = pair[0].strip()
value = pair[1].strip()
_data[field] = value
self.__do("data", _data)
self.__do("table", _table_name)
return self
def count(self, cheat=False):
    """Run ``SELECT COUNT(*)`` for the current query.

    Returns the row count, or just the generated SQL when *cheat* is true.
    Queries with GROUP/HAVING clauses are counted by fetching the grouped
    rows, since COUNT(*) would count pre-grouping rows.
    """
    statement = "SELECT COUNT(*) FROM %s" % self.__protected["__table_name"]
    statement = self.__sqlbuild(statement, ["join", "where", "group", "having"])
    statement = self.__sqlfix(statement)
    self.__close()
    if cheat:
        return statement
    if re.compile("(GROUP|HAVING)", re.I).search(statement):
        # grouped result: the count is the number of groups returned
        return len(self.db.query(statement))
    return self.db.get(statement)["COUNT(*)"]
def sum(self, field, cheat=False):
    """Run ``SELECT SUM(field)`` with the accumulated WHERE conditions.

    Returns the summed value, or the generated SQL when *cheat* is true.
    """
    statement = "SELECT SUM(%s) FROM %s" % (field, self.__protected["__table_name"])
    statement = self.__sqlfix(self.__sqlbuild(statement, ["where"]))
    self.__close()
    if cheat:
        return statement
    return self.db.get(statement)["SUM(%s)" % field]
def find(self, cheat=False):
    """Return the first row matching the current query, or None.

    When *cheat* is true, return the generated SQL instead of executing it.
    """
    try:
        if cheat:
            return self.select(cheat)
        return self.select()[0]
    except Exception:
        # BUG FIX: the original bare ``except:`` also swallowed SystemExit
        # and KeyboardInterrupt.  Keep the best-effort None result (empty
        # result sets raise IndexError) but let process-control exceptions
        # propagate.
        return None
def select(self, cheat=False):
    """Execute the accumulated SELECT and return its rows.

    Returns the generated SQL instead when *cheat* is true.
    """
    base = "SELECT %s FROM %s" % (self.__protected["__field"],
                                  self.__protected["__table_name"])
    statement = self.__sqlfix(
        self.__sqlbuild(base, ["join", "where", "group", "having", "order", "limit"]))
    self.__close()
    return statement if cheat else self.db.query(statement)
def delete(self, cheat=False):
    """Execute a DELETE with the accumulated conditions.

    Returns the execution result, or the generated SQL when *cheat* is true.
    """
    statement = "DELETE FROM %s" % self.__protected["__table_name"]
    statement = self.__sqlfix(self.__sqlbuild(statement, ["where", "order", "limit"]))
    self.__close()
    return statement if cheat else self.db.execute(statement)
def save(self, cheat=False):
    """Execute an UPDATE using the stashed data/where clauses.

    Returns the execution result, or the generated SQL when *cheat* is true.
    """
    statement = "UPDATE %s" % self.__protected["__table_name"]
    statement = self.__sqlfix(self.__sqlbuild(statement, ["data:save", "where"]))
    self.__close()
    return statement if cheat else self.db.execute(statement)
def add(self, cheat=False):
    """Execute an INSERT using the stashed data.

    Returns the execution result, or the generated SQL when *cheat* is true.
    """
    statement = "INSERT INTO %s" % self.__protected["__table_name"]
    statement = self.__sqlfix(self.__sqlbuild(statement, ["data:add"]))
    self.__close()
    return statement if cheat else self.db.execute(statement)
def pages(self, current_page=1, list_rows=40, cheat=False):
    """Paginate the current query.

    :param current_page: 1-based page to fetch; clamped into [1, pages].
    :param list_rows: rows per page.
    :param cheat: when true, return the generated SQL for the page query
        instead of executing anything.
    :returns: dict with 'list' (the page's rows) and 'page' (prev/next/
        current/pages/total metadata), or SQL when *cheat*.
    """
    sql = self.select(cheat=True)
    self.__close()
    total = self.grasp(sql).count()
    # BUG FIX: ``count / list_rows`` is float division on Python 3, which
    # then leaked floats into the LIMIT offsets.  Use floor division.
    pages = total // list_rows
    if total % list_rows:
        pages += 1
    if pages == 0:
        pages = 1
    # clamp the requested page into the valid range
    if current_page < 1:
        current_page = 1
    if current_page > pages:
        current_page = pages
    start = (current_page - 1) * list_rows
    end = list_rows
    previous_page = current_page - 1 if current_page > 1 else 1
    next_page = current_page + 1 if current_page < pages else pages
    if cheat:
        # BUG FIX: the original also executed the real page query before
        # returning the cheat SQL; skip that needless round-trip.
        return self.grasp(sql).limit(start, end).select(cheat)
    return {
        "list": self.grasp(sql).limit(start, end).select(),
        "page": {
            "prev": previous_page,
            "next": next_page,
            "current": current_page,
            "pages": pages,
            "total": total,
        },
    }
| |
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import logging
from collections import MutableSet
from flexget import plugin
from flexget.entry import Entry
from flexget.event import event
from flexget.plugins.internal.api_trakt import get_api_url, get_entry_ids, get_session, make_list_slug
from flexget.utils import json
from flexget.utils.cached_input import cached
from flexget.utils.requests import RequestException, TimedLimiter
from flexget.utils.tools import split_title_year
log = logging.getLogger('trakt_list')
IMMUTABLE_LISTS = []
def generate_show_title(item):
    """Build a display title for a trakt show item.

    Appends the release year in parentheses when trakt provides one.
    """
    info = item['show']
    if not info['year']:
        return info['title']
    return '%s (%s)' % (info['title'], info['year'])
def generate_episode_title(item):
    """Build a display title for a trakt episode item.

    Format: ``Show (year) SxxEyy Episode Title``; the year is omitted when
    unknown and the episode title when missing.
    """
    show = item['show']
    episode = item['episode']
    if show['year']:
        prefix = '%s (%s)' % (show['title'], show['year'])
    else:
        prefix = show['title']
    full = '%s S%02dE%02d %s' % (prefix, episode['season'],
                                 episode['number'], episode['title'] or '')
    return full.strip()
# Mappings from trakt API item payloads onto flexget Entry fields, keyed by
# item type ('movie', 'show', 'episode').  Values are either dotted lookup
# paths into the trakt item dict or callables taking the whole item; they
# are applied via Entry.update_using_map() when building list entries.
field_maps = {
    'movie': {
        'title': lambda i: '%s (%s)' % (i['movie']['title'], i['movie']['year'])
        if i['movie']['year'] else '%s' % i['movie']['title'],
        'movie_name': 'movie.title',
        'movie_year': 'movie.year',
        'trakt_movie_name': 'movie.title',
        'trakt_movie_year': 'movie.year',
        'imdb_id': 'movie.ids.imdb',
        'tmdb_id': 'movie.ids.tmdb',
        'trakt_movie_id': 'movie.ids.trakt',
        'trakt_movie_slug': 'movie.ids.slug'
    },
    'show': {
        'title': generate_show_title,
        'series_name': generate_show_title,
        'trakt_series_name': 'show.title',
        'trakt_series_year': 'show.year',
        'imdb_id': 'show.ids.imdb',
        'tvdb_id': 'show.ids.tvdb',
        'tvrage_id': 'show.ids.tvrage',
        'tmdb_id': 'show.ids.tmdb',
        'trakt_show_id': 'show.ids.trakt',
        'trakt_show_slug': 'show.ids.slug'
    },
    'episode': {
        'title': generate_episode_title,
        'series_name': generate_show_title,
        'trakt_series_name': 'show.title',
        'trakt_series_year': 'show.year',
        'series_season': 'episode.season',
        'series_episode': 'episode.number',
        'series_id': lambda i: 'S%02dE%02d' % (i['episode']['season'], i['episode']['number']),
        'imdb_id': 'show.ids.imdb',
        'tvdb_id': 'show.ids.tvdb',
        'tvrage_id': 'show.ids.tvrage',
        'trakt_episode_id': 'episode.ids.trakt',
        'trakt_show_id': 'show.ids.trakt',
        'trakt_show_slug': 'show.ids.slug',
        'trakt_ep_name': 'episode.title'
    }
}
class TraktSet(MutableSet):
    """A mutable set of flexget Entries backed by a list on trakt.tv.

    Supports the built-in collection/watchlist/watched/ratings lists as well
    as user-created custom lists.  ``add``/``discard`` submit to the trakt
    API; iteration fetches the remote list once and caches it in
    ``self._items`` until :meth:`invalidate_cache` is called.
    """

    @property
    def immutable(self):
        # Returns an explanatory message when the configured list cannot be
        # modified, None (falsy) otherwise.
        if self.config['list'] in IMMUTABLE_LISTS:
            return '%s list is not modifiable' % self.config['list']

    schema = {
        'type': 'object',
        'properties': {
            'username': {'type': 'string'},
            'account': {'type': 'string'},
            'list': {'type': 'string'},
            'type': {'type': 'string', 'enum': ['shows', 'seasons', 'episodes', 'movies', 'auto'], 'default': 'auto'},
            'strip_dates': {'type': 'boolean', 'default': False},
            'language': {'type': 'string', 'minLength': 2, 'maxLength': 2}
        },
        'required': ['list'],
        'anyOf': [{'required': ['username']}, {'required': ['account']}],
        'error_anyOf': 'At least one of `username` or `account` options are needed.',
        'additionalProperties': False
    }

    def __init__(self, config):
        self.config = config
        if self.config.get('account') and not self.config.get('username'):
            self.config['username'] = 'me'
        self.session = get_session(self.config.get('account'))
        # Lists may not have modified results if modified then accessed in quick succession.
        self.session.add_domain_limiter(TimedLimiter('trakt.tv', '2 seconds'))
        self._items = None

    def __iter__(self):
        return iter(self.items)

    def __len__(self):
        return len(self.items)

    def add(self, entry):
        self.submit([entry])

    def __ior__(self, entries):
        # Optimization to submit multiple entries at same time
        self.submit(entries)
        # BUG FIX: augmented assignment rebinds its target to the return
        # value of __ior__, so ``s |= entries`` used to set ``s`` to None.
        return self

    def discard(self, entry):
        self.submit([entry], remove=True)

    def __isub__(self, entries):
        # Optimization to submit multiple entries at same time
        self.submit(entries, remove=True)
        # BUG FIX: see __ior__ -- in-place operators must return self.
        return self

    def _find_entry(self, entry):
        """Return the cached item matching *entry*, or None."""
        for item in self.items:
            if self.config['type'] in ['episodes', 'auto'] and self.episode_match(entry, item):
                return item
            if self.config['type'] in ['seasons', 'auto'] and self.season_match(entry, item):
                return item
            if self.config['type'] in ['shows', 'auto'] and self.show_match(entry, item):
                return item
            if self.config['type'] in ['movies', 'auto'] and self.movie_match(entry, item):
                return item

    def __contains__(self, entry):
        return self._find_entry(entry) is not None

    def clear(self):
        # Removes items one by one; each removal invalidates the cache.
        if self.items:
            for item in self.items:
                self.discard(item)
            self._items = None

    def get(self, entry):
        return self._find_entry(entry)

    # -- Public interface ends here -- #

    @property
    def items(self):
        """Fetch (once) and return the list contents as flexget Entries."""
        if self._items is None:
            if self.config['list'] in ['collection', 'watched'] and self.config['type'] == 'auto':
                raise plugin.PluginError('`type` cannot be `auto` for %s list.' % self.config['list'])
            endpoint = self.get_list_endpoint()
            log.verbose('Retrieving `%s` list `%s`', self.config['type'], self.config['list'])
            try:
                result = self.session.get(get_api_url(endpoint))
                try:
                    data = result.json()
                except ValueError:
                    log.debug('Could not decode json from response: %s', result.text)
                    raise plugin.PluginError('Error getting list from trakt.')
            except RequestException as e:
                raise plugin.PluginError('Could not retrieve list from trakt (%s)' % e)
            if not data:
                log.warning('No data returned from trakt for %s list %s.', self.config['type'], self.config['list'])
                return []
            entries = []
            list_type = (self.config['type']).rstrip('s')
            for item in data:
                if self.config['type'] == 'auto':
                    list_type = item['type']
                # Collection and watched lists don't return 'type' along with the items (right now)
                if 'type' in item and item['type'] != list_type:
                    log.debug('Skipping %s because it is not a %s', item[item['type']].get('title', 'unknown'),
                              list_type)
                    continue
                if list_type != 'episode' and not item[list_type]['title']:
                    # Skip shows/movies with no title
                    log.warning('Item in trakt list does not appear to have a title, skipping.')
                    continue
                entry = Entry()
                if list_type == 'episode':
                    entry['url'] = 'https://trakt.tv/shows/%s/seasons/%s/episodes/%s' % (
                        item['show']['ids']['slug'], item['episode']['season'], item['episode']['number'])
                else:
                    entry['url'] = 'https://trakt.tv/%ss/%s' % (list_type, item[list_type]['ids'].get('slug'))
                # BUG FIX: populate the mapped fields *before* the translation
                # lookup below.  The old order read entry['trakt_movie_id'] and
                # entry['movie_year'] before they existed, and the later map
                # application clobbered the translated title anyway.
                entry.update_using_map(field_maps[list_type], item)
                # get movie name translation
                language = self.config.get('language')
                if list_type == 'movie' and language:
                    endpoint = ['movies', entry['trakt_movie_id'], 'translations', language]
                    try:
                        result = self.session.get(get_api_url(endpoint))
                        try:
                            translation = result.json()
                        except ValueError:
                            raise plugin.PluginError('Error decoding movie translation from trakt: %s.' % result.text)
                    except RequestException as e:
                        raise plugin.PluginError('Could not retrieve movie translation from trakt: %s' % str(e))
                    if not translation:
                        log.warning('No translation data returned from trakt for movie %s.', entry['title'])
                    else:
                        log.verbose('Found `%s` translation for movie `%s`: %s',
                                    language, entry['movie_name'], translation[0]['title'])
                        # BUG FIX: movie_year comes from the API as a number;
                        # the original string concatenation raised TypeError.
                        entry['title'] = '%s (%s)' % (translation[0]['title'], entry['movie_year'])
                        entry['movie_name'] = translation[0]['title']
                # Override the title if strip_dates is on. TODO: a better way?
                if self.config.get('strip_dates'):
                    if list_type in ['show', 'movie']:
                        entry['title'] = item[list_type]['title']
                    elif list_type == 'episode':
                        entry['title'] = '{show[title]} S{episode[season]:02}E{episode[number]:02}'.format(**item)
                        if item['episode']['title']:
                            entry['title'] += ' {episode[title]}'.format(**item)
                if entry.isvalid():
                    if self.config.get('strip_dates'):
                        # Remove year from end of name if present
                        entry['title'] = split_title_year(entry['title'])[0]
                    entries.append(entry)
                else:
                    log.debug('Invalid entry created? %s', entry)
            self._items = entries
        return self._items

    def invalidate_cache(self):
        """Forget the cached list so the next access refetches it."""
        self._items = None

    def get_list_endpoint(self, remove=False, submit=False):
        """Return the trakt API endpoint tuple for the configured list."""
        # Api restriction, but we could easily extract season and episode info from the 'shows' type
        if not submit and self.config['list'] in ['collection', 'watched'] and self.config['type'] == 'episodes':
            raise plugin.PluginError('`type` cannot be `%s` for %s list.' % (self.config['type'], self.config['list']))
        if self.config['list'] in ['collection', 'watchlist', 'watched', 'ratings']:
            if self.config.get('account'):
                if self.config['list'] == 'watched':
                    endpoint = ('sync', 'history')
                else:
                    endpoint = ('sync', self.config['list'])
                if not submit:
                    endpoint += (self.config['type'], )
            else:
                endpoint = ('users', self.config['username'], self.config['list'], self.config['type'])
        else:
            endpoint = ('users', self.config['username'], 'lists', make_list_slug(self.config['list']), 'items')
        if remove:
            endpoint += ('remove', )
        return endpoint

    def show_match(self, entry1, entry2):
        """True when the two entries identify the same show."""
        return any(entry1.get(ident) is not None and entry1[ident] == entry2.get(ident) for ident in
                   ['series_name', 'trakt_show_id', 'tmdb_id', 'tvdb_id', 'imdb_id', 'tvrage_id'])

    def season_match(self, entry1, entry2):
        """True when the two entries identify the same show season."""
        return (self.show_match(entry1, entry2) and entry1.get('series_season') is not None and
                entry1['series_season'] == entry2.get('series_season'))

    def episode_match(self, entry1, entry2):
        """True when the two entries identify the same episode."""
        return (self.season_match(entry1, entry2) and entry1.get('series_episode') is not None and
                entry1['series_episode'] == entry2.get('series_episode'))

    def movie_match(self, entry1, entry2):
        """True when the two entries identify the same movie."""
        if any(entry1.get(id) is not None and entry1[id] == entry2[id] for id in
               ['trakt_movie_id', 'imdb_id', 'tmdb_id']):
            return True
        if entry1.get('movie_name') and ((entry1.get('movie_name'), entry1.get('movie_year')) ==
                                         (entry2.get('movie_name'), entry2.get('movie_year'))):
            return True
        return False

    def submit(self, entries, remove=False):
        """Submits movies or episodes to trakt api."""
        found = {}
        for entry in entries:
            if self.config['type'] in ['auto', 'shows', 'seasons', 'episodes'] and entry.get('series_name'):
                show_name, show_year = split_title_year(entry['series_name'])
                show = {'title': show_name, 'ids': get_entry_ids(entry)}
                if show_year:
                    show['year'] = show_year
                if self.config['type'] in ['auto', 'seasons', 'episodes'] and entry.get('series_season') is not None:
                    season = {'number': entry['series_season']}
                    if self.config['type'] in ['auto', 'episodes'] and entry.get('series_episode') is not None:
                        season['episodes'] = [{'number': entry['series_episode']}]
                    show['seasons'] = [season]
                if self.config['type'] in ['seasons', 'episodes'] and 'seasons' not in show:
                    log.debug('Not submitting `%s`, no season found.', entry['title'])
                    continue
                if self.config['type'] == 'episodes' and 'episodes' not in show['seasons'][0]:
                    log.debug('Not submitting `%s`, no episode number found.', entry['title'])
                    continue
                found.setdefault('shows', []).append(show)
            elif self.config['type'] in ['auto', 'movies']:
                movie = {'ids': get_entry_ids(entry)}
                if not movie['ids']:
                    if entry.get('movie_name') is not None:
                        movie['title'] = entry.get('movie_name') or entry.get('imdb_name')
                        movie['year'] = entry.get('movie_year') or entry.get('imdb_year')
                    else:
                        log.debug('Not submitting `%s`, no movie name or id found.', entry['title'])
                        continue
                found.setdefault('movies', []).append(movie)
        if not (found.get('shows') or found.get('movies')):
            log.debug('Nothing to submit to trakt.')
            return
        url = get_api_url(self.get_list_endpoint(remove, submit=True))
        log.debug('Submitting data to trakt.tv (%s): %s', url, found)
        try:
            result = self.session.post(url, data=json.dumps(found), raise_status=False)
        except RequestException as e:
            log.error('Error submitting data to trakt.tv: %s', e)
            return
        if 200 <= result.status_code < 300:
            action = 'deleted' if remove else 'added'
            res = result.json()
            # Default to 0 for all categories, even if trakt response didn't include them
            for cat in ('movies', 'shows', 'episodes', 'seasons'):
                res[action].setdefault(cat, 0)
            log.info('Successfully {0} to/from list {1}: {movies} movie(s), {shows} show(s), {episodes} episode(s), '
                     '{seasons} season(s).'.format(action, self.config['list'], **res[action]))
            for media_type, request in res['not_found'].items():
                if request:
                    log.debug('not found %s: %s', media_type, request)
            # TODO: Improve messages about existing and unknown results
            # Mark the results expired if we added or removed anything
            if sum(res[action].values()):
                self.invalidate_cache()
        elif result.status_code == 404:
            log.error('List does not appear to exist on trakt: %s', self.config['list'])
        elif result.status_code == 401:
            log.error('Authentication error: have you authorized Flexget on Trakt.tv?')
            log.debug('trakt response: %s', result.text)
        else:
            log.error('Unknown error submitting data to trakt.tv: %s', result.text)

    @property
    def online(self):
        """ Set the online status of the plugin, online plugin should be treated differently in certain situations,
        like test mode"""
        return True
class TraktList(object):
    """Plugin exposing a trakt.tv list via the task and list interfaces."""

    schema = TraktSet.schema

    def get_list(self, config):
        """Return the configured trakt list as a mutable set of entries."""
        return TraktSet(config)

    # TODO: we should somehow invalidate this cache when the list is modified
    @cached('trakt_list', persist='2 hours')
    def on_task_input(self, task, config):
        """Produce the entries of the configured trakt list as task input."""
        return list(TraktSet(config))
@event('plugin.register')
def register_plugin():
    # Register under both the 'task' interface (usable as a task input) and
    # the 'list' interface (usable as a managed entry list).
    plugin.register(TraktList, 'trakt_list', api_ver=2, interfaces=['task', 'list'])
| |
"""
Operator to convert lazyflow slots into pylearn2 datasets.
"""
from functools import wraps
import logging
import numpy as np
from lazyflow.operator import Operator, InputSlot, OutputSlot
from lazyflow.rtype import SubRegion
from pylearn2.datasets import Dataset
from pylearn2.space import CompositeSpace
from pylearn2.utils.iteration import SubsetIterator
from pylearn2.utils.iteration import ShuffledSequentialSubsetIterator
LOGGER = logging.getLogger(__name__)
# docstring is applied by @wraps
# pylint: disable=C0111
def _assert_input_ready(method):
    """
    wrapper for OpDataset methods to prevent usage before input is ready
    """
    @wraps(method)
    def checked(self, *args, **kwargs):
        assert len(self.Input) == 2, "input slot needs data and target"
        both_ready = self.Input[0].ready() and self.Input[1].ready()
        assert both_ready, "input is not ready, can't use dataset yet"
        return method(self, *args, **kwargs)
    return checked
def _warn_if_unwise(method):
    """
    warns if a method is going to have low performance on a lazyflow dataset

    (i.e. when the non-lazy feature array would exceed 1 GiB of float32 data)
    """
    @wraps(method)
    def wrapped(self, *args, **kwargs):
        num_examples = self.Input[0].meta.shape[0]
        # BUG FIX: the original read shape[0] twice, so the "channel" factor
        # duplicated the example count.  Use the trailing axis instead --
        # assumes channels-last layout; TODO confirm against the slot's
        # axis order.
        num_channels = self.Input[0].meta.shape[-1]
        if num_examples*num_channels*4 > 1024**3:
            msg = "requested non-lazy processing for large dataset"
            LOGGER.warning(msg)  # .warn() is a deprecated alias
        return method(self, *args, **kwargs)
    return wrapped
# pylint: enable=C0111
class OpDataset(Operator, Dataset):
    """
    converts an input slot to a pylearn2 dataset

    the Output slot simply replicates the input slot, accessing data is done
    via the pylearn2.dataset.Dataset interface

    Input is a level-1 slot with two lanes: lane 0 holds the features, lane 1
    the targets (see ``slotNames``); both carry the example count on axis 0.
    """
    Input = InputSlot(level=1)
    Output = OutputSlot(level=1)
    # maps pylearn2 data-spec source names to Input lane indices
    slotNames = dict(features=0, targets=1)
    # example count; -1 until setupOutputs() has run
    _num_examples = -1

    def __init__(self, *args, **kwargs):
        Operator.__init__(self, *args, **kwargs)
        self.Output.connect(self.Input)

    def setupOutputs(self):
        # features and targets must agree on the number of examples (axis 0)
        self._num_examples = self.Input[0].meta.shape[0]
        assert self._num_examples == self.Input[1].meta.shape[0]
        # self._setupDataset()

    def propagateDirty(self, slot, subindex, roi):
        self.Output.setDirty(roi)

    def execute(self, slot, subindex, roi, result):
        # Output is directly connected to Input, so lazyflow never asks this
        # operator to compute anything itself.
        raise NotImplementedError("should not reach this method")

    # METHODS FOR DATASET
    def has_targets(self):
        """targets are always present (Input lane 1)"""
        return True

    @_assert_input_ready
    def get_num_examples(self):
        return self._num_examples

    # we need all those arguments
    # pylint: disable=R0913
    @_assert_input_ready
    def iterator(self, mode=None, batch_size=None, num_batches=None, rng=None,
                 data_specs=None, return_tuple=False):
        """
        return a pylearn2 SubsetIterator over this dataset

        unsupported modes fall back to "sequential" with a warning
        """
        if mode is None:
            mode = "sequential"
        supported_modes = {
            "sequential": self._get_sequential_iterator,
            "shuffled_sequential": self._get_shuffled_iterator}
        if mode not in supported_modes:
            msg = "mode '{}' not implemented".format(mode)
            mode = "sequential"
            msg += ", defaulting to '{}'".format(mode)
            LOGGER.warn(msg)
        return supported_modes[mode](batch_size=batch_size,
                                     num_batches=num_batches,
                                     rng=rng, data_specs=data_specs,
                                     return_tuple=return_tuple)

    def _get_sequential_iterator(self, batch_size=None, num_batches=None,
                                 rng=None, data_specs=None,
                                 return_tuple=False):
        """
        construct a pylearn2 SubsetIterator for this dataset specification

        batches are contiguous slices along axis 0 and fetched lazily, one
        request per batch and lane
        """
        if rng is not None:
            LOGGER.warn("rng handling not implemented")
        num_examples = int(self._num_examples)
        if batch_size is None:
            if num_batches is None:
                # neither given: one batch covering the whole dataset
                batch_size = num_examples
                num_batches = 1
            else:
                batch_size = int(np.ceil(float(num_examples)/num_batches))
        else:
            num_batches = int(np.ceil(float(num_examples)/batch_size))
        data_types = self._get_data_types(data_specs)
        # per-lane example shape, without the leading example axis
        shapes = [tuple(s.meta.shape)[1:] for s in self.Input]
        def _iter():
            """
            internal iterator, to be used by _Iterator
            """
            for batch in range(num_batches):
                left = batch*batch_size
                right = (batch+1)*batch_size
                # the final batch may be short
                right = min(right, num_examples)
                ret = []
                for data_type in data_types:
                    shape = shapes[data_type]
                    start = (left,) + (0,)*len(shape)
                    stop = (right,) + shape
                    # NOTE(review): the roi is constructed against the
                    # level-1 slot but applied to the lane below -- confirm
                    # this is what SubRegion expects
                    new_roi = SubRegion(self.Input, start=start, stop=stop)
                    # NOTE(review): rebinding ``batch`` clobbers the loop
                    # index; harmless only because left/right were computed
                    # above
                    batch = self.Input[data_type].get(new_roi).wait()
                    # theano needs float32
                    batch = batch.astype(np.float32)
                    ret.append(batch)
                if return_tuple or len(ret) > 1:
                    yield tuple(ret)
                else:
                    assert len(ret) == 1
                    yield ret[0]
        return _Iterator(batch_size, num_batches, self._num_examples,
                         _iter())

    @_warn_if_unwise
    def _get_shuffled_iterator(self, batch_size=None, num_batches=None,
                               rng=None, data_specs=None, return_tuple=False):
        """
        iterate over dataset with randomly selected connected batches

        note: loads the *entire* dataset into memory up front (hence the
        @_warn_if_unwise decorator)
        """
        features = self.Input[0][...].wait()
        target = self.Input[1][...].wait()
        data = (features, target)
        index_iter = ShuffledSequentialSubsetIterator(
            len(features), batch_size, num_batches, rng=rng)
        data_types = self._get_data_types(data_specs)
        def _iter():
            """
            internal iterator for _Iterator
            """
            for indices in index_iter:
                ret = []
                for data_type in data_types:
                    temp = data[data_type][indices, ...]
                    # theano needs float32
                    temp = temp.astype(np.float32)
                    ret.append(temp)
                if return_tuple or len(ret) > 1:
                    yield tuple(ret)
                else:
                    assert len(ret) == 1
                    yield ret[0]
        iter_ = _Iterator(batch_size, num_batches, self._num_examples,
                          _iter())
        iter_.stochastic = True
        return iter_

    def _get_data_types(self, data_specs):
        """
        get a mapping of type to channel index

        resolves the pylearn2 data_specs (space, source-names) pair to a
        tuple of Input lane indices via ``slotNames``
        """
        # default data returned is 'features'
        data_types = (0,)
        if data_specs is not None:
            assert len(data_specs) == 2
            space = data_specs[0]
            if isinstance(space, CompositeSpace):
                data_types = [self.slotNames[k] for k in data_specs[1]]
            else:
                data_types = (self.slotNames[data_specs[1]],)
        return data_types
# we don't want to call super().__init__!
# pylint: disable=W0231
class _Iterator(SubsetIterator):
    """
    wrapper around a python iterator over batches

    pylearn2 insists on using its custom iterator class rather than the python
    __iter__ pattern, play along.

    :param batch_size: nominal examples per batch
    :param num_batches: number of batches the wrapped iterator yields
    :param num_examples: total example count
    :param it: iterable yielding the actual batch data
    """
    def __init__(self, batch_size, num_batches, num_examples, it):
        self._batch_size = batch_size
        self._num_batches = num_batches
        self._num_examples = num_examples
        # the last batch may be smaller than batch_size
        self._uneven = True
        self._it = iter(it)

    def __iter__(self):
        return self

    def __next__(self):
        # BUG FIX: the original only defined ``next()`` and called
        # ``self._it.next()``, which exists only on Python 2 iterators.
        # ``next(self._it)`` and the ``__next__`` protocol method work on
        # both Python 2 and 3.
        return next(self._it)

    # keep the Python 2 iterator-protocol spelling as an alias
    next = __next__

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def uneven(self):
        return self._uneven
| |
"""SECCOMP policy.
This policy is based on the default Docker SECCOMP policy profile. It
allows several syscalls, which are most commonly used. We make one major
change regarding the network-related ``socket`` syscall in that we only
allow AF_INET/AF_INET6 SOCK_DGRAM/SOCK_STREAM sockets for TCP and UDP
protocols.
"""
# pylint: disable=too-many-lines
SECCOMP_POLICY = {
"defaultAction": "SCMP_ACT_ERRNO",
"architectures": [
"SCMP_ARCH_X86_64",
"SCMP_ARCH_X86",
"SCMP_ARCH_X32"
],
"syscalls": [
{
"name": "accept",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "accept4",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "access",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "alarm",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "bind",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "brk",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "capget",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "capset",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "chdir",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "chmod",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "chown",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "chown32",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "clock_getres",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "clock_gettime",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "clock_nanosleep",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "close",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "connect",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "copy_file_range",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "creat",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "dup",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "dup2",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "dup3",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "epoll_create",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "epoll_create1",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "epoll_ctl",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "epoll_ctl_old",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "epoll_pwait",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "epoll_wait",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "epoll_wait_old",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "eventfd",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "eventfd2",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "execve",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "execveat",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "exit",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "exit_group",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "faccessat",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "fadvise64",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "fadvise64_64",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "fallocate",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "fanotify_mark",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "fchdir",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "fchmod",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "fchmodat",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "fchown",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "fchown32",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "fchownat",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "fcntl",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "fcntl64",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "fdatasync",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "fgetxattr",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "flistxattr",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "flock",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "fork",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "fremovexattr",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "fsetxattr",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "fstat",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "fstat64",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "fstatat64",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "fstatfs",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "fstatfs64",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "fsync",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "ftruncate",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "ftruncate64",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "futex",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "futimesat",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "getcpu",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "getcwd",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "getdents",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "getdents64",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "getegid",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "getegid32",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "geteuid",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "geteuid32",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "getgid",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "getgid32",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "getgroups",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "getgroups32",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "getitimer",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "getpeername",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "getpgid",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "getpgrp",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "getpid",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "getppid",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "getpriority",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "getrandom",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "getresgid",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "getresgid32",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "getresuid",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "getresuid32",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "getrlimit",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "get_robust_list",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "getrusage",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "getsid",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "getsockname",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "getsockopt",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "get_thread_area",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "gettid",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "gettimeofday",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "getuid",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "getuid32",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "getxattr",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "inotify_add_watch",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "inotify_init",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "inotify_init1",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "inotify_rm_watch",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "io_cancel",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "ioctl",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "io_destroy",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "io_getevents",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "ioprio_get",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "ioprio_set",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "io_setup",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "io_submit",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "ipc",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "kill",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "lchown",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "lchown32",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "lgetxattr",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "link",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "linkat",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "listen",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "listxattr",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "llistxattr",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "_llseek",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "lremovexattr",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "lseek",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "lsetxattr",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "lstat",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "lstat64",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "madvise",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "memfd_create",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "mincore",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "mkdir",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "mkdirat",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "mknod",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "mknodat",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "mlock",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "mlock2",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "mlockall",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "mmap",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "mmap2",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "mprotect",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "mq_getsetattr",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "mq_notify",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "mq_open",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "mq_timedreceive",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "mq_timedsend",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "mq_unlink",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "mremap",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "msgctl",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "msgget",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "msgrcv",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "msgsnd",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "msync",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "munlock",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "munlockall",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "munmap",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "nanosleep",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "newfstatat",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "_newselect",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "open",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "openat",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "pause",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "personality",
"action": "SCMP_ACT_ALLOW",
"args": [
{
"index": 0,
"value": 0,
"valueTwo": 0,
"op": "SCMP_CMP_EQ"
}
]
},
{
"name": "personality",
"action": "SCMP_ACT_ALLOW",
"args": [
{
"index": 0,
"value": 8,
"valueTwo": 0,
"op": "SCMP_CMP_EQ"
}
]
},
{
"name": "personality",
"action": "SCMP_ACT_ALLOW",
"args": [
{
"index": 0,
"value": 4294967295,
"valueTwo": 0,
"op": "SCMP_CMP_EQ"
}
]
},
{
"name": "pipe",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "pipe2",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "poll",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "ppoll",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "prctl",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "pread64",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "preadv",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "prlimit64",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "pselect6",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "pwrite64",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "pwritev",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "read",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "readahead",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "readlink",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "readlinkat",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "readv",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "recv",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "recvfrom",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "recvmmsg",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "recvmsg",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "remap_file_pages",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "removexattr",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "rename",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "renameat",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "renameat2",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "restart_syscall",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "rmdir",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "rt_sigaction",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "rt_sigpending",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "rt_sigprocmask",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "rt_sigqueueinfo",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "rt_sigreturn",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "rt_sigsuspend",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "rt_sigtimedwait",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "rt_tgsigqueueinfo",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "sched_getaffinity",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "sched_getattr",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "sched_getparam",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "sched_get_priority_max",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "sched_get_priority_min",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "sched_getscheduler",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "sched_rr_get_interval",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "sched_setaffinity",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "sched_setattr",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "sched_setparam",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "sched_setscheduler",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "sched_yield",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "seccomp",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "select",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "semctl",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "semget",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "semop",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "semtimedop",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "send",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "sendfile",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "sendfile64",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "sendmmsg",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "sendmsg",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "sendto",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "setfsgid",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "setfsgid32",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "setfsuid",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "setfsuid32",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "setgid",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "setgid32",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "setgroups",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "setgroups32",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "setitimer",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "setpgid",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "setpriority",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "setregid",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "setregid32",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "setresgid",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "setresgid32",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "setresuid",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "setresuid32",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "setreuid",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "setreuid32",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "setrlimit",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "set_robust_list",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "setsid",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "setsockopt",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "set_thread_area",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "set_tid_address",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "setuid",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "setuid32",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "setxattr",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "shmat",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "shmctl",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "shmdt",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "shmget",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "shutdown",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "sigaltstack",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "signalfd",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "signalfd4",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "sigreturn",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "socketpair",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "splice",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "stat",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "stat64",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "statfs",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "statfs64",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "symlink",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "symlinkat",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "sync",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "sync_file_range",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "syncfs",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "sysinfo",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "syslog",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "tee",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "tgkill",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "time",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "timer_create",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "timer_delete",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "timerfd_create",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "timerfd_gettime",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "timerfd_settime",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "timer_getoverrun",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "timer_gettime",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "timer_settime",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "times",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "tkill",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "truncate",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "truncate64",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "ugetrlimit",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "umask",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "uname",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "unlink",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "unlinkat",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "utime",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "utimensat",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "utimes",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "vfork",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "vmsplice",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "wait4",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "waitid",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "waitpid",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "write",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "writev",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "arch_prctl",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "modify_ldt",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "chroot",
"action": "SCMP_ACT_ALLOW",
"args": []
},
{
"name": "clone",
"action": "SCMP_ACT_ALLOW",
"args": [
{
"index": 0,
"value": 2080505856,
"valueTwo": 0,
"op": "SCMP_CMP_MASKED_EQ"
}
]
},
{
"name": "socket",
"action": "SCMP_ACT_ALLOW",
"args": [
{
"index": 0,
"value": 12, # AF_INET
"valueTwo": 0,
"op": "SCMP_CMP_EQ"
},
{
"index": 1,
"value": 1, # SOCK_STREAM
"valueTwo": 0,
"op": "SCMP_CMP_EQ"
},
{
"index": 2,
"value": 6, # IPPROTO_TCP
"valueTwo": 0,
"op": "SCMP_CMP_EQ"
},
]
},
{
"name": "socket",
"action": "SCMP_ACT_ALLOW",
"args": [
{
"index": 0,
"value": 2, # AF_INET
"valueTwo": 0,
"op": "SCMP_CMP_EQ"
},
{
"index": 1,
"value": 2, # SOCK_DGRAM
"valueTwo": 0,
"op": "SCMP_CMP_EQ"
},
{
"index": 2,
"value": 17, # IPPROTO_UDP
"valueTwo": 0,
"op": "SCMP_CMP_EQ"
},
]
},
{
"name": "socket",
"action": "SCMP_ACT_ALLOW",
"args": [
{
"index": 0,
"value": 10, # AF_INET6
"valueTwo": 0,
"op": "SCMP_CMP_EQ"
},
{
"index": 1,
"value": 1, # SOCK_STREAM
"valueTwo": 0,
"op": "SCMP_CMP_EQ"
},
{
"index": 2,
"value": 6, # IPPROTO_TCP
"valueTwo": 0,
"op": "SCMP_CMP_EQ"
},
]
},
{
"name": "socket",
"action": "SCMP_ACT_ALLOW",
"args": [
{
"index": 0,
"value": 10, # AF_INET6
"valueTwo": 0,
"op": "SCMP_CMP_EQ"
},
{
"index": 1,
"value": 2, # SOCK_DGRAM
"valueTwo": 0,
"op": "SCMP_CMP_EQ"
},
{
"index": 2,
"value": 17, # IPPROTO_UDP
"valueTwo": 0,
"op": "SCMP_CMP_EQ"
},
]
},
]
}
| |
#!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import tempfile
from polyaxon import settings
from polyaxon.auxiliaries import (
get_default_init_container,
get_default_sidecar_container,
)
from polyaxon.connections.kinds import V1ConnectionKind
from polyaxon.connections.schemas import V1BucketConnection, V1K8sResourceSchema
from polyaxon.exceptions import PolyaxonCompilerError
from polyaxon.managers.agent import AgentConfigManager
from polyaxon.polyaxonfile.specs import kinds
from polyaxon.polyflow import V1CompiledOperation, V1RunKind
from polyaxon.polypod.compiler.resolver import BaseResolver
from polyaxon.schemas.cli.agent_config import AgentConfig
from polyaxon.schemas.types import V1ConnectionType, V1K8sResourceType
from polyaxon.utils.test_utils import BaseTestCase
@pytest.mark.polypod_mark
class TestResolver(BaseTestCase):
    """Unit tests for BaseResolver: constructor defaults/validation and
    connection resolution against an (in-memory) agent configuration."""

    def setUp(self):
        super().setUp()
        # Minimal compiled JOB operation with every plugin disabled, so the
        # resolver under test does not pull in auth/log/artifact behavior.
        self.compiled_operation = V1CompiledOperation.read(
            {
                "version": 1.1,
                "kind": kinds.COMPILED_OPERATION,
                "plugins": {
                    "auth": False,
                    "shm": False,
                    "collectLogs": False,
                    "collectArtifacts": False,
                    "collectResources": False,
                },
                "run": {"kind": V1RunKind.JOB, "container": {"image": "test"}},
            }
        )

    def test_core_resolver_instance(self):
        # When project_uuid/run_uuid are omitted (None), the resolver falls
        # back to using the corresponding names as uuids.
        resolver = BaseResolver(
            run=None,
            compiled_operation=self.compiled_operation,
            owner_name="user",
            project_name="p1",
            project_uuid=None,
            run_name="j1",
            run_uuid=None,
            run_path="test",
            params=None,
        )
        assert resolver.project_uuid == resolver.project_name
        assert resolver.run_uuid == resolver.run_name
        # Explicitly provided uuids are kept distinct from the names.
        resolver = BaseResolver(
            run=None,
            compiled_operation=self.compiled_operation,
            owner_name="user",
            project_name="p1",
            run_name="j1",
            run_path="test",
            project_uuid="some_uuid",
            run_uuid="some_uuid",
            params=None,
        )
        assert resolver.project_uuid != resolver.project_name
        assert resolver.run_uuid != resolver.run_name

    def test_resolve_connections_with_no_config(self):
        # Without an agent config, resolving connections must fail loudly.
        settings.AGENT_CONFIG = None
        resolver = BaseResolver(
            run=None,
            compiled_operation=self.compiled_operation,
            owner_name="user",
            project_name="p1",
            project_uuid=None,
            run_name="j1",
            run_uuid=None,
            run_path="test",
            params=None,
        )
        with self.assertRaises(PolyaxonCompilerError):
            resolver.resolve_connections()

    def test_resolve_without_compiled_operation(self):
        # A compiled operation is mandatory; construction itself must raise.
        with self.assertRaises(PolyaxonCompilerError):
            BaseResolver(
                run=None,
                compiled_operation=None,
                owner_name="user",
                project_name="p1",
                project_uuid=None,
                run_name="j1",
                run_uuid=None,
                run_path="test",
                params=None,
            )

    def test_resolve_connections_with_invalid_config(self):
        # Point the agent config manager at a scratch directory so no real
        # on-disk configuration leaks into the test.
        fpath = tempfile.mkdtemp()
        AgentConfigManager.CONFIG_PATH = fpath
        # Two secrets shared across three store connections (s3/gcs/wasb).
        secret1 = V1K8sResourceType(
            name="secret1",
            schema=V1K8sResourceSchema(name="secret1"),
            is_requested=True,
        )
        secret2 = V1K8sResourceType(
            name="secret2",
            schema=V1K8sResourceSchema(name="secret2"),
            is_requested=True,
        )
        connection1 = V1ConnectionType(
            name="test_s3",
            kind=V1ConnectionKind.S3,
            schema=V1BucketConnection(bucket="s3//:foo"),
            secret=secret1.schema,
        )
        connection2 = V1ConnectionType(
            name="test_gcs",
            kind=V1ConnectionKind.GCS,
            schema=V1BucketConnection(bucket="gcs//:foo"),
            secret=secret1.schema,
        )
        connection3 = V1ConnectionType(
            name="test_wasb",
            kind=V1ConnectionKind.WASB,
            schema=V1BucketConnection(bucket="wasbs//:foo"),
            secret=secret2.schema,
        )
        settings.AGENT_CONFIG = AgentConfig(
            namespace="foo",
            artifacts_store=connection1,
            connections=[connection2, connection3],
        )
        # Case 1: run spec requests no connections -> only the artifacts
        # store (connection1) is resolved; all secrets still come through.
        resolver = BaseResolver(
            run=None,
            compiled_operation=self.compiled_operation,
            owner_name="user",
            project_name="p1",
            project_uuid=None,
            run_name="j1",
            run_uuid=None,
            run_path="test",
            params=None,
        )
        resolver.resolve_connections()
        assert resolver.namespace == "foo"
        assert resolver.connection_by_names == {connection1.name: connection1}
        assert resolver.artifacts_store == connection1
        assert [s.schema for s in resolver.secrets] == [secret1.schema, secret2.schema]
        assert resolver.polyaxon_sidecar == get_default_sidecar_container()
        assert resolver.polyaxon_init == get_default_init_container()
        # Add run spec to resolve connections
        # Case 2: run spec requests connection3 -> it is resolved in
        # addition to the artifacts store.
        compiled_operation = V1CompiledOperation.read(
            {
                "version": 1.1,
                "kind": kinds.COMPILED_OPERATION,
                "plugins": {
                    "auth": False,
                    "shm": False,
                    "collectLogs": False,
                    "collectArtifacts": False,
                    "collectResources": False,
                },
                "run": {
                    "kind": V1RunKind.JOB,
                    "container": {"image": "test"},
                    "connections": {connection3.name},
                },
            }
        )
        resolver = BaseResolver(
            run=None,
            compiled_operation=compiled_operation,
            owner_name="user",
            project_name="p1",
            project_uuid=None,
            run_name="j1",
            run_uuid=None,
            run_path="test",
            params=None,
        )
        resolver.resolve_connections()
        assert resolver.namespace == "foo"
        assert resolver.connection_by_names == {
            connection1.name: connection1,
            connection3.name: connection3,
        }
        assert [s.schema for s in resolver.secrets] == [secret1.schema, secret2.schema]
        assert resolver.artifacts_store == connection1
        assert resolver.polyaxon_sidecar == get_default_sidecar_container()
        assert resolver.polyaxon_init == get_default_init_container()
        # Add run spec to resolve connections
        # Case 3: run spec requests all three connections -> all resolved.
        compiled_operation = V1CompiledOperation.read(
            {
                "version": 1.1,
                "kind": kinds.COMPILED_OPERATION,
                "plugins": {
                    "auth": False,
                    "shm": False,
                    "collectLogs": False,
                    "collectArtifacts": False,
                    "collectResources": False,
                },
                "run": {
                    "kind": V1RunKind.JOB,
                    "container": {"image": "test"},
                    "connections": {
                        connection1.name,
                        connection2.name,
                        connection3.name,
                    },
                },
            }
        )
        resolver = BaseResolver(
            run=None,
            compiled_operation=compiled_operation,
            owner_name="user",
            project_name="p1",
            project_uuid=None,
            run_name="j1",
            run_uuid=None,
            run_path="test",
            params=None,
        )
        resolver.resolve_connections()
        assert resolver.namespace == "foo"
        assert resolver.connection_by_names == {
            connection3.name: connection3,
            connection2.name: connection2,
            connection1.name: connection1,
        }
        assert [s.schema for s in resolver.secrets] == [secret1.schema, secret2.schema]
        assert resolver.artifacts_store == connection1
        assert resolver.polyaxon_sidecar == get_default_sidecar_container()
        assert resolver.polyaxon_init == get_default_init_container()
| |
import logging
import functools
import operator
l = logging.getLogger("claripy.backends.backend_vsa")
from . import Backend, BackendError
from ..vsa import RegionAnnotation
def arg_filter(f):
    """Decorator that raises BackendError when the wrapped operation is
    invoked with a raw Python ``int``/``long`` as its first argument,
    instead of a VSA backend object."""
    @functools.wraps(f)
    def filter(*args): #pylint:disable=redefined-builtin
        first = args[0]
        if type(first) in (int, long): #pylint:disable=unidiomatic-typecheck
            raise BackendError('Unsupported argument type %s' % type(first))
        return f(*args)
    return filter
def normalize_arg_order(f):
    """Decorator for binary VSA operations: ensures the first argument is a
    backend object (StridedInterval / DiscreteStridedIntervalSet / ValueSet),
    swapping the two arguments when only the second one is.

    Raises BackendError for non-binary calls or when neither argument is a
    backend object.
    """
    @functools.wraps(f)
    def normalizer(*args):
        if len(args) != 2:
            raise BackendError('Unsupported arguments number %d' % len(args))
        vsa_types = (StridedInterval, DiscreteStridedIntervalSet, ValueSet)
        if type(args[0]) not in vsa_types: #pylint:disable=unidiomatic-typecheck
            if type(args[1]) not in vsa_types: #pylint:disable=unidiomatic-typecheck
                raise BackendError('Unsupported arguments')
            # Only the second operand is a backend object: swap them.
            args = [args[1], args[0]]
        return f(*args)
    return normalizer
def convert_args(f):
    """Decorator: convert every argument of the incoming AST via
    ``self.convert`` before invoking *f*.

    The wrapped method receives a copy of the AST (built with
    ``ast.swap_args``) whose arguments have all been converted; the original
    AST object is left untouched.

    The original implementation copied ``ast.args`` element-by-element with a
    ``xrange`` loop and then converted the copy in a second ``xrange`` loop;
    a single list comprehension does the same work in one pass (same order,
    same results) without the Python-2-only ``xrange``.

    :param f: a ``(self, ast)`` method expecting converted arguments.
    :return: the wrapping method.
    """
    @functools.wraps(f)
    def converter(self, ast):
        # Argument order is preserved ("it's not reversed").
        converted = [self.convert(arg) for arg in ast.args]
        return f(self, ast.swap_args(converted))
    return converter
class BackendVSA(Backend):
    """Claripy backend implementing operations over VSA (value-set analysis)
    domains: StridedInterval, DiscreteStridedIntervalSet, and ValueSet, with
    BoolResult (TrueResult/FalseResult/MaybeResult) for boolean values."""

    def __init__(self):
        Backend.__init__(self)
        # Register operation handlers with the base Backend dispatch tables:
        # _op_expr handlers receive the AST, _op_raw handlers receive
        # already-converted backend objects.
        # self._make_raw_ops(set(expression_operations) - set(expression_set_operations), op_module=BackendVSA)
        self._make_expr_ops(set(expression_set_operations), op_class=self)
        self._make_raw_ops(set(backend_operations_vsa_compliant), op_module=BackendVSA)
        self._op_raw['StridedInterval'] = BackendVSA.CreateStridedInterval
        self._op_raw['ValueSet'] = ValueSet.__init__
        self._op_raw['AbstractLocation'] = AbstractLocation.__init__
        self._op_raw['Reverse'] = BackendVSA.Reverse
        self._op_raw['If'] = self.If
        self._op_expr['BVV'] = self.BVV
        self._op_expr['BoolV'] = self.BoolV
        self._op_expr['BVS'] = self.BVS
        # reduceable: n-ary arithmetic/bitwise ops are folded pairwise below.
        self._op_raw['__add__'] = self._op_add
        self._op_raw['__sub__'] = self._op_sub
        self._op_raw['__mul__'] = self._op_mul
        self._op_raw['__or__'] = self._op_or
        self._op_raw['__xor__'] = self._op_xor
        self._op_raw['__and__'] = self._op_and

    # Pairwise left-folds over all operands (Python 2 builtin `reduce`).
    @staticmethod
    def _op_add(*args):
        return reduce(operator.__add__, args)
    @staticmethod
    def _op_sub(*args):
        return reduce(operator.__sub__, args)
    @staticmethod
    def _op_mul(*args):
        return reduce(operator.__mul__, args)
    @staticmethod
    def _op_or(*args):
        return reduce(operator.__or__, args)
    @staticmethod
    def _op_xor(*args):
        return reduce(operator.__xor__, args)
    @staticmethod
    def _op_and(*args):
        return reduce(operator.__and__, args)

    def convert(self, expr):
        # For claripy ASTs, convert the ITE-excavated form (ITEs hoisted to
        # the top) rather than the raw expression.
        return Backend.convert(self, expr.ite_excavated if isinstance(expr, Base) else expr)

    def _convert(self, a):
        """Accept ints/longs, bools (mapped to BoolResult), and native VSA
        objects; anything else is unsupported."""
        if type(a) in { int, long }: #pylint:disable=unidiomatic-typecheck
            return a
        if type(a) is bool:
            return TrueResult() if a else FalseResult()
        if type(a) in { StridedInterval, DiscreteStridedIntervalSet, ValueSet }: #pylint:disable=unidiomatic-typecheck
            return a
        if isinstance(a, BoolResult):
            return a
        # Not supported
        raise BackendError()

    def _eval(self, expr, n, extra_constraints=(), solver=None, model_callback=None):
        # Delegate to the VSA object's own eval; BoolResult exposes .value.
        if isinstance(expr, StridedInterval):
            return expr.eval(n)
        elif isinstance(expr, ValueSet):
            return expr.eval(n)
        elif isinstance(expr, BoolResult):
            return expr.value
        else:
            raise BackendError('Unsupported type %s' % type(expr))

    def _min(self, expr, extra_constraints=(), solver=None, model_callback=None):
        if isinstance(expr, StridedInterval):
            if expr.is_top:
                # TODO: Return
                # NOTE(review): for TOP the minimum is reported as 0.
                return 0
            return expr.min
        else:
            raise BackendError('Unsupported expr type %s' % type(expr))

    def _max(self, expr, extra_constraints=(), solver=None, model_callback=None):
        if isinstance(expr, StridedInterval):
            if expr.is_top:
                # TODO:
                # NOTE(review): for TOP the maximum unsigned value of the
                # interval's bit-width is reported.
                return StridedInterval.max_int(expr.bits)
            return expr.max
        else:
            raise BackendError('Unsupported expr type %s' % type(expr))

    def _solution(self, obj, v, extra_constraints=(), solver=None, model_callback=None):
        """Return True if value `v` is a possible solution of `obj`
        (non-empty intersection / overlapping boolean values)."""
        if isinstance(obj, BoolResult):
            return len(set(v.value) & set(obj.value)) > 0
        if isinstance(obj, StridedInterval):
            return not obj.intersection(v).is_empty
        if isinstance(obj, ValueSet):
            # A value is a solution if it intersects any region's interval.
            for _, si in obj.items():
                if not si.intersection(v).is_empty:
                    return True
            return False
        raise NotImplementedError(type(obj).__name__)

    # Boolean queries are delegated to BoolResult's class-level helpers.
    def _has_true(self, o, extra_constraints=(), solver=None, model_callback=None):
        return BoolResult.has_true(o)
    def _has_false(self, o, extra_constraints=(), solver=None, model_callback=None):
        return BoolResult.has_false(o)
    def _is_true(self, o, extra_constraints=(), solver=None, model_callback=None):
        return BoolResult.is_true(o)
    def _is_false(self, o, extra_constraints=(), solver=None, model_callback=None):
        return BoolResult.is_false(o)

    #
    # Backend Operations
    #

    def simplify(self, e):
        # Simplification is not supported by the VSA backend.
        raise BackendError('nope')

    def _identical(self, a, b):
        # Objects of different concrete types are never identical.
        if type(a) != type(b):
            return False
        return a.identical(b)

    def _unique(self, obj): #pylint:disable=unused-argument,no-self-use
        """True if the object represents exactly one concrete value."""
        if isinstance(obj, StridedInterval):
            return obj.unique
        elif isinstance(obj, ValueSet):
            return obj.unique
        else:
            raise BackendError('Not supported type of operand %s' % type(obj))

    def _cardinality(self, a): #pylint:disable=unused-argument,no-self-use
        return a.cardinality

    def name(self, a):
        # Only StridedIntervals carry a name; everything else is anonymous.
        if isinstance(a, StridedInterval):
            return a.name
        else:
            return None

    def apply_annotation(self, bo, annotation):
        """
        Apply an annotation on the backend object.

        :param BackendObject bo: The backend object.
        :param Annotation annotation: The annotation to be applied
        :return: A new BackendObject
        :rtype: BackendObject
        """

        # Currently we only support RegionAnnotation

        if not isinstance(annotation, RegionAnnotation):
            return bo

        if not isinstance(bo, ValueSet):
            # Convert it to a ValueSet first
            # Note that the original value is not kept at all. If you want to convert a StridedInterval to a ValueSet,
            # you gotta do the conversion by calling AST.annotate() from outside.
            bo = ValueSet.empty(bo.bits)

        return bo.apply_annotation(annotation)

    def BVV(self, ast): #pylint:disable=unused-argument,no-self-use
        # args[0] is the value (None -> empty interval), args[1] the width.
        if ast.args[0] is None:
            return StridedInterval.empty(ast.args[1])
        else:
            return CreateStridedInterval(bits=ast.args[1], stride=0, lower_bound=ast.args[0], upper_bound=ast.args[0])

    @staticmethod
    def BoolV(ast): #pylint:disable=unused-argument
        return TrueResult() if ast.args[0] else FalseResult()

    @staticmethod
    def And(a, *args):
        return reduce(operator.__and__, args, a)

    @staticmethod
    def Not(a):
        return ~a

    # Unsigned/signed comparisons: normalize_arg_order ensures the first
    # operand is a backend object before the method call is dispatched.
    @staticmethod
    @normalize_arg_order
    def ULT(a, b):
        return a.ULT(b)
    @staticmethod
    @normalize_arg_order
    def ULE(a, b):
        return a.ULE(b)
    @staticmethod
    @normalize_arg_order
    def UGT(a, b):
        return a.UGT(b)
    @staticmethod
    @normalize_arg_order
    def UGE(a, b):
        return a.UGE(b)
    @staticmethod
    @normalize_arg_order
    def SLT(a, b):
        return a.SLT(b)
    @staticmethod
    @normalize_arg_order
    def SLE(a, b):
        return a.SLE(b)
    @staticmethod
    @normalize_arg_order
    def SGT(a, b):
        return a.SGT(b)
    @staticmethod
    @normalize_arg_order
    def SGE(a, b):
        return a.SGE(b)

    @staticmethod
    def BVS(ast): #pylint:disable=unused-argument
        # Unpack the symbolic-variable AST arguments into a fresh interval.
        size = ast.size()
        name, mn, mx, stride, uninitialized, discrete_set, max_card = ast.args
        return CreateStridedInterval(name=name, bits=size, lower_bound=mn, upper_bound=mx, stride=stride,
                                     uninitialized=uninitialized, discrete_set=discrete_set,
                                     discrete_set_max_cardinality=max_card)

    def If(self, cond, t, f):
        # Definitely-false -> f; definitely-true -> t; otherwise join both.
        if not self.has_true(cond):
            return f
        elif not self.has_false(cond):
            return t
        else:
            return t.union(f)

    # TODO: Implement other operations!

    @staticmethod
    def Or(*args):
        # Boolean OR as a left-fold of unions.
        first = args[0]
        others = args[1:]
        for o in others:
            first = first.union(o)
        return first

    @staticmethod
    def __rshift__(expr, shift_amount): #pylint:disable=unexpected-special-method-signature
        # Arithmetic shift right, delegated to the backend object.
        return expr.__rshift__(shift_amount)

    @staticmethod
    def LShR(expr, shift_amount):
        # Logical shift right.
        return expr.LShR(shift_amount)

    @staticmethod
    def Concat(*args):
        # Left-fold concatenation of all operands; every operand must be a
        # VSA object.
        ret = None
        for expr in args:
            if type(expr) not in { StridedInterval, DiscreteStridedIntervalSet, ValueSet }: #pylint:disable=unidiomatic-typecheck
                raise BackendError('Unsupported expr type %s' % type(expr))
            ret = ret.concat(expr) if ret is not None else expr
        return ret

    @arg_filter
    def _size(self, arg):
        if type(arg) in { StridedInterval, DiscreteStridedIntervalSet, ValueSet }: #pylint:disable=unidiomatic-typecheck
            return len(arg)
        else:
            return arg.size()

    @staticmethod
    def Extract(*args):
        # args: (high_bit, low_bit, expr) -- claripy Extract argument order.
        low_bit = args[1]
        high_bit = args[0]
        expr = args[2]
        if type(expr) not in { StridedInterval, DiscreteStridedIntervalSet, ValueSet }: #pylint:disable=unidiomatic-typecheck
            raise BackendError('Unsupported expr type %s' % type(expr))
        ret = expr.extract(high_bit, low_bit)
        return ret

    @staticmethod
    def SignExt(*args):
        # args: (extra_bits, expr); sign_extend takes the resulting total
        # width (extra bits + current width).
        new_bits = args[0]
        expr = args[1]
        if type(expr) not in { StridedInterval, DiscreteStridedIntervalSet }: #pylint:disable=unidiomatic-typecheck
            raise BackendError('Unsupported expr type %s' % type(expr))
        return expr.sign_extend(new_bits + expr.bits)

    @staticmethod
    def ZeroExt(*args):
        # args: (extra_bits, expr); zero_extend takes the resulting total
        # width (extra bits + current width).
        new_bits = args[0]
        expr = args[1]
        if type(expr) not in { StridedInterval, DiscreteStridedIntervalSet }: #pylint:disable=unidiomatic-typecheck
            raise BackendError('Unsupported expr type %s' % type(expr))
        return expr.zero_extend(new_bits + expr.bits)

    @staticmethod
    def Reverse(arg):
        # Byte-order reversal, delegated to the backend object.
        if type(arg) not in {StridedInterval, DiscreteStridedIntervalSet, ValueSet}: #pylint:disable=unidiomatic-typecheck
            raise BackendError('Unsupported expr type %s' % type(arg))
        return arg.reverse()

    @convert_args
    def union(self, ast): #pylint:disable=unused-argument,no-self-use
        """Binary union; retries with swapped operands when the first
        operand's union() returns NotImplemented."""
        if len(ast.args) != 2:
            raise BackendError('Incorrect number of arguments (%d) passed to BackendVSA.union().' % len(ast.args))
        ret = ast.args[0].union(ast.args[1])
        if ret is NotImplemented:
            ret = ast.args[1].union(ast.args[0])
        return ret

    @convert_args
    def intersection(self, ast): #pylint:disable=unused-argument,no-self-use
        """Binary intersection, folded over the (two) arguments."""
        if len(ast.args) != 2:
            raise BackendError('Incorrect number of arguments (%d) passed to BackendVSA.intersection().' % len(ast.args))
        ret = None
        for arg in ast.args:
            if ret is None:
                ret = arg
            else:
                ret = ret.intersection(arg)
        return ret

    @convert_args
    def widen(self, ast): #pylint:disable=unused-argument,no-self-use
        """Binary widening; retries with swapped operands when the first
        operand's widen() returns NotImplemented."""
        if len(ast.args) != 2:
            raise BackendError('Incorrect number of arguments (%d) passed to BackendVSA.widen().' % len(ast.args))
        ret = ast.args[0].widen(ast.args[1])
        if ret is NotImplemented:
            ret = ast.args[1].widen(ast.args[0])
        return ret

    @staticmethod
    def CreateTopStridedInterval(bits, name=None, uninitialized=False): #pylint:disable=unused-argument,no-self-use
        # TOP interval of the given width (covers all values).
        return StridedInterval.top(bits, name, uninitialized=uninitialized)

    def constraint_to_si(self, expr):
        # Delegate constraint-to-interval conversion to the Balancer.
        return Balancer(self, expr).compat_ret
# These imports are placed at the bottom of the module — presumably to
# break a circular dependency, since these modules import from this
# package in turn (TODO confirm).
from ..ast.base import Base
from ..operations import backend_operations_vsa_compliant, expression_set_operations
from ..vsa import StridedInterval, CreateStridedInterval, DiscreteStridedIntervalSet, ValueSet, AbstractLocation, BoolResult, TrueResult, FalseResult
from ..balancer import Balancer
# Attach CreateStridedInterval now that ..vsa is importable; __init__
# registers it in _op_raw['StridedInterval'] above.
BackendVSA.CreateStridedInterval = staticmethod(CreateStridedInterval)
| |
import logging
import re
import netfilter.parser
"""
rule.py Author: Zach Bricker
A set of classes to allow the creation of rules to be used with IPTables
"""
# Matches a long-form iptables option ("--name"), capturing the option
# name without the leading dashes.
re_extension_opt = re.compile(r'^--(.*)$')

class Extension:
    """Base class for an iptables extension (a match or a target).

    Holds the extension name and a dictionary of parsed command-line
    options, mapping each (possibly negated, possibly rewritten) option
    name to its list of values.
    """

    def __init__(self, name, options, rewrite_options=None):
        """
        Constructor.

        :param name: extension name (e.g. 'tcp').
        :param options: an option string or a pre-split list of words
            (e.g. ['--dport', '80']); may be None or empty.
        :param rewrite_options: optional mapping from option aliases to
            their canonical names (e.g. {'destination-port': 'dport'}).
        """
        self.__name = name
        self.__options = {}
        # Fix: the original signature used a mutable default ({}) for
        # rewrite_options; use a None sentinel to avoid cross-call sharing.
        self.__rewrite_options = rewrite_options if rewrite_options is not None else {}
        if options:
            self.__parse_options(options)

    def __eq__(self, other):
        """
        Equality: same extension name and same parsed options.

        :param other: object to compare with.
        :return: bool, or NotImplemented for non-Extension operands.
        """
        if isinstance(other, Extension):
            return self.__name == other.__name and \
                   self.__options == other.__options
        else:
            return NotImplemented

    def __ne__(self, other):
        """
        Inequality, defined as the negation of __eq__.

        :param other: object to compare with.
        :return: bool, or NotImplemented for non-Extension operands.
        """
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result

    def __parse_options(self, options):
        """
        Parse an option string or word list into self.__options.

        Each '--opt' introduces an option; a preceding '!' negates it
        (stored as '! opt'); following non-option words are its values.

        :param options: option string or list of words.
        :raises Exception: when a word is found where an option was expected.
        """
        if isinstance(options, list):
            bits = options
        else:
            bits = netfilter.parser.split_words(options)
        pos = 0
        cur_opt = []
        while pos < len(bits):
            if bits[pos] == '!':
                # Negation applies to the option that follows.
                cur_opt.append(bits[pos])
                pos += 1
                continue
            # get option name
            m = re_extension_opt.match(bits[pos])
            if not m:
                raise Exception("expected option, got: %s" % bits[pos])
            pos += 1
            # rewrite option to its canonical name
            tmp_opt = m.group(1)
            if tmp_opt in self.__rewrite_options:
                tmp_opt = self.__rewrite_options[tmp_opt]
            cur_opt.append(tmp_opt)
            # collect value(s) until the next option marker
            vals = []
            while pos < len(bits) and not re_extension_opt.match(bits[pos]):
                vals.append(bits[pos])
                pos += 1
            # store option under its (possibly '! '-prefixed) name
            opt = ' '.join(cur_opt)
            self.__options[opt] = vals
            # reset current option name
            cur_opt = []

    def log(self, level, prefix=''):
        """
        Log the extension's name and options at the given level.

        :param level: logging level (e.g. logging.DEBUG).
        :param prefix: string prepended to each log line.
        """
        logging.log(level, "%sname: %s", prefix, self.__name)
        logging.log(level, "%soptions: %s", prefix, self.__options)

    def name(self):
        """
        :return: the extension name.
        """
        return self.__name

    def options(self):
        """
        :return: the parsed options dictionary (option name -> values).
        """
        return self.__options

    def specbits(self):
        """
        Render the options back into command-line words, sorted by
        option name ('! opt' entries become ['!', '--opt']).

        :return: list of words.
        """
        bits = []
        for opt in sorted(self.__options):
            # handle the case where this is a negated option
            m = re.match(r'^! (.*)', opt)
            if m:
                bits.extend(['!', "--%s" % m.group(1)])
            else:
                bits.append("--%s" % opt)
            optval = self.__options[opt]
            if isinstance(optval, list):
                bits.extend(optval)
            else:
                bits.append(optval)
        return bits
class Match(Extension):
    """An iptables match extension. Long-form port options are rewritten
    to their short canonical names during parsing."""

    def __init__(self, name, options=None):
        """
        Constructor.

        :param name: match extension name (e.g. 'tcp').
        :param options: option string or word list; may be omitted.
        """
        port_aliases = {
            'destination-port': 'dport',
            'destination-ports': 'dports',
            'source-port': 'sport',
            'source-ports': 'sports',
        }
        Extension.__init__(self, name, options, port_aliases)
class Target(Extension):
    """An iptables target extension (``-j`` / ``-g``), options parsed as-is."""
    def __init__(self, name, options=None):
        """
        Constructor; delegates straight to Extension with no option rewrites.
        :param name: target name (e.g. 'ACCEPT', 'DNAT')
        :param options: option tokens or string, parsed by Extension
        :return
        """
        Extension.__init__(self, name, options)
class Rule:
    """A single iptables rule: protocol, addresses, interfaces, match
    extensions and a jump/goto target, plus packet/byte counters."""
    def __init__(self, **kwargs):
        """
        Constructor.
        :param **kwargs: initial attribute values (e.g. source, jump,
            matches); routed through __setattr__ so the same
            canonicalisation as normal assignment applies.
        :return
        """
        # initialise rule definition
        self.protocol = None
        self.destination = None
        self.source = None
        self.goto = None
        self.jump = None
        self.in_interface = None
        self.out_interface = None
        self.matches = []
        # initialise counters
        self.packets = 0
        self.bytes = 0
        # assign supplied arguments
        for k, v in kwargs.items():
            self.__setattr__(k, v)
    def __eq__(self, other):
        """
        Field-by-field rule comparison; the packet/byte counters are
        deliberately NOT part of equality.
        :param other: object to compare against
        :return: True/False for another Rule, NotImplemented otherwise
        """
        if isinstance(other, Rule):
            return other.protocol == self.protocol and \
                other.in_interface == self.in_interface and \
                other.out_interface == self.out_interface and \
                other.source == self.source and \
                other.destination == self.destination and \
                other.goto == self.goto and \
                other.jump == self.jump and \
                other.matches == self.matches
        else:
            return NotImplemented
    def __ne__(self, other):
        """
        Inverse of __eq__, propagating NotImplemented unchanged.
        :param other: object to compare against
        :return: negated equality result, or NotImplemented
        """
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result
    def __setattr__(self, name, value):
        """
        Intercepts attribute assignment to canonicalise certain fields:
        '/32' suffixes are stripped from addresses, plain strings for
        goto/jump are wrapped in Target, and matches must be a list.
        :param name: attribute name
        :param value: attribute value
        :return
        """
        if name == 'source' or name == 'destination':
            # produce "canonical" form of a source / destination
            # FIXME: we need to handle arbitrary netmasks here
            if value is not None and value.endswith('/32'):
                value = value[:-3]
        elif name == 'goto' or name == 'jump':
            # wrap a plain target name into a Target instance
            if value is not None and not isinstance(value, Target):
                value = Target(value)
        elif name == 'matches':
            if not isinstance(value, list):
                raise Exception("matches attribute requires a list")
        self.__dict__[name] = value
    def find(self, rules):
        """
        Returns the first rule in *rules* equal to this one, if any.
        :param rules: iterable of Rule objects
        :return: the matching rule
        :return None
        """
        for rule in rules:
            if self == rule:
                return rule
        return None
    def log(self, level, prefix=''):
        """
        Logs the rule's fields, its matches and its jump target.
        :param level: numeric logging level
        :param prefix: string prepended to each logged line
        :return
        """
        logging.log(level, "%sin interface: %s", prefix, self.in_interface)
        logging.log(level, "%sout interface: %s", prefix, self.out_interface)
        logging.log(level, "%ssource: %s", prefix, self.source)
        logging.log(level, "%sdestination: %s", prefix, self.destination)
        logging.log(level, "%smatches:", prefix)
        for match in self.matches:
            match.log(level, prefix + '  ')
        if self.jump:
            logging.log(level, "%sjump:", prefix)
            self.jump.log(level, prefix + '  ')
    def specbits(self):
        """
        Renders the rule as an iptables-style argument list.
        :return bits
        """
        def host_bits(opt, optval):
            """
            Returns the tokens for a possibly-negated host/interface value.
            :param opt: option flag (e.g. '-s')
            :param optval: value, optionally prefixed with '!'
            :return ['!', opt, m.group(1)]
            :return [opt, optval]
            """
            # handle the case where this is a negated value
            m = re.match(r'^!\s*(.*)', optval)
            if m:
                return ['!', opt, m.group(1)]
            else:
                return [opt, optval]
        bits = []
        if self.protocol:
            bits.extend(host_bits('-p', self.protocol))
        if self.in_interface:
            bits.extend(host_bits('-i', self.in_interface))
        if self.out_interface:
            bits.extend(host_bits('-o', self.out_interface))
        if self.source:
            bits.extend(host_bits('-s', self.source))
        if self.destination:
            bits.extend(host_bits('-d', self.destination))
        for mod in self.matches:
            bits.extend(['-m', mod.name()])
            bits.extend(mod.specbits())
        if self.goto:
            bits.extend(['-g', self.goto.name()])
            bits.extend(self.goto.specbits())
        elif self.jump:
            bits.extend(['-j', self.jump.name()])
            bits.extend(self.jump.specbits())
        return bits
| |
# Copyright 2014 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import eventlet
eventlet.monkey_patch()
import pprint
import sys
import time
from oslo.config import cfg
from neutron.agent.common import config
from neutron.agent.linux import external_process
from neutron.agent.linux import interface
from neutron.agent import rpc as agent_rpc
from neutron.common import config as common_config
from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron import context as n_context
from neutron import manager
from neutron.openstack.common import importutils
from neutron.openstack.common import lockutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import loopingcall
from neutron.openstack.common import periodic_task
from neutron.openstack.common import service
from neutron.openstack.common import timeutils
from neutron.plugins.cisco.cfg_agent import device_status
from neutron.plugins.cisco.common import cisco_constants as c_constants
from neutron import service as neutron_service
LOG = logging.getLogger(__name__)
# Constants for agent registration.
REGISTRATION_RETRY_DELAY = 2
MAX_REGISTRATION_ATTEMPTS = 30
class CiscoDeviceManagementApi(n_rpc.RpcProxy):
    """Agent side of the device manager RPC API."""
    BASE_RPC_API_VERSION = '1.0'
    def __init__(self, topic, host):
        """Set up the RPC proxy on *topic* and remember the reporting host."""
        super(CiscoDeviceManagementApi, self).__init__(
            topic=topic, default_version=self.BASE_RPC_API_VERSION)
        self.host = host
    def report_dead_hosting_devices(self, context, hd_ids=None):
        """Report that a hosting device cannot be contacted (presumed dead).
        :param: context: session context
        :param: hd_ids: list of non-responding hosting devices
        :return: None
        """
        # Fire-and-forget: a cast is used since we don't expect a return value.
        msg = self.make_msg('report_non_responding_hosting_devices',
                            host=self.host,
                            hosting_device_ids=hd_ids)
        self.cast(context, msg, topic=self.topic)
    def register_for_duty(self, context):
        """Report that a config agent is ready for duty."""
        msg = self.make_msg('register_for_duty', host=self.host)
        return self.call(context, msg, topic=self.topic)
class CiscoCfgAgent(manager.Manager):
    """Cisco Cfg Agent.
    This class defines a generic configuration agent for cisco devices which
    implement network services in the cloud backend. It is based on the
    (reference) l3-agent, but has been enhanced to support multiple services
    in addition to routing.
    The agent acts like as a container for services and does not do any
    service specific processing or configuration itself.
    All service specific processing is delegated to service helpers which
    the agent loads. Thus routing specific updates are processed by the
    routing service helper, firewall by firewall helper etc.
    A further layer of abstraction is implemented by using device drivers for
    encapsulating all configuration operations of a service on a device.
    Device drivers are specific to a particular device/service VM eg: CSR1kv.
    The main entry points in this class are the `process_services()` and
    `_backlog_task()` .
    """
    RPC_API_VERSION = '1.1'
    OPTS = [
        cfg.IntOpt('rpc_loop_interval', default=10,
                   help=_("Interval when the process_services() loop "
                          "executes in seconds. This is when the config agent "
                          "lets each service helper to process its neutron "
                          "resources.")),
        cfg.StrOpt('routing_svc_helper_class',
                   default='neutron.plugins.cisco.cfg_agent.service_helpers'
                           '.routing_svc_helper.RoutingServiceHelper',
                   help=_("Path of the routing service helper class.")),
    ]
    def __init__(self, host, conf=None):
        """Initialize RPC, service helpers and periodic tasks.
        :param host: host name this agent runs on
        :param conf: optional config object; falls back to the global CONF
        """
        self.conf = conf or cfg.CONF
        self._dev_status = device_status.DeviceStatus()
        self.context = n_context.get_admin_context_without_session()
        self._initialize_rpc(host)
        self._initialize_service_helpers(host)
        self._start_periodic_tasks()
        super(CiscoCfgAgent, self).__init__(host=self.conf.host)
    def _initialize_rpc(self, host):
        """Create the RPC proxy towards the device manager plugin."""
        self.devmgr_rpc = CiscoDeviceManagementApi(topics.L3PLUGIN, host)
    def _initialize_service_helpers(self, host):
        """Import and instantiate the configured routing service helper.
        On import failure the agent keeps running without a routing helper;
        process_services() will then only log a warning.
        """
        svc_helper_class = self.conf.cfg_agent.routing_svc_helper_class
        try:
            self.routing_service_helper = importutils.import_object(
                svc_helper_class, host, self.conf, self)
        except ImportError as e:
            LOG.warn(_("Error in loading routing service helper. Class "
                       "specified is %(class)s. Reason:%(reason)s"),
                     {'class': self.conf.cfg_agent.routing_svc_helper_class,
                      'reason': e})
            self.routing_service_helper = None
    def _start_periodic_tasks(self):
        """Start the process_services() loop at the configured interval."""
        self.loop = loopingcall.FixedIntervalLoopingCall(self.process_services)
        self.loop.start(interval=self.conf.cfg_agent.rpc_loop_interval)
    def after_start(self):
        LOG.info(_("Cisco cfg agent started"))
    def get_routing_service_helper(self):
        return self.routing_service_helper
    ## Periodic tasks ##
    @periodic_task.periodic_task
    def _backlog_task(self, context):
        """Process backlogged devices."""
        LOG.debug("Processing backlog.")
        self._process_backlogged_hosting_devices(context)
    ## Main orchestrator ##
    @lockutils.synchronized('cisco-cfg-agent', 'neutron-')
    def process_services(self, device_ids=None, removed_devices_info=None):
        """Process services managed by this config agent.
        This method is invoked by any of three scenarios.
        1. Invoked by a periodic task running every `RPC_LOOP_INTERVAL`
        seconds. This is the most common scenario.
        In this mode, the method is called without any arguments.
        2. Called by the `_process_backlogged_hosting_devices()` as part of
        the backlog processing task. In this mode, a list of device_ids
        are passed as arguments. These are the list of backlogged
        hosting devices that are now reachable and we want to sync services
        on them.
        3. Called by the `hosting_devices_removed()` method. This is when
        the config agent has received a notification from the plugin that
        some hosting devices are going to be removed. The payload contains
        the details of the hosting devices and the associated neutron
        resources on them which should be processed and removed.
        To avoid race conditions with these scenarios, this function is
        protected by a lock.
        This method goes on to invoke `process_service()` on the
        different service helpers.
        :param device_ids: List of devices that are now available and needs
            to be processed
        :param removed_devices_info: Info about the hosting devices which
            are going to be removed and details of the resources hosted on
            them. Expected Format:
            {
             'hosting_data': {'hd_id1': {'routers': [id1, id2, ...]},
                              'hd_id2': {'routers': [id3, id4, ...]}, ...},
             'deconfigure': True/False
            }
        :return: None
        """
        LOG.debug("Processing services started")
        # Now we process only routing service, additional services will be
        # added in future
        if self.routing_service_helper:
            self.routing_service_helper.process_service(device_ids,
                                                        removed_devices_info)
        else:
            LOG.warn(_("No routing service helper loaded"))
        LOG.debug("Processing services completed")
    def _process_backlogged_hosting_devices(self, context):
        """Process currently backlogged devices.
        Go through the currently backlogged devices and process them.
        For devices which are now reachable (compared to last time), we call
        `process_services()` passing the now reachable device's id.
        For devices which have passed the `hosting_device_dead_timeout` and
        hence presumed dead, execute a RPC to the plugin informing that.
        :param context: RPC context
        :return: None
        """
        res = self._dev_status.check_backlogged_hosting_devices()
        if res['reachable']:
            self.process_services(device_ids=res['reachable'])
        if res['dead']:
            LOG.debug("Reporting dead hosting devices: %s", res['dead'])
            self.devmgr_rpc.report_dead_hosting_devices(context,
                                                        hd_ids=res['dead'])
    def hosting_devices_removed(self, context, payload):
        """Deal with hosting device removed RPC message."""
        try:
            if payload['hosting_data']:
                if payload['hosting_data'].keys():
                    self.process_services(removed_devices_info=payload)
        except KeyError as e:
            # BUG FIX: the format spec was '%{error}s', which is not valid
            # printf-style syntax and would itself raise when this error
            # path was exercised; corrected to '%(error)s'.
            LOG.error(_("Invalid payload format for received RPC message "
                        "`hosting_devices_removed`. Error is %(error)s. "
                        "Payload is %(payload)s"),
                      {'error': e, 'payload': payload})
class CiscoCfgAgentWithStateReport(CiscoCfgAgent):
    """Cfg agent variant that registers itself with the plugin and
    periodically reports its state."""
    def __init__(self, host, conf=None):
        # RPC channel used to push state reports to the plugin.
        self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN)
        self.agent_state = {
            'binary': 'neutron-cisco-cfg-agent',
            'host': host,
            'topic': c_constants.CFG_AGENT,
            'configurations': {},
            'start_flag': True,
            'agent_type': c_constants.AGENT_TYPE_CFG}
        report_interval = cfg.CONF.AGENT.report_interval
        # First report uses call() (synchronous); later ones use cast().
        self.use_call = True
        self._initialize_rpc(host)
        self._agent_registration()
        super(CiscoCfgAgentWithStateReport, self).__init__(host=host,
                                                           conf=conf)
        if report_interval:
            self.heartbeat = loopingcall.FixedIntervalLoopingCall(
                self._report_state)
            self.heartbeat.start(interval=report_interval)
    def _agent_registration(self):
        """Register this agent with the server.
        This method registers the cfg agent with the neutron server so hosting
        devices can be assigned to it. In case the server is not ready to
        accept registration (it sends a False) then we retry registration
        for `MAX_REGISTRATION_ATTEMPTS` with a delay of
        `REGISTRATION_RETRY_DELAY`. If there is no server response or a
        failure to register after the required number of attempts,
        the agent stops itself.
        :raises SystemExit: when no device manager is found or all
            registration attempts fail
        """
        for attempts in xrange(MAX_REGISTRATION_ATTEMPTS):
            context = n_context.get_admin_context_without_session()
            self.send_agent_report(self.agent_state, context)
            res = self.devmgr_rpc.register_for_duty(context)
            if res is True:
                LOG.info(_("[Agent registration] Agent successfully "
                           "registered"))
                return
            elif res is False:
                LOG.warn(_("[Agent registration] Neutron server said that "
                           "device manager was not ready. Retrying in %0.2f "
                           "seconds "), REGISTRATION_RETRY_DELAY)
                time.sleep(REGISTRATION_RETRY_DELAY)
            elif res is None:
                LOG.error(_("[Agent registration] Neutron server said that no "
                            "device manager was found. Cannot "
                            "continue. Exiting!"))
                raise SystemExit("Cfg Agent exiting")
        LOG.error(_("[Agent registration] %d unsuccessful registration "
                    "attempts. Exiting!"), MAX_REGISTRATION_ATTEMPTS)
        raise SystemExit("Cfg Agent exiting")
    def _report_state(self):
        """Report state to the plugin.
        This task run every `report_interval` period.
        Collects, creates and sends a summary of the services currently
        managed by this agent. Data is collected from the service helper(s).
        Refer the `configurations` dict for the parameters reported.
        :return: None
        """
        LOG.debug("Report state task started")
        configurations = {}
        if self.routing_service_helper:
            configurations = self.routing_service_helper.collect_state(
                self.agent_state['configurations'])
        non_responding = self._dev_status.get_backlogged_hosting_devices_info()
        configurations['non_responding_hosting_devices'] = non_responding
        self.agent_state['configurations'] = configurations
        self.agent_state['local_time'] = str(timeutils.utcnow())
        LOG.debug("State report data: %s", pprint.pformat(self.agent_state))
        self.send_agent_report(self.agent_state, self.context)
    def send_agent_report(self, report, context):
        """Send the agent report via RPC."""
        try:
            self.state_rpc.report_state(context, report, self.use_call)
            # 'start_flag' is only meaningful on the very first report.
            report.pop('start_flag', None)
            self.use_call = False
            LOG.debug("Send agent report successfully completed")
        except AttributeError:
            # This means the server does not support report_state
            LOG.warn(_("Neutron server does not support state report. "
                       "State report for this agent will be disabled."))
            self.heartbeat.stop()
            return
        except Exception:
            LOG.exception(_("Failed sending agent report!"))
def main(manager='neutron.plugins.cisco.cfg_agent.'
                 'cfg_agent.CiscoCfgAgentWithStateReport'):
    """Entry point for the Cisco cfg agent service.

    Registers all option groups, parses the CLI/config, sets up logging and
    launches the agent as a neutron service, blocking until it exits.
    :param manager: dotted path of the manager class to run
    """
    conf = cfg.CONF
    conf.register_opts(CiscoCfgAgent.OPTS, "cfg_agent")
    config.register_agent_state_opts_helper(conf)
    config.register_root_helper(conf)
    conf.register_opts(interface.OPTS)
    conf.register_opts(external_process.OPTS)
    common_config.init(sys.argv[1:])
    conf(project='neutron')
    config.setup_logging()
    server = neutron_service.Service.create(
        binary='neutron-cisco-cfg-agent',
        topic=c_constants.CFG_AGENT,
        report_interval=cfg.CONF.AGENT.report_interval,
        manager=manager)
    service.launch(server).wait()
| |
import logging
import time
from sys import version_info
from functools import wraps
from threading import Event
from types import MethodType
from enum import IntEnum
from ..watchmen import watch
def action_valid_decorator_factory(state, protocol):
    """
    Build a decorator that guards target actions such as step(), stop(),
    read_register() or write_register().

    The generated decorator verifies, before the wrapped method runs, that
    the required protocol slot is populated on the target and that the
    target's current state intersects the required state mask.
    :param state: A mask specifying the required state of the Target
    :type state: An entry of the Enum TargetStates
    :param protocol: The protocol required to execute the action.
    :type protocol: str
    """
    def decorator(wrapped):
        @wraps(wrapped)
        def guarded(self, *args, **kwargs):
            # The protocol must actually be attached to this target.
            if getattr(self.protocols, protocol) is None:
                raise Exception(
                    "%s() requested but %s is undefined." %
                    (wrapped.__name__, protocol))
            # The current state must overlap the required state mask.
            if not self.state.value & state.value:
                raise Exception("%s() requested but Target is %s" %
                                (wrapped.__name__, TargetStates(self.state).name))
            return wrapped(self, *args, **kwargs)
        return guarded
    return decorator
def synchronize_state(*states, **kwargs):
    """
    This decorator can be used to make sure that the target executed a desired
    set of state transitions in a particular order.
    This is useful when the user explicitly requests the target to change
    its state and needs an update notification on the transition itself.
    Internally, this works by creating an event and using a watchman to check
    whether it was triggered.
    :param *states: The desired states of the target, awaited in order;
                    each wrapper level waits on the last entry and recurses
                    for the rest
    :param transition_optional: Also allow to return if the target is already
                                in the desired states, even if the transition
                                didn't happen
    """
    def decorator(func):
        @wraps(func)
        def state_synchronizer(self, *args, **kwargs):
            # This level awaits the last requested state; earlier states
            # are handled by recursive re-decoration further below.
            state = states[-1]
            # NOTE(review): these options are read from kwargs but not
            # popped, so they are also forwarded to func below — confirm
            # that is intended.
            transition_optional = kwargs.get('transition_optional', False)
            blocking = kwargs.get('blocking', True)
            avatar = self.avatar
            if blocking is True:
                state_reached = Event()
                def state_synchronize_cb(avatar, message, *args, **kwargs):
                    # Only react to state updates originating from this target.
                    if message.origin == self:
                        if message.state == state:
                            state_reached.set()
                            avatar.watchmen.remove_watchman('UpdateState', w)
                        elif message.state == TargetStates.EXITED:
                            raise Exception("Target %s exited" % self.name)
                w = avatar.watchmen.add('UpdateState', when='after',
                                        callback=state_synchronize_cb)
            if len(states) == 1:
                ret = func(self, *args, **kwargs)
            else:
                # Await the remaining transitions (in order) around func.
                ret = synchronize_state(*states[:-1])(func)(self, *args, **kwargs)
            if blocking is True:
                # Skip the wait if the transition is optional and the target
                # is already in the awaited state.
                if not (transition_optional == True and self.state == state):
                    state_reached.wait()
            return ret
        return state_synchronizer
    return decorator
class TargetStates(IntEnum):
    """
    A simple Enum for the different states a target can be in.

    The members are distinct bit flags so they can be combined into masks
    (see NOT_RUNNING and BREAKPOINT) and tested with bitwise AND.
    """
    CREATED = 0x1
    INITIALIZED = 0x2
    STOPPED = 0x4
    RUNNING = 0x8
    SYNCING = 0x10
    EXITED = 0x20
    # Composite masks built from the flags above.
    NOT_RUNNING = INITIALIZED | STOPPED
    BREAKPOINT = SYNCING | STOPPED
class TargetRegs(object):
    """Attribute-style access to a target's registers.

    The keys of *register_dict* become attributes; reading such an
    attribute issues read_register() on the target and writing it issues
    write_register(), so values are always live.
    """
    def __init__(self, target, register_dict):
        # '_target' is assigned first so __setattr__ can special-case it.
        self._target = target
        self.__dict__.update(register_dict)
    def __getattribute__(self, name):
        # '_get_names' and '__dict__' must resolve normally to avoid
        # infinite recursion through this hook.
        if name == '_get_names' or name == '__dict__':
            return super(TargetRegs, self).__getattribute__(name)
        elif name in self._get_names():
            # Known register: fetch the live value from the target.
            return self._target.read_register(name)
        else:
            return super(TargetRegs, self).__getattribute__(name)
    def __setattr__(self, name, value):
        if name == '_target':
            return super(TargetRegs, self).__setattr__(name, value)
        elif name in self._get_names():
            # Known register: forward the write to the target.
            return self._target.write_register(name, value)
        else:
            return super(TargetRegs, self).__setattr__(name, value)
    def _update(self, reg_dict):
        # Merge additional register-name mappings.
        self.__dict__.update(reg_dict)
    def _get_nr_from_name(self, reg_name):
        # Register number as originally supplied in register_dict.
        return self.__dict__[reg_name]
    def _get_names(self):
        # NOTE(review): the symmetric difference removes '_target' when it
        # is present but would *add* it if absent; set difference
        # (set(...) - {'_target'}) looks like the actual intent — confirm.
        names = set(self.__dict__) ^ set(['_target'])
        return names
class TargetProtocolStore(object):
    """This class stores the various protocols associated to one target.

    Protocol instances are reference-counted across slots; when the last
    slot referencing an instance is cleared, the instance's shutdown()
    method is invoked automatically.
    """
    DEFAULT_PROTOCOLS = ['memory', 'registers', 'execution']
    def __init__(self, additional_protocols=None):
        self.protocols = set(TargetProtocolStore.DEFAULT_PROTOCOLS)
        if additional_protocols:
            self.protocols |= additional_protocols
        self.unique_protocols = {}  # protocol instance -> reference count
        for slot in self.protocols:
            setattr(self, slot, None)
    def set_all(self, instance, only_defaults=False):
        """
        Sets an instantiated protocol object for either all protocols in this
        store, or only the default ones.
        :param instance: the protocol instance
        :param only_defaults: restrict the assignment to DEFAULT_PROTOCOLS
        """
        if only_defaults:
            slots = TargetProtocolStore.DEFAULT_PROTOCOLS
        else:
            slots = self.protocols
        for slot in slots:
            setattr(self, slot, instance)
    def shutdown(self):
        """Shuts down all the associated protocols."""
        for slot in self.protocols:
            setattr(self, slot, None)
    def __setattr__(self, name, value):
        # Bookkeeping attributes bypass the reference counting below.
        if name in ('protocols', 'unique_protocols'):
            return super(TargetProtocolStore, self).__setattr__(name, value)
        # First assignment to a new name registers it as a protocol slot.
        if not hasattr(self, name):
            self.protocols.add(name)
            previous = None
        else:
            previous = getattr(self, name)
        # Make sure the new instance has a counter entry.
        if value is not None and self.unique_protocols.get(value, None) is None:
            self.unique_protocols[value] = 0
        # Adjust reference counts for old and new values.
        if value is None and previous is not None:
            self.unique_protocols[previous] -= 1
        elif value is not None and previous is None:
            self.unique_protocols[value] += 1
        elif value is not None and previous is not None:
            self.unique_protocols[value] += 1
            self.unique_protocols[previous] -= 1
        # if there is no reference left, let's shut the prot down
        if previous is not None and self.unique_protocols[previous] == 0:
            getattr(self, name).shutdown()
        return super(TargetProtocolStore, self).__setattr__(name, value)
class Target(object):
    """The Target object is one of Avatars core concept, as Avatar orchestrate
    different targets.
    While the generic target has no implementation, it provides an insight over
    all the functions a Target MUST implement
    """
    def __init__(self, avatar, name=None): # type: ('Avatar', str) -> None
        """
        Creates a new instance of a Target.
        :param avatar: The avatar instance this target should be orchestrated by
        :param name: The name of this target, mainly for logging. This is optional and will be autogenerated otherwise.
        """
        super(Target, self).__init__()
        self.state = TargetStates.CREATED
        self.avatar = avatar
        self.name = name if name else self._get_unique_name()
        self.status = {}
        self._arch = avatar.arch
        self.protocols = TargetProtocolStore()
        self.state = TargetStates.CREATED
        # Dedicated logger plus a per-target log file in avatar's output dir.
        self.log = logging.getLogger('%s.targets.%s' % (avatar.log.name, self.name))
        log_file = logging.FileHandler('%s/%s.log' % (avatar.output_directory, self.name))
        formatter = logging.Formatter('%(asctime)s | %(name)s.%(levelname)s | %(message)s')
        log_file.setFormatter(formatter)
        self.log.addHandler(log_file)
        # Attribute-style live register access (reads/writes hit the target).
        self.regs = TargetRegs(self, self._arch.registers)
    def dictify(self):
        """
        Returns the target as *printable* dictionary for the config.
        :raises Exception: if an attribute of an unserialisable type is found
        """
        # Runtime-only attributes that must not be serialised.
        ignore = ['state', 'status', 'regs', 'protocols', 'log', 'avatar']
        ignored_types = (MethodType)
        expected_types = (str, bool, int, list)
        if version_info < (3, 0): expected_types += (unicode, )
        t_dict = {'type': self.__class__.__name__,
                  'module': self.__module__}
        for k, v in self.__dict__.items():
            if k in ignore: continue
            if k.startswith('_'): continue
            if v is None: continue
            if isinstance(v, ignored_types): continue
            if not isinstance(v, expected_types):
                raise Exception(
                    "Unsupported type %s for dictifying %s for target %s" %
                    (type(v), k, self.name))
            t_dict[k] = v
        return t_dict
    @watch('TargetInit')
    def init(self):
        """
        Initializes the target to start the analyses
        """
        pass
    @watch('TargetShutdown')
    def shutdown(self):
        """
        Shutdowns the target
        """
        self.protocols.shutdown()
    @watch('TargetCont')
    @action_valid_decorator_factory(TargetStates.STOPPED, 'execution')
    @synchronize_state(TargetStates.RUNNING)
    def cont(self, blocking=True):
        """
        Continues the execution of the target
        :param blocking: if True, block until the target is RUNNING
        """
        return self.protocols.execution.cont()
    @watch('TargetStop')
    @action_valid_decorator_factory(TargetStates.RUNNING, 'execution')
    @synchronize_state(TargetStates.STOPPED, transition_optional=True)
    def stop(self, blocking=True):
        """
        Stops the execution of the target
        :param blocking: if True, block until the target is STOPPED
        """
        return self.protocols.execution.stop()
    @watch('TargetStep')
    @action_valid_decorator_factory(TargetStates.STOPPED, 'execution')
    @synchronize_state(TargetStates.RUNNING, TargetStates.STOPPED)
    def step(self, blocking=True):
        """
        Steps one instruction.
        :param blocking: if True, block until the target is STOPPED again
        """
        return self.protocols.execution.step()
    @watch('TargetSetFile')
    @action_valid_decorator_factory(TargetStates.STOPPED, 'execution')
    def set_file(self, elf):
        """
        Load an ELF file
        :param elf: ELF file to load
        :returns: True on success else False
        """
        if not hasattr(self.protocols.execution, 'set_file'):
            self.log.error('Protocol "' + type(self.protocols.execution).__name__ + '" does not support "set_file"')
            return False
        return self.protocols.execution.set_file(elf)
    @watch('TargetDownload')
    @action_valid_decorator_factory(TargetStates.STOPPED, 'execution')
    def download(self):
        """
        Download the loaded code to the Target
        :returns: True on success else False
        """
        if not hasattr(self.protocols.execution, 'download'):
            self.log.error('Protocol "' + type(self.protocols.execution).__name__ + '" does not support "download"')
            return False
        return self.protocols.execution.download()
    @watch('TargetGetSymbol')
    @action_valid_decorator_factory(TargetStates.STOPPED, 'memory')
    def get_symbol(self, symbol):
        """
        Get the address of a symbol
        :param symbol: The name of a symbol whose address is wanted
        :returns: (True, Address) on success else False
        """
        return self.protocols.memory.get_symbol(symbol)
    @watch('TargetWriteMemory')
    #@action_valid_decorator_factory(TargetStates.STOPPED, 'memory')
    def write_memory(self, address, size, value, num_words=1, raw=False):
        """
        Writing to memory of the target
        :param address:   The address from where the memory-write should
                          start
        :param size:      The size of the memory write
        :param value:     The actual value written to memory
        :type val:        int if num_words == 1 and raw == False
                          list if num_words > 1 and raw == False
                          str or byte if raw == True
        :param num_words: The amount of words to read
        :param raw:       Specifies whether to write in raw or word mode
        :returns:         True on success else False
        """
        try:
            target_range = self.avatar.get_memory_range(address)
        except Exception as e:
            self.log.warn("Performing write on undefined range at 0x%x" % address)
            target_range = None
        # Writes into a range forwarded to another target are delegated.
        if target_range is not None and target_range.forwarded is True and \
                target_range.forwarded_to != self:
            return target_range.forwarded_to.write_memory(address, size, value,
                                                          num_words, raw)
        return self.protocols.memory.write_memory(address, size, value,
                                                  num_words, raw)
    @watch('TargetReadMemory')
    @action_valid_decorator_factory(TargetStates.STOPPED, 'memory')
    def read_memory(self, address, size, num_words=1, raw=False):
        """
        Reading from memory of the target
        :param address:   The address to read from
        :param size:      The size of a read word
        :param num_words: The amount of words to read (default: 1)
        :param raw:       Whether the read memory is returned unprocessed
        :return:          The read memory
        """
        try:
            target_range = self.avatar.get_memory_range(address)
        except Exception as e:
            # BUG FIX: this message previously said "write" (copy-paste from
            # write_memory) although this is the read path.
            self.log.warn("Performing read on undefined range at 0x%x" % address)
            target_range = None
        # Reads from a range forwarded to another target are delegated.
        if target_range is not None and target_range.forwarded is True and \
                target_range.forwarded_to != self:
            return target_range.forwarded_to.read_memory(address, size,
                                                         num_words, raw)
        return self.protocols.memory.read_memory(address, size, num_words, raw)
    @watch('TargetRegisterWrite')
    #@action_valid_decorator_factory(TargetStates.STOPPED, 'registers')
    def write_register(self, register, value):
        """
        Writing a register to the target
        :param register: The name of the register
        :param value:    The actual value written to the register
        """
        return self.protocols.registers.write_register(register, value)
    @watch('TargetRegisterRead')
    #@action_valid_decorator_factory(TargetStates.STOPPED, 'registers')
    def read_register(self, register):
        """
        Reading a register from the target
        :param register: The name of the register
        :return:         The actual value read from the register
        """
        return self.protocols.registers.read_register(register)
    @watch('TargetSetBreakpoint')
    @action_valid_decorator_factory(TargetStates.NOT_RUNNING, 'execution')
    def set_breakpoint(self, line, hardware=False, temporary=False, regex=False,
                       condition=None, ignore_count=0, thread=0, **kwargs):
        """Inserts a breakpoint
        :param bool hardware:     Hardware breakpoint
        :param bool temporary:    Temporary breakpoint
        :param str regex:         If set, inserts breakpoints matching the regex
        :param str condition:     If set, inserts a breakpoint with the condition
        :param int ignore_count:  Amount of times the bp should be ignored
        :param int thread:        Threadno in which this breakpoints should be added
        """
        return self.protocols.execution.set_breakpoint(line, hardware=hardware,
                                                       temporary=temporary,
                                                       regex=regex,
                                                       condition=condition,
                                                       ignore_count=ignore_count,
                                                       thread=thread, **kwargs)
    @watch('TargetSetWatchPoint')
    @action_valid_decorator_factory(TargetStates.NOT_RUNNING, 'execution')
    def set_watchpoint(self, variable, write=True, read=False):
        """Inserts a watchpoint
        :param variable:   The name of a variable or an address to watch
        :param bool write: Write watchpoint
        :param bool read:  Read watchpoint
        """
        return self.protocols.execution.set_watchpoint(variable,
                                                       write=write,
                                                       read=read)
    @watch('TargetRemovebreakpoint')
    @action_valid_decorator_factory(TargetStates.STOPPED, 'execution')
    def remove_breakpoint(self, bkptno):
        """Deletes a breakpoint"""
        return self.protocols.execution.remove_breakpoint(bkptno)
    def update_state(self, state):
        """Record a new target state and log the transition."""
        self.log.info("State changed to %s", TargetStates(state))
        self.state = state
    @watch('TargetWait')
    def wait(self, state=TargetStates.STOPPED|TargetStates.EXITED):
        """Busy-wait (1 ms polls) until the target state matches *state*.
        Unless SYNCING itself is waited for, SYNCING states are skipped over.
        :param state: bitmask of TargetStates to wait for
        """
        if state & TargetStates.SYNCING:
            self.log.warn("Waiting on SYNCING-state - this could lead to races")
        while True:
            if self.state & state != 0 and \
                    (state & TargetStates.SYNCING or \
                     self.state & TargetStates.SYNCING == 0):
                break
            time.sleep(.001) # send thread a ms to sleep to free resources
    def get_status(self):
        """
        Returns useful information about the target as a dict.
        """
        self.status['state'] = self.state
        return self.status
    def _get_unique_name(self, i=0):
        """Generate a name of the form <ClassName><i> not yet used by avatar."""
        classname = type(self).__name__
        targetname = "{}{}".format(classname, i)
        if self.avatar and self.avatar.targets and targetname in self.avatar.targets:
            return self._get_unique_name(i + 1)
        return targetname
    def _resolve_executable_name(self):
        """
        Resolves the name of the executable for the endpoint.
        Order of operation:
        1: Check if config exists and whether target is installed
        2: Check sys_name from default config
        3: Check apt_name from default config
        4: BailOut
        """
        pass
    # ##generic aliases##
    wr = write_register
    rr = read_register
    rm = read_memory
    wm = write_memory
    bp = set_breakpoint
| |
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Matti Hamalainen <msh@nmr.mgh.harvard.edu>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Eric Larson <larsoner@uw.edu>
#
# License: BSD (3-clause)
import numpy as np
from copy import deepcopy
from ..surface import (fast_cross_3d, _find_nearest_tri_pt, _get_tri_supp_geom,
_triangle_coords)
from ..io.constants import FIFF
from ..transforms import apply_trans
from ..utils import logger, verbose
from ..parallel import parallel_func
from ..io.compensator import get_current_comp, make_compensator
from ..io.pick import pick_types
# #############################################################################
# COIL SPECIFICATION AND FIELD COMPUTATION MATRIX
def _dup_coil_set(coils, coord_frame, t):
    """Make a duplicate of a coil set, optionally applying a transform.

    Parameters
    ----------
    coils : list of dict
        Coil definitions with 'r0', 'ex', 'ey', 'ez', 'rmag', 'cosmag'
        and 'coord_frame' entries.
    coord_frame : int
        Coordinate frame of the input coils; must equal ``t['from']``
        when a transform is given.
    t : dict | None
        Transformation with 'from', 'to' and 'trans' entries.

    Returns
    -------
    coils, coord_frame
        The duplicated (and possibly transformed) coils and the frame
        they now live in.
    """
    if t is not None and coord_frame != t['from']:
        raise RuntimeError('transformation frame does not match the coil set')
    coils = deepcopy(coils)
    if t is not None:
        coord_frame = t['to']
        for coil in coils:
            # positions get the full affine transform...
            coil['r0'] = apply_trans(t['trans'], coil['r0'])
            # ...direction vectors pass False as the third argument
            # (presumably rotation-only, no translation -- TODO confirm
            # against apply_trans)
            coil['ex'] = apply_trans(t['trans'], coil['ex'], False)
            coil['ey'] = apply_trans(t['trans'], coil['ey'], False)
            coil['ez'] = apply_trans(t['trans'], coil['ez'], False)
            coil['rmag'] = apply_trans(t['trans'], coil['rmag'])
            coil['cosmag'] = apply_trans(t['trans'], coil['cosmag'], False)
            coil['coord_frame'] = t['to']
    return coils, coord_frame
def _check_coil_frame(coils, coord_frame, bem):
    """Check to make sure the coils are in the correct coordinate frame.

    Coils in head coordinates are duplicated and transformed into MRI
    coordinates using ``bem['head_mri_t']``; any other non-MRI frame is
    an error.

    Returns
    -------
    coils, coord_frame
        The (possibly transformed) coils and the resulting frame.

    Raises
    ------
    RuntimeError
        If the coils are in an unsupported coordinate frame.
    """
    if coord_frame != FIFF.FIFFV_COORD_MRI:
        if coord_frame == FIFF.FIFFV_COORD_HEAD:
            # Make a transformed duplicate.
            # BUGFIX: the result was previously assigned to a misspelled
            # name (``coord_Frame``), so the stale input frame was returned.
            coils, coord_frame = _dup_coil_set(coils, coord_frame,
                                               bem['head_mri_t'])
        else:
            raise RuntimeError('Bad coil coordinate frame %s' % coord_frame)
    return coils, coord_frame
def _lin_field_coeff(s, mult, rmags, cosmags, ws, counts, n_jobs):
    """Use the linear field approximation to get field coefficients.

    The surface triangles (and their normals and areas) are split across
    ``n_jobs`` workers and the per-chunk coefficient matrices are summed,
    then scaled by ``mult``.
    """
    parallel, p_fun, _ = parallel_func(_do_lin_field_coeff, n_jobs)
    nas = np.array_split
    # one chunk of (tris, tri_nn, tri_area) per job
    coeffs = parallel(p_fun(s['rr'], t, tn, ta,
                            rmags, cosmags, ws, counts)
                      for t, tn, ta in zip(nas(s['tris'], n_jobs),
                                           nas(s['tri_nn'], n_jobs),
                                           nas(s['tri_area'], n_jobs)))
    return mult * np.sum(coeffs, axis=0)
def _do_lin_field_coeff(rr, t, tn, ta, rmags, cosmags, ws, counts):
    """Actually get field coefficients (parallel-friendly).

    ``t``/``tn``/``ta`` are a chunk of triangles, triangle normals and
    triangle areas; ``rmags``/``cosmags``/``ws`` are the concatenated coil
    integration points, direction cosines and weights; ``counts`` gives
    the number of integration points per coil.
    """
    coeff = np.zeros((len(counts), len(rr)))
    # map each concatenated integration point back to its coil index
    bins = np.repeat(np.arange(len(counts)), counts)
    for tri, tri_nn, tri_area in zip(t, tn, ta):
        # Accumulate the coefficients for each triangle node
        # and add to the corresponding coefficient matrix
        tri_rr = rr[tri]
        # The following is equivalent to:
        # for j, coil in enumerate(coils['coils']):
        #     x = func(coil['rmag'], coil['cosmag'],
        #              tri_rr, tri_nn, tri_area)
        #     res = np.sum(coil['w'][np.newaxis, :] * x, axis=1)
        #     coeff[j][tri + off] += mult * res
        # Simple version (bem_lin_field_coeffs_simple)
        zz = []
        for trr in tri_rr:
            diff = rmags - trr
            dl = np.sum(diff * diff, axis=1)
            c = fast_cross_3d(diff, tri_nn[np.newaxis, :])
            x = tri_area * np.sum(c * cosmags, axis=1) / \
                (3.0 * dl * np.sqrt(dl))
            # reduce the weighted per-point values to one value per coil
            zz += [np.bincount(bins, weights=x * ws, minlength=len(counts))]
        coeff[:, tri] += np.array(zz).T
    return coeff
def _concatenate_coils(coils):
"""Helper to concatenate coil parameters"""
rmags = np.concatenate([coil['rmag'] for coil in coils])
cosmags = np.concatenate([coil['cosmag'] for coil in coils])
ws = np.concatenate([coil['w'] for coil in coils])
counts = np.array([len(coil['rmag']) for coil in coils])
return rmags, cosmags, ws, counts
def _bem_specify_coils(bem, coils, coord_frame, mults, n_jobs):
    """Set up for computing the solution at a set of coils.

    Builds the field-computation coefficient matrix for all BEM surfaces,
    multiplies it through the BEM solution, and scales by ``mults``.
    Returns an (n_coils, n_bem_vertices)-shaped solution matrix.
    """
    # Compute the weighting factors to obtain the magnetic field
    # in the linear potential approximation
    coils, coord_frame = _check_coil_frame(coils, coord_frame, bem)
    # leaving this in in case we want to easily add in the future
    # if method != 'simple':  # in ['ferguson', 'urankar']:
    #     raise NotImplementedError
    # Process each of the surfaces
    rmags, cosmags, ws, counts = _concatenate_coils(coils)
    # offsets of each surface's vertices within the concatenated array
    lens = np.cumsum(np.r_[0, [len(s['rr']) for s in bem['surfs']]])
    coeff = np.empty((len(counts), lens[-1]))
    for o1, o2, surf, mult in zip(lens[:-1], lens[1:],
                                  bem['surfs'], bem['field_mult']):
        coeff[:, o1:o2] = _lin_field_coeff(surf, mult, rmags, cosmags,
                                           ws, counts, n_jobs)
    # put through the bem
    sol = np.dot(coeff, bem['solution'])
    sol *= mults
    return sol
def _bem_specify_els(bem, els, mults):
    """Set up for computing the solution at a set of electrodes.

    Each electrode integration point is mapped onto the scalp surface and
    the BEM solution is linearly interpolated between the vertices of the
    nearest triangle.  Returns an (n_electrodes, n_bem_vertices) matrix.
    """
    sol = np.zeros((len(els), bem['solution'].shape[1]))
    # Go through all coils
    scalp = bem['surfs'][0]
    scalp['geom'] = _get_tri_supp_geom(scalp['tris'], scalp['rr'])
    inds = np.arange(len(scalp['tris']))
    # In principle this could be parallelized, but pickling overhead is huge
    # (makes it slower than non-parallel)
    for k, el in enumerate(els):
        # Go through all 'integration points'
        el_r = apply_trans(bem['head_mri_t']['trans'], el['rmag'])
        for elw, r in zip(el['w'], el_r):
            best = _find_nearest_tri_pt(inds, r, scalp['geom'], True)[2]
            # Calculate a linear interpolation between the vertex values
            tri = scalp['tris'][best]
            x, y, z = _triangle_coords(r, scalp['geom'], best)
            # barycentric weights (z is unused: the point lies on the tri)
            w = elw * np.array([(1.0 - x - y), x, y])
            amt = np.dot(w, bem['solution'][tri])
            sol[k] += amt
    sol *= mults
    return sol
# #############################################################################
# COMPENSATION
def _make_ctf_comp_coils(info, coils):
    """Get the correct compensator for CTF coils.

    Returns ``None`` when no compensation is in effect, otherwise the
    compensation matrix produced by ``make_compensator``.
    """
    # adapted from mne_make_ctf_comp() from mne_ctf_comp.c
    logger.info('Setting up compensation data...')
    comp_num = get_current_comp(info)
    if comp_num is None or comp_num == 0:
        logger.info('    No compensation set. Nothing more to do.')
        return None
    # Need to meaningfully populate comp['set'] dict a.k.a. compset
    n_comp_ch = sum([c['kind'] == FIFF.FIFFV_MEG_CH for c in info['chs']])
    logger.info('    %d out of %d channels have the compensation set.'
                % (n_comp_ch, len(coils)))
    # Find the desired compensation data matrix
    compensator = make_compensator(info, 0, comp_num, True)
    logger.info('    Desired compensation data (%s) found.' % comp_num)
    logger.info('    All compensation channels found.')
    logger.info('    Preselector created.')
    logger.info('    Compensation data matrix created.')
    logger.info('    Postselector created.')
    return compensator
# #############################################################################
# BEM COMPUTATION
_MAG_FACTOR = 1e-7 # from C code
# def _bem_inf_pot(rd, Q, rp):
# """The infinite medium potential in one direction"""
# # NOTE: the (4.0 * np.pi) that was in the denominator has been moved!
# diff = rp - rd
# diff2 = np.sum(diff * diff, axis=1)
# return np.sum(Q * diff, axis=1) / (diff2 * np.sqrt(diff2))
def _bem_inf_pots(rr, surf_rr, Q=None):
"""The infinite medium potential in all 3 directions"""
# NOTE: the (4.0 * np.pi) that was in the denominator has been moved!
diff = surf_rr.T[np.newaxis, :, :] - rr[:, :, np.newaxis] # n_rr, 3, n_bem
diff_norm = np.sum(diff * diff, axis=1)
diff_norm *= np.sqrt(diff_norm)
diff_norm[diff_norm == 0] = 1 # avoid nans
if Q is None: # save time when Q=np.eye(3) (e.g., MEG sensors)
return diff / diff_norm[:, np.newaxis, :]
else: # get components in each direction (e.g., EEG sensors)
return np.einsum('ijk,mj->imk', diff, Q) / diff_norm[:, np.newaxis, :]
# This function has been refactored to process all points simultaneously
# def _bem_inf_field(rd, Q, rp, d):
# """Infinite-medium magnetic field"""
# diff = rp - rd
# diff2 = np.sum(diff * diff, axis=1)
# x = fast_cross_3d(Q[np.newaxis, :], diff)
# return np.sum(x * d, axis=1) / (diff2 * np.sqrt(diff2))
def _bem_inf_fields(rr, rp, c):
"""Infinite-medium magnetic field in all 3 basis directions"""
# Knowing that we're doing all directions, the above can be refactored:
diff = rp.T[np.newaxis, :, :] - rr[:, :, np.newaxis]
diff_norm = np.sum(diff * diff, axis=1)
diff_norm *= np.sqrt(diff_norm)
diff_norm[diff_norm == 0] = 1 # avoid nans
# This is the result of cross-prod calcs with basis vectors,
# as if we had taken (Q=np.eye(3)), then multiplied by the cosmags (c)
# factor, and then summed across directions
x = np.array([diff[:, 1] * c[:, 2] - diff[:, 2] * c[:, 1],
diff[:, 2] * c[:, 0] - diff[:, 0] * c[:, 2],
diff[:, 0] * c[:, 1] - diff[:, 1] * c[:, 0]])
return np.rollaxis(x / diff_norm, 1)
def _bem_pot_or_field(rr, mri_rr, mri_Q, coils, solution, srr,
                      n_jobs, coil_type):
    """Calculate the magnetic field or electric potential.

    The code is very similar between EEG and MEG potentials, so we'll
    combine them.
    This does the work of "fwd_comp_field" (which wraps to "fwd_bem_field")
    and "fwd_bem_pot_els" in MNE-C.

    Returns the (unscaled-by-caller) field/potential matrix ``B`` of shape
    (3 * n_dipoles, n_coils), already multiplied by ``_MAG_FACTOR``.
    """
    # Both MEG and EEG have the inifinite-medium potentials
    # This could be just vectorized, but eats too much memory, so instead we
    # reduce memory by chunking within _do_inf_pots and parallelize, too:
    parallel, p_fun, _ = parallel_func(_do_inf_pots, n_jobs)
    nas = np.array_split
    B = np.sum(parallel(p_fun(mri_rr, sr.copy(), mri_Q, sol.copy())
                        for sr, sol in zip(nas(srr, n_jobs),
                                           nas(solution.T, n_jobs))), axis=0)
    # The copy()s above should make it so the whole objects don't need to be
    # pickled...
    # Only MEG gets the primary current distribution
    if coil_type == 'meg':
        # Primary current contribution (can be calc. in coil/dipole coords)
        parallel, p_fun, _ = parallel_func(_do_prim_curr, n_jobs)
        pcc = np.concatenate(parallel(p_fun(rr, c)
                                      for c in nas(coils, n_jobs)), axis=1)
        B += pcc
    B *= _MAG_FACTOR
    return B
def _do_prim_curr(rr, coils):
    """Calculate the primary-current contribution for a set of coils.

    Returns an array of shape (3 * n_dipoles, n_coils).
    """
    out = np.empty((len(rr) * 3, len(coils)))
    for col, coil in enumerate(coils):
        fields = _bem_inf_fields(rr, coil['rmag'], coil['cosmag'])
        # weight each integration point and sum over them
        out[:, col] = np.sum(coil['w'] * fields, axis=2).ravel()
    return out
def _do_inf_pots(rr, srr, mri_Q, sol):
    """Calculate infinite potentials using chunks.

    Multiplies the infinite-medium potentials for source positions ``rr``
    (in MRI coordinates) against the BEM solution chunk ``sol``, chunking
    the sources 1000 at a time to limit peak memory.
    """
    # The following code is equivalent to this, but saves memory
    # v0s = _bem_inf_pots(rr, srr, mri_Q)  # n_rr x 3 x n_surf_rr
    # v0s.shape = (len(rr) * 3, v0s.shape[2])
    # B = np.dot(v0s, sol)
    # We chunk the source rr's in order to save memory
    bounds = np.r_[np.arange(0, len(rr), 1000), len(rr)]
    B = np.empty((len(rr) * 3, sol.shape[1]))
    for bi in range(len(bounds) - 1):
        v0s = _bem_inf_pots(rr[bounds[bi]:bounds[bi + 1]], srr, mri_Q)
        v0s.shape = (v0s.shape[0] * 3, v0s.shape[2])
        B[3 * bounds[bi]:3 * bounds[bi + 1]] = np.dot(v0s, sol)
    return B
# #############################################################################
# SPHERE COMPUTATION
def _sphere_pot_or_field(rr, mri_rr, mri_Q, coils, sphere, srr,
                         n_jobs, coil_type):
    """Do potential or field for spherical model.

    Dispatches to the EEG or MEG sphere-model computation and splits the
    source positions across ``n_jobs`` workers.  ``mri_rr``/``mri_Q``/``srr``
    are unused here but keep the signature parallel to _bem_pot_or_field.
    """
    fun = _eeg_spherepot_coil if coil_type == 'eeg' else _sphere_field
    parallel, p_fun, _ = parallel_func(fun, n_jobs)
    B = np.concatenate(parallel(p_fun(r, coils, sphere)
                                for r in np.array_split(rr, n_jobs)))
    return B
def _sphere_field(rrs, coils, sphere):
    """This uses Jukka Sarvas' field computation.

    Jukka Sarvas, "Basic mathematical and electromagnetic concepts of the
    biomagnetic inverse problem", Phys. Med. Biol. 1987, Vol. 32, 1, 11-22.
    The formulas have been manipulated for efficient computation
    by Matti Hamalainen, February 1990

    Returns ``B`` of shape (3 * n_dipoles, n_coils).
    """
    rmags, cosmags, ws, counts = _concatenate_coils(coils)
    # map concatenated integration points back to coil indices
    bins = np.repeat(np.arange(len(counts)), counts)
    # Shift to the sphere model coordinates
    rrs = rrs - sphere['r0']
    B = np.zeros((3 * len(rrs), len(coils)))
    for ri, rr in enumerate(rrs):
        # Check for a dipole at the origin
        if np.sqrt(np.dot(rr, rr)) <= 1e-10:
            continue
        this_poss = rmags - sphere['r0']
        # Vector from dipole to the field point
        a_vec = this_poss - rr
        a = np.sqrt(np.sum(a_vec * a_vec, axis=1))
        r = np.sqrt(np.sum(this_poss * this_poss, axis=1))
        rr0 = np.sum(this_poss * rr, axis=1)
        ar = (r * r) - rr0
        ar0 = ar / a
        F = a * (r * a + ar)
        gr = (a * a) / r + ar0 + 2.0 * (a + r)
        g0 = a + 2 * r + ar0
        # Compute the dot products needed
        re = np.sum(this_poss * cosmags, axis=1)
        r0e = np.sum(rr * cosmags, axis=1)
        g = (g0 * r0e - gr * re) / (F * F)
        # NOTE(review): with '|' this mask is effectively always True
        # (r > 0 holds for any point away from the origin); confirm '|'
        # vs '&' against the reference implementation.
        good = (a > 0) | (r > 0) | ((a * r) + 1 > 1e-5)
        v1 = fast_cross_3d(rr[np.newaxis, :], cosmags)
        v2 = fast_cross_3d(rr[np.newaxis, :], this_poss)
        xx = ((good * ws)[:, np.newaxis] *
              (v1 / F[:, np.newaxis] + v2 * g[:, np.newaxis]))
        # reduce the per-point contributions to one column per coil
        zz = np.array([np.bincount(bins, weights=x,
                                   minlength=len(counts)) for x in xx.T])
        B[3 * ri:3 * ri + 3, :] = zz
    B *= _MAG_FACTOR
    return B
def _eeg_spherepot_coil(rrs, coils, sphere):
    """Calculate the EEG in the sphere model.

    Uses the equivalent-source (multi-sphere) approximation parameterized
    by ``sphere['mu']`` and ``sphere['lambda']``.  Returns ``B`` of shape
    (3 * n_dipoles, n_coils).
    """
    rmags, cosmags, ws, counts = _concatenate_coils(coils)
    # map concatenated integration points back to coil indices
    bins = np.repeat(np.arange(len(counts)), counts)
    # Shift to the sphere model coordinates
    rrs = rrs - sphere['r0']
    B = np.zeros((3 * len(rrs), len(coils)))
    for ri, rr in enumerate(rrs):
        # Only process dipoles inside the innermost sphere
        if np.sqrt(np.dot(rr, rr)) >= sphere['layers'][0]['rad']:
            continue
        # fwd_eeg_spherepot_vec
        vval_one = np.zeros((len(rmags), 3))
        # Make a weighted sum over the equivalence parameters
        for eq in range(sphere['nfit']):
            # Scale the dipole position
            rd = sphere['mu'][eq] * rr
            rd2 = np.sum(rd * rd)
            rd2_inv = 1.0 / rd2
            # Go over all electrodes
            this_pos = rmags - sphere['r0']
            # Scale location onto the surface of the sphere (not used)
            # if sphere['scale_pos']:
            #     pos_len = (sphere['layers'][-1]['rad'] /
            #                np.sqrt(np.sum(this_pos * this_pos, axis=1)))
            #     this_pos *= pos_len
            # Vector from dipole to the field point
            a_vec = this_pos - rd
            # Compute the dot products needed
            a = np.sqrt(np.sum(a_vec * a_vec, axis=1))
            a3 = 2.0 / (a * a * a)
            r2 = np.sum(this_pos * this_pos, axis=1)
            r = np.sqrt(r2)
            rrd = np.sum(this_pos * rd, axis=1)
            ra = r2 - rrd
            rda = rrd - rd2
            # The main ingredients
            F = a * (r * a + ra)
            c1 = a3 * rda + 1.0 / a - 1.0 / r
            c2 = a3 + (a + r) / (r * F)
            # Mix them together and scale by lambda/(rd*rd)
            m1 = (c1 - c2 * rrd)
            m2 = c2 * rd2
            vval_one += (sphere['lambda'][eq] * rd2_inv *
                         (m1[:, np.newaxis] * rd +
                          m2[:, np.newaxis] * this_pos))
        # compute total result: weight per point, then sum per coil
        xx = vval_one * ws[:, np.newaxis]
        zz = np.array([np.bincount(bins, weights=x,
                                   minlength=len(counts)) for x in xx.T])
        B[3 * ri:3 * ri + 3, :] = zz
    # finishing by scaling by 1/(4*M_PI)
    B *= 0.25 / np.pi
    return B
# #############################################################################
# MAGNETIC DIPOLE (e.g. CHPI)
def _magnetic_dipole_field_vec(rrs, coils):
    """Compute an MEG forward solution for a set of magnetic dipoles.

    Parameters
    ----------
    rrs : ndarray, shape (n_dipoles, 3)
        Magnetic dipole positions.
    coils : list of dict
        Coil definitions ('rmag', 'cosmag', 'w' entries).

    Returns
    -------
    fwd : ndarray, shape (3 * n_dipoles, n_coils)
        Field for the three orthogonal dipole orientations at each position.

    Raises
    ------
    RuntimeError
        If any coil integration point is within 1e-5 m of a dipole.
    """
    # The code below is a more efficient version (~30x) of this:
    # for ri, rr in enumerate(rrs):
    #     for k in range(len(coils)):
    #         this_coil = coils[k]
    #         # Go through all points
    #         diff = this_coil['rmag'] - rr
    #         dist2 = np.sum(diff * diff, axis=1)[:, np.newaxis]
    #         dist = np.sqrt(dist2)
    #         if (dist < 1e-5).any():
    #             raise RuntimeError('Coil too close')
    #         dist5 = dist2 * dist2 * dist
    #         sum_ = (3 * diff * np.sum(diff * this_coil['cosmag'],
    #                                   axis=1)[:, np.newaxis] -
    #                 dist2 * this_coil['cosmag']) / dist5
    #         fwd[3*ri:3*ri+3, k] = 1e-7 * np.dot(this_coil['w'], sum_)
    # FIX: ``fwd`` was previously allocated twice (a dead allocation before
    # the comment block); allocate it exactly once here.
    fwd = np.empty((3 * len(rrs), len(coils)))
    rmags, cosmags, ws, counts = _concatenate_coils(coils)
    # map concatenated integration points back to coil indices
    bins = np.repeat(np.arange(len(counts)), counts)
    for ri, rr in enumerate(rrs):
        diff = rmags - rr
        dist2 = np.sum(diff * diff, axis=1)[:, np.newaxis]
        dist = np.sqrt(dist2)
        if (dist < 1e-5).any():
            raise RuntimeError('Coil too close (dist = %g m)' % dist.min())
        sum_ = ws[:, np.newaxis] * (3 * diff * np.sum(diff * cosmags,
                                                      axis=1)[:, np.newaxis] -
                                    dist2 * cosmags) / (dist2 * dist2 * dist)
        for ii in range(3):
            fwd[3 * ri + ii] = np.bincount(bins, weights=sum_[:, ii],
                                           minlength=len(counts))
    fwd *= 1e-7
    return fwd
# #############################################################################
# MAIN TRIAGING FUNCTION
@verbose
def _prep_field_computation(rr, bem, fwd_data, n_jobs, verbose=None):
    """Precompute some things that are used for both MEG and EEG.

    Populates ``fwd_data`` in place with the per-coil-type compensators,
    BEM/sphere solutions, compensation-coil solutions, and the
    field-computation function (``fun``).
    """
    cf = FIFF.FIFFV_COORD_HEAD
    srr = mults = mri_Q = head_mri_t = None
    if not bem['is_sphere']:
        if bem['bem_method'] != 'linear collocation':
            raise RuntimeError('only linear collocation supported')
        # per-vertex source multipliers, including the deferred 1/(4*pi)
        mults = np.repeat(bem['source_mult'] / (4.0 * np.pi),
                          [len(s['rr']) for s in bem['surfs']])[np.newaxis, :]
        srr = np.concatenate([s['rr'] for s in bem['surfs']])
        # The dipole location and orientation must be transformed
        head_mri_t = bem['head_mri_t']
        mri_Q = apply_trans(bem['head_mri_t']['trans'], np.eye(3), False)
    if len(set(fwd_data['coil_types'])) != len(fwd_data['coil_types']):
        raise RuntimeError('Non-unique coil types found')
    compensators, solutions, csolutions = [], [], []
    for coil_type, coils, ccoils, info in zip(fwd_data['coil_types'],
                                              fwd_data['coils_list'],
                                              fwd_data['ccoils_list'],
                                              fwd_data['infos']):
        compensator = solution = csolution = None
        if len(coils) > 0:  # something to actually do
            if coil_type == 'meg':
                # Compose a compensation data set if necessary
                compensator = _make_ctf_comp_coils(info, coils)
            if not bem['is_sphere']:
                # multiply solution by "mults" here for simplicity
                if coil_type == 'meg':
                    # Field computation matrices for BEM
                    start = 'Composing the field computation matrix'
                    logger.info('\n' + start + '...')
                    solution = _bem_specify_coils(bem, coils, cf,
                                                  mults, n_jobs)
                    if compensator is not None:
                        logger.info(start + ' (compensation coils)...')
                        csolution = _bem_specify_coils(bem, ccoils, cf,
                                                       mults, n_jobs)
                else:
                    solution = _bem_specify_els(bem, coils, mults)
            else:
                # sphere model: the "solution" is just the sphere params
                solution = bem
                if coil_type == 'eeg':
                    logger.info('Using the equivalent source approach in the '
                                'homogeneous sphere for EEG')
        compensators.append(compensator)
        solutions.append(solution)
        csolutions.append(csolution)
    fun = _sphere_pot_or_field if bem['is_sphere'] else _bem_pot_or_field
    fwd_data.update(dict(srr=srr, mri_Q=mri_Q, head_mri_t=head_mri_t,
                         compensators=compensators, solutions=solutions,
                         csolutions=csolutions, fun=fun))
@verbose
def _compute_forwards_meeg(rr, fd, n_jobs, verbose=None):
    """Compute the MEG and EEG forward solutions from prepared field data.

    ``fd`` must have been populated by _prep_field_computation.  Returns a
    list of (3 * n_dipoles, n_channels) arrays, one per coil type.
    """
    # Now, actually compute MEG and EEG solutions
    n_jobs = max(min(n_jobs, len(rr)), 1)
    Bs = list()
    # The dipole location and orientation must be transformed
    mri_rr = None
    if fd['head_mri_t'] is not None:
        mri_rr = apply_trans(fd['head_mri_t']['trans'], rr)
    mri_Q, srr, fun = fd['mri_Q'], fd['srr'], fd['fun']
    for ci in range(len(fd['coils_list'])):
        coils, ccoils = fd['coils_list'][ci], fd['ccoils_list'][ci]
        coil_type, compensator = fd['coil_types'][ci], fd['compensators'][ci]
        solution, csolution = fd['solutions'][ci], fd['csolutions'][ci]
        info = fd['infos'][ci]
        if len(coils) == 0:  # nothing to do
            Bs.append(np.zeros((3 * len(rr), 0)))
            continue
        # Do the actual calculation
        logger.info('Computing %s at %d source location%s '
                    '(free orientations)...'
                    % (coil_type.upper(), len(rr),
                       '' if len(rr) == 1 else 's'))
        B = fun(rr, mri_rr, mri_Q, coils, solution, srr,
                n_jobs, coil_type)
        # Compensate if needed (only done for MEG systems w/compensation)
        if compensator is not None:
            # Compute the field in the compensation coils
            work = fun(rr, mri_rr, mri_Q, ccoils, csolution, srr,
                       n_jobs, coil_type)
            # Combine solutions so we can do the compensation
            both = np.zeros((work.shape[0], B.shape[1] + work.shape[1]))
            picks = pick_types(info, meg=True, ref_meg=False)
            both[:, picks] = B
            picks = pick_types(info, meg=False, ref_meg=True)
            both[:, picks] = work
            B = np.dot(both, compensator.T)
        Bs.append(B)
    return Bs
@verbose
def _compute_forwards(rr, bem, coils_list, ccoils_list,
                      infos, coil_types, n_jobs, verbose=None):
    """Compute the MEG and EEG forward solutions.

    This effectively combines compute_forward_meg and compute_forward_eeg
    from MNE-C.  Returns a list of forward matrices, one per coil type.
    """
    # These are split into two steps to save (potentially) a lot of time
    # when e.g. dipole fitting
    fwd_data = dict(coils_list=coils_list, ccoils_list=ccoils_list,
                    infos=infos, coil_types=coil_types)
    _prep_field_computation(rr, bem, fwd_data, n_jobs)
    Bs = _compute_forwards_meeg(rr, fwd_data, n_jobs)
    return Bs
| |
"""
pymel ipython configuration
Current Features
----------------
tab completion of depend nodes, dag nodes, and attributes
automatic import of pymel
Future Features
---------------
- tab completion of PyNode attributes
- color coding of tab complete options
- to differentiate between methods and attributes
- dag nodes vs depend nodes
- shortNames vs longNames
- magic commands
- bookmarking of maya's recent project and files
To Use
------
place in your PYTHONPATH
add the following line to the 'main' function of $HOME/.ipython/ipy_user_conf.py::
import ipymel
Author: Chad Dombrova
"""
from optparse import OptionParser
# ipymel only makes sense inside (or alongside) Maya: fail fast with a
# clear message if the maya package cannot be imported.
# NOTE(review): "except ImportError, e" is Python-2-only syntax.
try:
    import maya
except ImportError, e:
    print("ipymel can only be setup if the maya package can be imported")
    raise e
import IPython
# Parse the IPython version into comparable parts, e.g. '0.10.2' ->
# [0, 10, 2]; non-numeric components (rc tags etc.) are kept as strings.
ipy_ver = IPython.__version__.split('.')
ipy_ver = [int(x) if x.isdigit() else x for x in ipy_ver]
ver11 = ipy_ver >= [0, 11]
if not ver11:
    # Pre-0.11 API: emulate the modern entry points on top of ipapi
    def get_ipython():
        import IPython.ipapi
        return IPython.ipapi.get()
    IPython.ipapi.IPApi.define_magic = IPython.ipapi.IPApi.expose_magic
    import IPython.ColorANSI as coloransi
    from IPython.genutils import page
    from IPython.ipapi import UsageError
    import IPython.Extensions.ipy_completers
    def get_colors(obj):
        # old IPython keeps the color setting on the shell's rc object
        return color_table[obj.rc.colors].colors
else:
    # 0.11+ moved these into IPython.utils / IPython.core
    import IPython.utils.coloransi as coloransi
    from IPython.core.page import page
    from IPython.core.error import UsageError
    def get_colors(obj):
        return color_table[ip.colors].colors
Colors = coloransi.TermColors
ColorScheme = coloransi.ColorScheme
ColorSchemeTable = coloransi.ColorSchemeTable
# the active IPython shell instance; set by setup code outside this excerpt
ip = None
try:
    import readline
except ImportError:
    # on Windows the readline API is provided by the pyreadline package
    import pyreadline as readline
# Maya node names may contain pipes (DAG paths) and colons (namespaces);
# drop them from the completer delimiters so they stay part of one token.
delim = readline.get_completer_delims()
delim = delim.replace('|', '')  # remove pipes
delim = delim.replace(':', '')  # remove colon
# delim = delim.replace("'", '')  # remove quotes
# delim = delim.replace('"', '')  # remove quotes
readline.set_completer_delims(delim)
import inspect
import re
import glob
import os
import shlex
import sys
# don't import pymel here, as this will trigger loading of maya/pymel
# immediately, and things in the userSetup.py won't get properly entered into
# the ipython shell's namespace... we need the startup of maya to happen
# from "within" ipython, ie, when we do:
# ip.ex("from pymel.core import *")
# from pymel import core
# ...maya.cmds is ok to import before maya is started up, though - it just
# won't be populated yet...
import maya.cmds as cmds
# default color scheme used when building the scheme table below
_scheme_default = 'Linux'
# Build a few color schemes
# Each scheme maps the semantic roles used by the tree printers
# (instance markers, collapsed nodes, tree branches, transforms, shapes,
# non-uniquely-named nodes) to terminal colors.
NoColor = ColorScheme(
    'NoColor', {
        'instance': Colors.NoColor,
        'collapsed': Colors.NoColor,
        'tree': Colors.NoColor,
        'transform': Colors.NoColor,
        'shape': Colors.NoColor,
        'nonunique': Colors.NoColor,
        'nonunique_transform': Colors.NoColor,
        'normal': Colors.NoColor  # color off (usu. Colors.Normal)
    })
LinuxColors = ColorScheme(
    'Linux', {
        'instance': Colors.LightCyan,
        'collapsed': Colors.Yellow,
        'tree': Colors.Green,
        'transform': Colors.White,
        'shape': Colors.LightGray,
        'nonunique': Colors.Red,
        'nonunique_transform': Colors.LightRed,
        'normal': Colors.Normal  # color off (usu. Colors.Normal)
    })
LightBGColors = ColorScheme(
    'LightBG', {
        'instance': Colors.Cyan,
        'collapsed': Colors.LightGreen,
        'tree': Colors.Blue,
        'transform': Colors.DarkGray,
        'shape': Colors.Black,
        'nonunique': Colors.Red,
        'nonunique_transform': Colors.LightRed,
        'normal': Colors.Normal  # color off (usu. Colors.Normal)
    })
# Build table of color schemes (needed by the dag_parser)
color_table = ColorSchemeTable([NoColor, LinuxColors, LightBGColors],
                               _scheme_default)
def finalPipe(obj):
    """Append a trailing pipe (|) to DAG nodes that have children.

    Each successive press of TAB then descends further into the DAG
    hierarchy; this is analogous to TAB completion of directories, which
    always places a final slash (/) after a directory.
    """
    return obj + "|" if cmds.listRelatives(obj) else obj
def splitDag(obj):
    """Split a DAG path into (parent_path, leaf_name) at the last pipe."""
    path, _, tail = obj.rpartition('|')
    return path, tail
def expand(obj):
    """Return glob patterns matching ``obj`` inside nested namespaces.

    Allows completion of objects that reside within a namespace: e.g.
    ``tra*`` will match both ``trak:camera`` and ``tram``.  The search is
    hardwired to a depth of three recursive namespaces.

    TODO: add some code to determine how deep we should go
    """
    base = obj + '*'
    return (base, base + ':*', base + ':*:*')
def complete_node_with_no_path(node):
    """List unique leaf-name completions for a node given without a path.

    Children get a trailing pipe appended so TAB can keep descending.
    """
    seen = []
    for match in cmds.ls(expand(node)):
        leaf = finalPipe(match.split('|')[-1])
        if leaf not in seen:
            seen.append(leaf)
    return seen
def complete_node_with_attr(node, attr):
    """Return '<node>.<attribute>' completions for the attributes (long
    and short names) of ``node`` that start with ``attr``."""
    # print "noe_with_attr", node, attr
    long_attrs = cmds.listAttr(node)
    short_attrs = cmds.listAttr(node, shortNames=1)
    # if node is a plug ( 'persp.t' ), the first result will be the passed plug
    if '.' in node:
        attrs = long_attrs[1:] + short_attrs[1:]
    else:
        attrs = long_attrs + short_attrs
    return [u'%s.%s' % (node, a) for a in attrs if a.startswith(attr)]
def pymel_name_completer(self, event):
    """IPython completer for Maya node names.

    Handles partial node names (with namespace expansion), DAG paths
    (descending via trailing pipes), and ``SCENE.<node>.<attr>`` access.
    Raises TryNext when nothing matches so other completers can run.
    """
    def get_children(obj):
        # list DAG children of obj's parent path whose names extend the
        # partial leaf name; returns [] on any lookup failure
        path, partialObj = splitDag(obj)
        # print "getting children", repr(path), repr(partialObj)
        try:
            fullpath = cmds.ls(path, l=1)[0]
            if not fullpath:
                return []
            children = cmds.listRelatives(fullpath, f=1, c=1)
            if not children:
                return []
        except:
            return []
        matchStr = fullpath + '|' + partialObj
        # print "children", children
        # print matchStr, fullpath, path
        # map the full paths back onto the user's (possibly short) path
        matches = [x.replace(fullpath, path, 1) for x in children if x.startswith(matchStr)]
        # print matches
        return matches
    # print "\nnode", repr(event.symbol), repr(event.line)
    # print "\nbegin"
    line = event.symbol
    matches = None
    #--------------
    # Attributes
    #--------------
    m = re.match( r"""([a-zA-Z_0-9|:.]+)\.(\w*)$""", line)
    if m:
        node, attr = m.groups()
        if node == 'SCENE':
            res = cmds.ls(attr + '*')
            if res:
                matches = ['SCENE.' + x for x in res if '|' not in x]
        elif node.startswith('SCENE.'):
            node = node.replace('SCENE.', '')
            matches = ['SCENE.' + x for x in complete_node_with_attr(node, attr) if '|' not in x]
        else:
            matches = complete_node_with_attr(node, attr)
    #--------------
    # Nodes
    #--------------
    else:
        # we don't yet have a full node
        if '|' not in line or (line.startswith('|') and line.count('|') == 1):
            # print "partial node"
            kwargs = {}
            if line.startswith('|'):
                kwargs['l'] = True
            matches = cmds.ls(expand(line), **kwargs)
        # we have a full node, get it's children
        else:
            matches = get_children(line)
    if not matches:
        raise IPython.ipapi.TryNext
    # if we have only one match, get the children as well
    if len(matches) == 1:
        res = get_children(matches[0] + '|')
        matches += res
    return matches
def pymel_python_completer(self, event):
    """Match attributes or global python names.

    When the evaluated expression is a pymel DependNode or Attribute, its
    Maya attributes (long and short names) are added to the standard
    python-name completions.  Raises TryNext otherwise.
    """
    import pymel.core as pm
    # print "python_matches"
    text = event.symbol
    # print repr(text)
    # Another option, seems to work great. Catches things like ''.<tab>
    m = re.match(r"(\S+(\.\w+)*)\.(\w*)$", text)
    if not m:
        raise IPython.ipapi.TryNext
    expr, attr = m.group(1, 3)
    # print type(self.Completer), dir(self.Completer)
    # print self.Completer.namespace
    # print self.Completer.global_namespace
    # NOTE(review): eval of user-typed text is inherent to this style of
    # completer, but it can run arbitrary code at TAB time.
    try:
        # print "first"
        obj = eval(expr, self.Completer.namespace)
    except:
        try:
            # print "second"
            obj = eval(expr, self.Completer.global_namespace)
        except:
            raise IPython.ipapi.TryNext
    # print "complete"
    if isinstance(obj, (pm.nt.DependNode, pm.Attribute)):
        # print "isinstance"
        # NOTE: ``unicode`` is Python-2-only
        node = unicode(obj)
        long_attrs = cmds.listAttr(node)
        short_attrs = cmds.listAttr(node, shortNames=1)
        matches = []
        matches = self.Completer.python_matches(text)
        # print "here"
        # if node is a plug ( 'persp.t' ), the first result will be the passed plug
        if '.' in node:
            attrs = long_attrs[1:] + short_attrs[1:]
        else:
            attrs = long_attrs + short_attrs
        # print "returning"
        matches += [expr + '.' + at for at in attrs]
        #import colorize
        #matches = [ colorize.colorize(x,'magenta') for x in matches ]
        return matches
    raise IPython.ipapi.TryNext
def buildRecentFileMenu():
    """Gather Maya's recent-file optionVars (port of the MEL logic).

    NOTE(review): this function appears unfinished -- it reads the
    optionVars but does not yet build a menu; the trailing comments
    describe the remaining MEL code to port.
    """
    import pymel.core as pm
    if "RecentFilesList" not in pm.optionVar:
        return
    # get the list
    RecentFilesList = pm.optionVar["RecentFilesList"]
    nNumItems = len(RecentFilesList)
    RecentFilesMaxSize = pm.optionVar["RecentFilesMaxSize"]
    # # check if there are too many items in the list
    # if (RecentFilesMaxSize < nNumItems):
    #
    #     #if so, truncate the list
    #     nNumItemsToBeRemoved = nNumItems - RecentFilesMaxSize
    #
    #     #Begin removing items from the head of the array (least recent file in the list)
    #     for ($i = 0; $i < $nNumItemsToBeRemoved; $i++):
    #
    #         core.optionVar -removeFromArray "RecentFilesList" 0;
    #
    #     RecentFilesList = core.optionVar["RecentFilesList"]
    #     nNumItems = len($RecentFilesList);
    # The RecentFilesTypeList optionVar may not exist since it was
    # added after the RecentFilesList optionVar.  If it doesn't exist,
    # we create it and initialize it with a guess at the file type
    if nNumItems > 0:
        if "RecentFilesTypeList" not in pm.optionVar:
            pm.mel.initRecentFilesTypeList(RecentFilesList)
        RecentFilesTypeList = pm.optionVar["RecentFilesTypeList"]
    # toNativePath
    # first, check if we are the same.
def open_completer(self, event):
    """Completer for the openFile magic: bookmark names (after '-b') and
    numbered directory-history entries (after a lone '-')."""
    relpath = event.symbol
    # print event # dbg
    if '-b' in event.line:
        # return only bookmark completions
        bkms = self.db.get('bookmarks', {})
        return bkms.keys()
    if event.symbol == '-':
        print "completer"
        # zero-pad history indices to a uniform width
        width_dh = str(len(str(len(ip.user_ns['_sh']) + 1)))
        print width_dh
        # jump in directory history by number
        fmt = '-%0' + width_dh + 'd [%s]'
        ents = [fmt % (i, s) for i, s in enumerate(ip.user_ns['_sh'])]
        if len(ents) > 1:
            return ents
        return []
    raise IPython.ipapi.TryNext
class TreePager(object):
    """Render a hierarchy as an ASCII tree (like the unix ``tree`` command).

    Subclasses must provide ``getChildren(obj)`` and ``getName(obj)``;
    ``colors`` is a color-scheme dict and ``options`` carries the parsed
    magic-command options (``maxdepth`` etc.).
    """
    def __init__(self, colors, options):
        self.colors = colors
        self.options = options
        # print options.depth
    def do_level(self, obj, depth, isLast):
        """Recursively yield one formatted line per node under ``obj``.

        ``isLast`` tracks, per ancestor level, whether that ancestor was
        the last sibling -- it controls the branch characters drawn.
        """
        if isLast[-1]:
            sep = '`-- '
        else:
            sep = '|-- '
        #sep = '|__ '
        depth += 1
        branch = ''
        for x in isLast[:-1]:
            if x:
                branch += '    '
            else:
                branch += '|   '
        branch = self.colors['tree'] + branch + sep + self.colors['normal']
        children = self.getChildren(obj)
        name = self.getName(obj)
        num = len(children) - 1
        if children:
            # '+' marks a subtree collapsed by the maxdepth limit
            if self.options.maxdepth and depth >= self.options.maxdepth:
                state = '+'
            else:
                state = '-'
            pre = self.colors['collapsed'] + state + ' '
        else:
            pre = '  '
        yield pre + branch + name + self.colors['normal'] + '\n'
        # yield Colors.Yellow + branch + sep + Colors.Normal+ name + '\n'
        if not self.options.maxdepth or depth < self.options.maxdepth:
            for i, x in enumerate(children):
                for line in self.do_level(x, depth, isLast + [i == num]):
                    yield line
    def make_tree(self, roots):
        """Return the full tree for all ``roots`` as one string."""
        num = len(roots) - 1
        tree = ''
        for i, x in enumerate(roots):
            for line in self.do_level(x, 0, [i == num]):
                tree += line
        return tree
class DagTree(TreePager):
    """TreePager over the Maya DAG, colorizing instanced and non-uniquely
    named nodes."""
    def getChildren(self, obj):
        # optionally restrict to transforms only (-t flag)
        if self.options.shapes:
            return obj.getChildren()
        else:
            return obj.getChildren(type='transform')
    def getName(self, obj):
        """Return the colorized display name for a DAG node."""
        import pymel.core as pm
        name = obj.nodeName()
        if obj.isInstanced():
            if isinstance(obj, pm.nt.Transform):
                # keep transforms bolded
                color = self.colors['nonunique_transform']
            else:
                color = self.colors['nonunique']
            id = obj.instanceNumber()
            # non-primary instances point back at the first instance
            if id != 0:
                source = ' -> %s' % obj.getOtherInstances()[0]
            else:
                source = ''
            name = color + name + self.colors['instance'] + ' [' + str(id) + ']' + source
        elif not obj.isUniquelyNamed():
            if isinstance(obj, pm.nt.Transform):
                # keep transforms bolded
                color = self.colors['nonunique_transform']
            else:
                color = self.colors['nonunique']
            name = color + name
        elif isinstance(obj, pm.nt.Transform):
            # bold
            name = self.colors['transform'] + name
        else:
            name = self.colors['shape'] + name
        return name
# options for the %dag magic:
#   -d <n>  limit the displayed tree depth
#   -t      transforms only (default)
#   -s      include shape nodes
dag_parser = OptionParser()
dag_parser.add_option("-d", type="int", dest="maxdepth")
dag_parser.add_option("-t", action="store_false", dest="shapes", default=True)
dag_parser.add_option("-s", action="store_true", dest="shapes")
def magic_dag(self, parameter_s=''):
    """Print a colorized tree view of the Maya DAG.

    Usage: %dag [-d maxdepth] [-t | -s] [root]

    With a ``root`` argument only that node's subtree is shown; otherwise
    all top-level assemblies are used as roots.
    """
    import pymel.core as pm
    options, args = dag_parser.parse_args(parameter_s.split())
    colors = get_colors(self)
    dagtree = DagTree(colors, options)
    if args:
        roots = [pm.PyNode(args[0])]
    else:
        roots = pm.ls(assemblies=1)
    page(dagtree.make_tree(roots))
class DGHistoryTree(TreePager):
    """TreePager over a node's upstream dependency-graph connections."""
    def getChildren(self, obj):
        # obj is a (source_plug, dest_plug) pair; recurse upstream from
        # the source plug's node
        source, dest = obj
        return source.node().listConnections(plugs=True, connections=True, source=True, destination=False, sourceFirst=True)
    def getName(self, obj):
        source, dest = obj
        name = "%s -> %s" % (source, dest)
        return name
    def make_tree(self, root):
        """Build the tree starting from ``root``'s incoming connections."""
        import pymel.core as pm
        roots = pm.listConnections(root, plugs=True, connections=True, source=True, destination=False, sourceFirst=True)
        return TreePager.make_tree(self, roots)
# options for the %dghist magic (mirrors dag_parser):
#   -d <n>  limit depth; -t / -s toggle shape inclusion
dg_parser = OptionParser()
dg_parser.add_option("-d", type="int", dest="maxdepth")
dg_parser.add_option("-t", action="store_false", dest="shapes", default=True)
dg_parser.add_option("-s", action="store_true", dest="shapes")
def magic_dghist(self, parameter_s=''):
    """Print the upstream dependency-graph history of a node as a tree.

    Usage: %dghist [-d maxdepth] node
    """
    import pymel.core as pm
    options, args = dg_parser.parse_args(parameter_s.split())
    if not args:
        print "must pass in nodes to display the history of"
        return
    colors = get_colors(self)
    dgtree = DGHistoryTree(colors, options)
    roots = [pm.PyNode(args[0])]
    page(dgtree.make_tree(roots))
def magic_open(self, parameter_s=''):
    """Open a Maya file, with history-based shortcuts.

    Modeled on IPython's %cd magic, but operates on the recent-file list
    kept in the ``_sh`` variable (populated by setup()) and opens the
    result with Maya's ``openFile(..., f=1)``:

    openFile 'file' : opens 'file'.
    openFile -      : opens the previously visited file (``_sh[-2]``).
    openFile -<n>   : opens the n-th file in the file history.

    Options:

    -q: quiet; accepted for symmetry with %cd (parsed but currently unused).
    """
    parameter_s = parameter_s.strip()
    # Jump in file history by number: "-<n>".
    numcd = re.match(r'(-)(\d+)$', parameter_s)
    if numcd:
        nn = int(numcd.group(2))
        try:
            ps = ip.ev('_sh[%d]' % nn)
        except IndexError:
            print('The requested directory does not exist in history.')
            return
    else:
        # turn all non-space-escaping backslashes to slashes,
        # for c:\windows\directory\names\
        parameter_s = re.sub(r'\\(?! )', '/', parameter_s)
        opts, ps = self.parse_options(parameter_s, 'qb', mode='string')
        # Jump to the previously visited file.
        if ps == '-':
            try:
                # BUG FIX: the original evaluated "'_sh[-2]' % nn", which
                # always raises -- the string has no format placeholder and
                # nn is unbound on this branch.  Evaluate the history entry
                # directly instead.
                ps = ip.ev('_sh[-2]')
            except IndexError:
                raise UsageError('%cd -: No previous directory to change to.')
    # At this point ps should point at the target file.
    if ps:
        ip.ex('openFile("%s", f=1)' % ps)
# maya sets a sigint / ctrl-c / KeyboardInterrupt handler that quits maya -
# want to override this to get "normal" python interpreter behavior, where it
# interrupts the current python command, but doesn't exit the interpreter
def ipymel_sigint_handler(signal, frame):
    """SIGINT handler giving normal interpreter ctrl-c behavior.

    Interrupts the currently running python command instead of letting
    maya's own handler quit the application.
    """
    raise KeyboardInterrupt

def install_sigint_handler(force=False):
    """Install ``ipymel_sigint_handler`` as the process SIGINT handler.

    :param force: when True, always (re)install; otherwise install only if
        some other handler (e.g. one set by maya or a plugin) is in place.
    """
    import signal
    # BUG FIX: the original guard used "==", which only reinstalled the
    # handler when it was *already* ours -- so the non-forced call made from
    # sigint_plugin_loaded_callback could never undo maya's override.
    if force or signal.getsignal(signal.SIGINT) != ipymel_sigint_handler:
        signal.signal(signal.SIGINT, ipymel_sigint_handler)
# unfortunately, it seems maya overrides the SIGINT hook whenever a plugin is
# loaded...
def sigint_plugin_loaded_callback(*args):
    """MSceneMessage callback: reinstate our SIGINT handler after a plugin load.

    Per the Maya docs, as of 2015 the callback receives
    ``([pathToPlugin, pluginName], clientData)``; the arguments are ignored.
    """
    install_sigint_handler()

# Id returned by MSceneMessage.addStringArrayCallback once setup() registers
# the callback above; None until then.
sigint_plugin_loaded_callback_id = None
def setup(shell):
    """Wire pymel conveniences into an interactive IPython shell.

    Installs tab-completion hooks, imports pymel into the interactive
    namespace, registers the %openf/%dag/%dghist magics, seeds the project
    (_dh) and file (_sh) history lists from Maya option variables, and
    installs our SIGINT handler (plus a plugin-load callback to keep it
    installed).

    :param shell: an IPython shell object; may be either a pre-0.11 shell
        or anything exposing ``get_ipython()``.
    """
    global ip
    if hasattr(shell, 'get_ipython'):
        ip = shell.get_ipython()
    else:
        ip = get_ipython()
    # Completion hooks: python-object completion everywhere, maya node-name
    # completion after whitespace/parens/SCENE., filename completion for openf.
    ip.set_hook('complete_command', pymel_python_completer, re_key=".*")
    ip.set_hook('complete_command', pymel_name_completer, re_key="(.+(\s+|\())|(SCENE\.)")
    ip.set_hook('complete_command', open_completer, str_key="openf")
    ip.ex("from pymel.core import *")
    # stuff in __main__ is not necessarily in ipython's 'main' namespace... so
    # if the user has something in userSetup.py that he wants put in the
    # "interactive" namespace, it won't be - unless we do this:
    ip.ex('from __main__ import *')
    # if you don't want pymel imported into the main namespace, you can replace the above with something like:
    #ip.ex("import pymel as pm")
    ip.define_magic('openf', magic_open)
    ip.define_magic('dag', magic_dag)
    ip.define_magic('dghist', magic_dghist)
    # add projects
    ip.ex("""
import os.path
for _mayaproj in optionVar.get('RecentProjectsList', []):
    _mayaproj = os.path.join( _mayaproj, 'scenes' )
    if _mayaproj not in _dh:
        _dh.append(_mayaproj)""")
    # add files
    ip.ex("""
import os.path
_sh=[]
for _mayaproj in optionVar.get('RecentFilesList', []):
    if _mayaproj not in _sh:
        _sh.append(_mayaproj)""")
    # setup a handler for ctrl-c / SIGINT / KeyboardInterrupt, so maya / ipymel
    # doesn't quit
    install_sigint_handler(force=True)
    # unfortunately, when Mental Ray loads, it installs a new SIGINT handler
    # which restores the old "bad" behavior... need to install a plugin callback
    # to restore ours...
    global sigint_plugin_loaded_callback_id
    import pymel.core as pm
    if sigint_plugin_loaded_callback_id is None:
        sigint_plugin_loaded_callback_id = pm.api.MSceneMessage.addStringArrayCallback(
            pm.api.MSceneMessage.kAfterPluginLoad,
            sigint_plugin_loaded_callback)
def main():
    """Launch an interactive ipymel session under either IPython API.

    IPython releases before 0.11 used the ``IPython.Shell`` interface;
    later releases use the ``TerminalIPythonApp`` front end.
    """
    import IPython
    # Compare version components numerically where they are pure digits.
    version = []
    for part in IPython.__version__.split('.'):
        version.append(int(part) if part.isdigit() else part)
    if version < [0, 11]:
        # Legacy (pre-0.11) shell API.
        import IPython.Shell
        shell = IPython.Shell.start()
        setup(shell)
        shell.mainloop()
    else:
        # Modern application API.
        import IPython.frontend.terminal.ipapp
        app = IPython.frontend.terminal.ipapp.TerminalIPythonApp.instance()
        app.initialize()
        setup(app.shell)
        app.start()
# Allow running ipymel directly as a script: start IPython and block in its
# main loop.
if __name__ == '__main__':
    main()
| |
# -*- coding: utf-8 -*-
# based on http://djangosnippets.org/snippets/1792/ by monokrome
from __future__ import absolute_import
from __future__ import unicode_literals
from django.http import HttpResponse, Http404
from django.conf import settings
from django.core import serializers
from django.db.models import Model
from django.apps import apps as django_apps
from django.db.models.query import QuerySet
from django.template.defaultfilters import slugify
from django.utils.translation import ugettext as _
from datetime import date
import decimal, datetime
import csv
# Optional spreadsheet backends: the xl_active / odf_active flags record
# whether the XLS and ODS export formats are available in this installation;
# the corresponding writers and ALLOWED_EXPORT_TYPES entries are only
# defined when the import succeeds.
try:
    import xlwt  # XLS Writer, see pypi
    xl_active = True
except ImportError:
    xl_active = False
try:
    import odf, odf.opendocument, odf.table  # odfpy, see pypi
    odf_active = True
except ImportError:
    odf_active = False

import logging
# Project-wide logger; PROJECT_NAME is expected in Django settings.
logger = logging.getLogger(settings.PROJECT_NAME)
# Baseline export parameters; see ``export`` below for the meaning of each
# key.  Callers override individual keys via keyword arguments.
# NOTE(review): several consumers bind this dict directly and call .update()
# on it, mutating the shared defaults in place -- they should copy it first.
DEFAULT_PARAMS = {
    'app_label': '',
    'model_name': '',
    'model': None,
    'format': 'csv',
    'fields': [],
    'headers': [],
    'charset': 'utf-8',
    'filename': '',
    'sheet_title': _('Export'),
}
# Supported export formats.  Entries carrying a 'writer' are rendered row by
# row through a csv.writer-compatible object; entries carrying a 'serializer'
# are handed to Django's serialization framework.  'xls' and 'ods' entries
# are appended below when their optional backends import successfully.
ALLOWED_EXPORT_TYPES = {
    'csv': {
        'mimetype': 'text/csv',
        # 'template': 'admin/export/csv',
        'writer': csv.writer
    },
    'json': {
        'mimetype': 'text/json',
        'serializer': 'json',
    },
    'xml': {
        'mimetype': 'text/xml',
        'serializer': 'xml',
    },
    'yaml': {
        'mimetype': 'text/yaml',
        'serializer': 'yaml',
    },
    'py': {
        'mimetype': 'application/python',
        'serializer': 'python',
    },
}
if xl_active:
    class xlswriter(object):
        """
        XLS creator as drop-in replacement for csv.writer

        Rows are written into an in-memory xlwt workbook; ``save()``
        serializes the workbook to the target stream (or a filename).
        """
        # style0 = xlwt.easyxf('font: name Arial, color-index red, bold on', num_format_str='#,##0.00')
        # style1 = xlwt.easyxf(num_format_str='D-MMM-YY')

        def __init__(self, targetfile, **kwargs):
            # BUG FIX: work on a copy.  The original bound DEFAULT_PARAMS
            # itself and then update()d it in place, leaking each writer's
            # kwargs into the module-wide defaults and every later writer.
            self.params = dict(DEFAULT_PARAMS)
            self.params.update(kwargs)
            self.stream = targetfile
            self.xlwb = xlwt.Workbook(encoding=self.params['charset'])
            self.xlws = self.xlwb.add_sheet(self.params['sheet_title'])
            # Next row index written by writerow().
            self.rowcounter = 0

        def write_value(self, x, y, val, style):
            # xlwt's write() takes (row, column): x is the column, y the row.
            self.xlws.write(y, x, val, style)

        def write_formula(self, x, y, formula, style):
            self.xlws.write(y, x, xlwt.Formula(formula), style)

        def set_row_style(self, rownumber, style):
            return self.xlws.row(rownumber).set_style(style)

        def save(self, filename=None):
            """Serialize the workbook; defaults to the constructor stream."""
            if not filename:
                filename = self.stream
            self.xlwb.save(filename)
            self.rowcounter = 0

        def writerow(self, fields, style=None):
            if not style:
                style = xlwt.Style.default_style
            y = self.rowcounter
            for x in range(len(fields)):
                val = fields[x]
                if hasattr(val, 'startswith') and val.startswith('='):
                    # Spreadsheet formula: strip '=' (otherwise parsing error).
                    val = val.strip('=')
                    self.write_formula(x, y, val, style)
                else:
                    self.write_value(x, y, val, style)
            self.set_row_style(y, style)
            self.rowcounter += 1

        def writerows(self, rows):
            for row in rows:
                self.writerow(row)

    ALLOWED_EXPORT_TYPES['xls'] = {
        'mimetype': 'application/vnd.ms-excel',
        'writer': xlswriter
    }
if odf_active:
    class odswriter(object):
        """
        ODS creator as drop-in replacement for csv.writer

        Rows are written into an in-memory odfpy spreadsheet; ``save()``
        serializes the document to the target stream (or a filename).
        """

        def __init__(self, targetfile, **kwargs):
            # BUG FIX: copy the defaults instead of mutating the shared
            # DEFAULT_PARAMS dict in place (same fix as xlswriter).
            self.params = dict(DEFAULT_PARAMS)
            self.params.update(kwargs)
            self.stream = targetfile
            self.ods = odf.opendocument.OpenDocumentSpreadsheet()
            self.odtable = odf.table.Table(name=self.params['sheet_title'])
            self.ods.spreadsheet.addElement(self.odtable)
            # Row counter kept for parity with xlswriter (odfpy appends rows
            # itself, so it is informational only).
            self.rowcounter = 0

        def save(self, filename=None):
            if not filename:
                self.ods.write(self.stream)
            else:
                self.ods.save(filename)
            self.rowcounter = 0

        def writerow(self, fields, style=None):
            row = odf.table.TableRow()
            for x in range(len(fields)):
                val = fields[x]
                # Choose ODF cell attributes based on the Python type.
                args = {'value': val}
                if hasattr(val, 'startswith') and val.startswith('='):
                    args = {'formula': val}
                elif type(val) in (str, unicode):
                    args = {'stringvalue': val, 'valuetype': 'string'}
                elif type(val) in (decimal.Decimal,):
                    # NOTE(review): the decimal value itself is dropped here;
                    # only currency/valuetype attributes are set -- confirm.
                    args = {'currency': 'EUR', 'valuetype': 'currency'}
                elif type(val) in (int, float):
                    args['valuetype'] = 'float'
                elif type(val) in (datetime.datetime, datetime.date):
                    args = {'datevalue': val, 'valuetype': 'date'}
                elif type(val) in (datetime.time,):
                    args = {'timevalue': val, 'valuetype': 'time'}
                elif type(val) in (bool,):
                    args = {'booleanvalue': val, 'valuetype': 'boolean'}
                if style:
                    args['stylename'] = style
                row.addElement(odf.table.TableCell(attributes=args))
            self.odtable.addElement(row)
            self.rowcounter += 1

        def writerows(self, rows):
            for row in rows:
                self.writerow(row)

    ALLOWED_EXPORT_TYPES['ods'] = {
        'mimetype': 'application/vnd.oasis.opendocument.spreadsheet',
        'writer': odswriter
    }
def export(request, qs, **kwargs):
    """
    This view exports data in one of several formats.

    :param request: the Django request (unused, kept for view signature).
    :param qs: queryset (or iterable of model instances) to export.

    Keyword arguments:

    :app_label:
        application name
    :model_name:
        name of model within app_label
    :model: django model
        replacement for app_label and model_name
    :format:
        str, defined by `ALLOWED_EXPORT_TYPES`
        csv, json, xml, yaml, py, xls, ods
        default: csv
    :fields:
        list of model fields
        default: all fields of given model
    :headers:
        column names for some formats
        default: verbose_names of model's fields
    :charset:
        for text formats
        default: utf-8
    :filename:
        output filename
        default: <model_name>_<date>.<format>

    :returns: HttpResponse carrying the serialized data as an attachment.
    :raises Http404: for unsupported or misconfigured formats.
    """
    # BUG FIX: copy the defaults.  The original bound the module-level
    # DEFAULT_PARAMS and update()d it in place, so parameters leaked from
    # one export call into the next.
    prm = dict(DEFAULT_PARAMS)
    prm.update(kwargs)
    exformat = prm['format']
    if exformat not in ALLOWED_EXPORT_TYPES:
        err = _(u'%s is not a supported format.') % exformat
        logger.error(err)
        raise Http404(err)
    # Resolve the model either from app_label/model_name or directly.
    if prm['app_label'] and prm['model_name']:
        model = django_apps.get_model(prm['app_label'], prm['model_name'])
    elif prm['model']:
        model = prm['model']
    else:
        model = None
    if not prm['filename']:
        prm['filename'] = '%s_%s.%s' % (
            slugify(prm['model_name']),
            date.today().strftime('%Y-%m-%d'),
            exformat)
    if model:
        if not prm['fields']:
            prm['fields'] = [f.name for f in model._meta.local_fields]
        if not prm['headers']:
            try:
                prm['headers'] = [getattr(model, f).verbose_name for f in prm['fields']]
            except Exception as e:
                # Fall back to the raw field names when verbose_name lookup
                # fails (plain model attributes have no verbose_name).
                logger.error(e)
                prm['headers'] = prm['fields']
    mimetype = ALLOWED_EXPORT_TYPES[exformat]['mimetype']
    # The 'mimetype' kwarg was removed in Django 1.7; 'content_type' is the
    # long-standing equivalent (and is overridden by the header below anyway).
    response = HttpResponse(content_type=mimetype)
    response['Content-Type'] = '%s; charset=%s' % (mimetype, prm['charset'])
    response['Content-Disposition'] = 'attachment; filename=%s' % prm['filename']
    response['Cache-Control'] = 'must-revalidate'
    response['Pragma'] = 'must-revalidate'
    if 'writer' in ALLOWED_EXPORT_TYPES[exformat]:
        # Row-oriented formats: csv and (if available) xls/ods.
        writer = ALLOWED_EXPORT_TYPES[exformat]['writer'](response)
        writer.writerow(prm['headers'])
        for item in qs:
            row = []
            for field in prm['fields']:
                val = getattr(item, field)
                if callable(val):
                    val = val()
                if isinstance(val, QuerySet):
                    # Flatten related sets into one comma-separated string.
                    val = ', '.join(x.__unicode__() for x in val.all())
                elif isinstance(val, Model):
                    val = val.__unicode__()
                elif isinstance(val, bool):
                    val = {True: _('Yes'), False: _('No')}[val]
                elif val is None:
                    val = _('Unknown')
                if type(val) is unicode and prm['format'] != 'ods':
                    val = val.encode(prm['charset'])
                row.append(val)
            writer.writerow(row)
        # Spreadsheet writers buffer in memory and need an explicit save.
        if hasattr(writer, 'save'):
            writer.save()
    elif 'serializer' in ALLOWED_EXPORT_TYPES[exformat]:
        serializer = serializers.get_serializer(
            ALLOWED_EXPORT_TYPES[exformat]['serializer'])()
        serializer.serialize(
            qs.all(),
            fields=prm['fields'],
            ensure_ascii=False,
            stream=response)
    else:
        err = _('Export type for %s must have value for writer or serializer') % exformat
        logger.error(err)
        raise Http404(err)
    return response
| |
# Copyright Hybrid Logic Ltd. See LICENSE file for details.
"""
Functional tests for ``flocker.node.agents.ebs`` using an EC2 cluster.
"""
import time
from uuid import uuid4
from bitmath import Byte
from boto.ec2.volume import (
Volume as EbsVolume, AttachmentSet
)
from boto.exception import EC2ResponseError
from twisted.python.constants import Names, NamedConstant
from twisted.trial.unittest import SkipTest, TestCase
from eliot.testing import LoggedMessage, capture_logging, assertHasMessage
from ..ebs import (
_wait_for_volume_state_change, BOTO_EC2RESPONSE_ERROR,
VolumeOperations, VolumeStateTable, VolumeStates,
TimeoutException, _should_finish, UnexpectedStateException
)
from .._logging import (
AWS_CODE, AWS_MESSAGE, AWS_REQUEST_ID, BOTO_LOG_HEADER,
IN_USE_DEVICES
)
from ..test.test_blockdevice import make_iblockdeviceapi_tests
from ..test.blockdevicefactory import (
InvalidConfig, ProviderType, get_blockdevice_config,
get_blockdeviceapi_with_cleanup, get_device_allocation_unit,
get_minimum_allocatable_size, get_ec2_client_for_test,
)
# Upper bound in seconds passed to _should_finish / _wait_for_volume_state_change
# in these tests; _assert_timeout also sleeps this long to force a timeout.
TIMEOUT = 5
def ebsblockdeviceapi_for_test(test_case):
    """
    Create an ``EBSBlockDeviceAPI`` for use by tests.

    :param test_case: The ``TestCase`` the API is created for; passed to
        ``get_blockdeviceapi_with_cleanup`` so cleanup can be registered
        against it.
    :returns: An AWS-backed block device API obtained from
        ``get_blockdeviceapi_with_cleanup``.
    """
    return get_blockdeviceapi_with_cleanup(test_case, ProviderType.aws)
class EBSBlockDeviceAPIInterfaceTests(
        make_iblockdeviceapi_tests(
            blockdevice_api_factory=(
                lambda test_case: ebsblockdeviceapi_for_test(
                    test_case=test_case,
                )
            ),
            minimum_allocatable_size=get_minimum_allocatable_size(),
            device_allocation_unit=get_device_allocation_unit(),
            unknown_blockdevice_id_factory=lambda test: u"vol-00000000",
        )
):
    """
    Interface adherence Tests for ``EBSBlockDeviceAPI``.

    The base class generated by ``make_iblockdeviceapi_tests`` provides
    ``self.api`` (the block device API under test) plus the generic
    IBlockDeviceAPI test methods; the methods below add EBS specifics.
    """

    def test_foreign_volume(self):
        """
        ``list_volumes`` lists only those volumes
        belonging to the current Flocker cluster.
        """
        try:
            config = get_blockdevice_config(ProviderType.aws)
        except InvalidConfig as e:
            # No usable AWS configuration available; skip rather than fail.
            raise SkipTest(str(e))
        ec2_client = get_ec2_client_for_test(config)
        # Create a volume directly through boto, i.e. outside any Flocker
        # cluster, and make sure it is deleted again after the test.
        requested_volume = ec2_client.connection.create_volume(
            int(Byte(self.minimum_allocatable_size).to_GiB().value),
            ec2_client.zone)
        self.addCleanup(ec2_client.connection.delete_volume,
                        requested_volume.id)
        _wait_for_volume_state_change(VolumeOperations.CREATE,
                                      requested_volume)
        # The non-cluster volume must not show up in the API's listing.
        self.assertEqual(self.api.list_volumes(), [])

    def test_foreign_cluster_volume(self):
        """
        ``list_volumes`` excludes volumes belonging to
        other Flocker clusters.
        """
        # A second API instance represents a different cluster.
        blockdevice_api2 = ebsblockdeviceapi_for_test(
            test_case=self,
        )
        flocker_volume = blockdevice_api2.create_volume(
            dataset_id=uuid4(),
            size=self.minimum_allocatable_size,
        )
        self.assert_foreign_volume(flocker_volume)

    @capture_logging(lambda self, logger: None)
    def test_boto_ec2response_error(self, logger):
        """
        1. Invalid parameters to Boto's EBS API calls
        raise the right exception after logging to Eliot.
        2. Verify Eliot log output for expected message fields
        from logging decorator for boto.exception.EC2Exception
        originating from boto.ec2.connection.EC2Connection.
        """
        # Test 1: Create volume with size 0.
        # Raises: EC2ResponseError
        self.assertRaises(EC2ResponseError, self.api.create_volume,
                          dataset_id=uuid4(), size=0,)

        # Test 2: Set EC2 connection zone to an invalid string.
        # Raises: EC2ResponseError
        self.api.zone = u'invalid_zone'
        self.assertRaises(
            EC2ResponseError,
            self.api.create_volume,
            dataset_id=uuid4(),
            size=self.minimum_allocatable_size,
        )

        # Validate decorated method for exception logging
        # actually logged to ``Eliot`` logger.
        expected_message_keys = {AWS_CODE.key, AWS_MESSAGE.key,
                                 AWS_REQUEST_ID.key}
        for logged in LoggedMessage.of_type(logger.messages,
                                            BOTO_EC2RESPONSE_ERROR,):
            key_subset = set(key for key in expected_message_keys
                             if key in logged.message.keys())
            self.assertEqual(expected_message_keys, key_subset)

    @capture_logging(None)
    def test_boto_request_logging(self, logger):
        """
        Boto is configured to send log events to Eliot when it makes an AWS API
        request.
        """
        self.api.list_volumes()
        messages = list(
            message
            for message
            in logger.messages
            if message.get("message_type") == BOTO_LOG_HEADER
        )
        self.assertNotEqual(
            [], messages,
            "Didn't find Boto messages in logged messages {}".format(
                messages
            )
        )

    def test_next_device_in_use(self):
        """
        ``_next_device`` skips devices indicated as being in use.

        Ideally we'd have a test for this using the public API, but this
        only occurs if we hit eventually consistent ignorance in the AWS
        servers so it's hard to trigger deterministically.
        """
        # /dev/sdf is reported as in use, so the next free name is /dev/sdg.
        result = self.api._next_device(self.api.compute_instance_id(), [],
                                       {u"/dev/sdf"})
        self.assertEqual(result, u"/dev/sdg")

    @capture_logging(
        assertHasMessage, IN_USE_DEVICES, {
            'devices': [u'/dev/sda1']
        },
    )
    def test_in_use_devices_log(self, logger):
        """
        Attached device shows up as being in use during subsequent
        ``attach_volume``.
        """
        volume1 = self.api.create_volume(
            dataset_id=uuid4(),
            size=self.minimum_allocatable_size,
        )
        self.api.attach_volume(
            volume1.blockdevice_id, attach_to=self.this_node,
        )
class VolumeStateTransitionTests(TestCase):
    """
    Tests for volume state operations and resulting volume state changes.
    """

    class VolumeEndStateTypes(Names):
        """
        Types of volume states to simulate.
        """
        ERROR_STATE = NamedConstant()
        TRANSIT_STATE = NamedConstant()
        DESTINATION_STATE = NamedConstant()

    class VolumeAttachDataTypes(Names):
        """
        Types of volume's attach data states to simulate.
        """
        MISSING_ATTACH_DATA = NamedConstant()
        MISSING_INSTANCE_ID = NamedConstant()
        MISSING_DEVICE = NamedConstant()
        ATTACH_SUCCESS = NamedConstant()
        DETACH_SUCCESS = NamedConstant()

    # Short aliases used throughout the helpers and tests below.
    V = VolumeOperations
    S = VolumeEndStateTypes
    A = VolumeAttachDataTypes

    def _create_template_ebs_volume(self, operation):
        """
        Helper function to create template EBS volume to work on.

        :param NamedConstant operation: Intended use of created template.
            A value from ``VolumeOperations``.

        :returns: Suitable volume in the right start state for input operation.
        :rtype: boto.ec2.volume.Volume
        """
        volume = EbsVolume()

        # Irrelevant volume attributes.
        volume.id = u'vol-9c48a689'
        volume.create_time = u'2015-07-14T22:46:00.447Z'
        volume.size = 1
        volume.snapshot_id = ''
        volume.zone = u'us-west-2b'
        volume.type = u'standard'

        volume_state_table = VolumeStateTable()
        state_flow = volume_state_table.table[operation]
        start_state = state_flow.start_state.value

        # Interesting volume attribute.
        volume.status = start_state

        return volume

    def _pick_end_state(self, operation, state_type):
        """
        Helper function to pick a desired volume state for given input
        operation.

        :param NamedConstant operation: Volume operation to pick a
            state for. A value from ``VolumeOperations``.
        :param NamedConstant state_type: Volume state type request.

        :returns: A state from ``VolumeStates`` that will not be part of
            a volume's states resulting from input operation.
        :rtype: ValueConstant
        """
        volume_state_table = VolumeStateTable()
        state_flow = volume_state_table.table[operation]

        if state_type == self.S.ERROR_STATE:
            # Any state outside the operation's start/transient/end states
            # counts as an error state for that operation.
            valid_states = set([state_flow.start_state,
                                state_flow.transient_state,
                                state_flow.end_state])

            err_states = set(VolumeStates._enumerants.values()) - valid_states
            err_state = err_states.pop()
            return err_state.value
        elif state_type == self.S.TRANSIT_STATE:
            return state_flow.transient_state.value
        elif state_type == self.S.DESTINATION_STATE:
            return state_flow.end_state.value

    def _pick_attach_data(self, attach_type):
        """
        Helper function to create desired volume attach data.

        :param NamedConstant attach_type: Type of attach data to create.

        :returns: Volume attachment set that conforms to requested attach type.
        :rtype: AttachmentSet
        """
        if attach_type == self.A.MISSING_ATTACH_DATA:
            return None
        elif attach_type == self.A.MISSING_INSTANCE_ID:
            attach_data = AttachmentSet()
            attach_data.device = u'/dev/sdf'
            attach_data.instance_id = ''
            return attach_data
        elif attach_type == self.A.MISSING_DEVICE:
            attach_data = AttachmentSet()
            attach_data.device = ''
            attach_data.instance_id = u'i-xyz'
            return attach_data
        elif attach_type == self.A.ATTACH_SUCCESS:
            attach_data = AttachmentSet()
            attach_data.device = u'/dev/sdf'
            attach_data.instance_id = u'i-xyz'
            return attach_data
        elif attach_type == self.A.DETACH_SUCCESS:
            return None

    def _custom_update(self, operation, state_type,
                       attach_data=A.MISSING_ATTACH_DATA):
        """
        Create a custom update function for a volume.
        """
        def update(volume):
            """
            Transition volume to desired end state and attach data.

            :param boto.ec2.volume.Volume volume: Volume to move to
                invalid state.
            """
            volume.status = self._pick_end_state(operation, state_type)
            volume.attach_data = self._pick_attach_data(attach_data)
        return update

    def _assert_unexpected_state_exception(self, operation,
                                           volume_end_state_type,
                                           attach_type=A.MISSING_ATTACH_DATA):
        """
        Assert that configured volume state change for given testcase indicates
        incomplete operation execution.
        """
        volume = self._create_template_ebs_volume(operation)
        update = self._custom_update(operation, volume_end_state_type,
                                     attach_type)
        start_time = time.time()
        self.assertRaises(UnexpectedStateException, _should_finish,
                          operation, volume, update, start_time, TIMEOUT)

    def _assert_fail(self, operation, volume_end_state_type,
                     attach_data_type=A.MISSING_ATTACH_DATA):
        """
        Assert that configured volume state change for given testcase indicates
        incomplete operation execution.
        """
        volume = self._create_template_ebs_volume(operation)
        update = self._custom_update(operation, volume_end_state_type,
                                     attach_data_type)
        start_time = time.time()
        # NOTE(review): no explicit timeout here, unlike the other helpers --
        # relies on _should_finish's default timeout argument; confirm.
        finish_result = _should_finish(operation, volume, update, start_time)
        self.assertEqual(False, finish_result)

    def _assert_timeout(self, operation, testcase,
                        attach_data_type=A.MISSING_ATTACH_DATA):
        """
        Helper function to validate that ``TimeoutException`` is raised as
        a result of performing input operation for given testcase on a volume.
        """
        volume = self._create_template_ebs_volume(operation)
        update = self._custom_update(operation, testcase, attach_data_type)
        start_time = time.time()
        # Let the clock run past the deadline so the next poll times out.
        time.sleep(TIMEOUT)
        self.assertRaises(TimeoutException, _should_finish,
                          operation, volume, update, start_time, TIMEOUT)

    def _process_volume(self, operation, testcase,
                        attach_data_type=A.ATTACH_SUCCESS):
        """
        Helper function to validate that performing given operation for given
        testcase on a volume succeeds.
        """
        volume = self._create_template_ebs_volume(operation)
        _wait_for_volume_state_change(operation, volume,
                                      self._custom_update(operation, testcase,
                                                          attach_data_type),
                                      TIMEOUT)
        return volume

    def test_create_invalid_state(self):
        """
        Assert that error volume state during creation raises
        ``UnexpectedStateException``.
        """
        self._assert_unexpected_state_exception(self.V.CREATE,
                                                self.S.ERROR_STATE)

    def test_destroy_invalid_state(self):
        """
        Assert that error volume state during destroy raises
        ``UnexpectedStateException``.
        """
        self._assert_unexpected_state_exception(self.V.DESTROY,
                                                self.S.ERROR_STATE)

    def test_attach_invalid_state(self):
        """
        Assert that error volume state during attach raises
        ``UnexpectedStateException``.
        """
        self._assert_unexpected_state_exception(self.V.ATTACH,
                                                self.S.ERROR_STATE)

    def test_detach_invalid_state(self):
        """
        Assert that error volume state during detach raises
        ``UnexpectedStateException``.
        """
        self._assert_unexpected_state_exception(self.V.DETACH,
                                                self.S.ERROR_STATE)

    def test_stuck_create(self):
        """
        Assert that stuck create state indicates operation in progress.
        """
        self._assert_fail(self.V.CREATE, self.S.TRANSIT_STATE)

    def test_stuck_destroy(self):
        """
        Assert that stuck destroy state indicates operation in progress.
        """
        self._assert_fail(self.V.DESTROY, self.S.TRANSIT_STATE)

    def test_stuck_attach(self):
        """
        Assert that stuck attach state indicates operation in progress.
        """
        self._assert_fail(self.V.ATTACH, self.S.TRANSIT_STATE)

    def test_stuck_detach(self):
        """
        Assert that stuck detach state indicates operation in progress.
        """
        self._assert_fail(self.V.DETACH, self.S.TRANSIT_STATE)

    def test_attach_missing_attach_data(self):
        """
        Assert that missing attach data indicates attach in progress.
        """
        self._assert_fail(self.V.ATTACH, self.S.DESTINATION_STATE)

    def test_attach_missing_instance_id(self):
        """
        Assert that missing attach instance id indicates attach in progress.
        """
        self._assert_fail(self.V.ATTACH, self.S.DESTINATION_STATE,
                          self.A.MISSING_INSTANCE_ID)

    def test_attach_missing_device(self):
        """
        Assert that missing attached device name indicates attach in progress.
        """
        self._assert_fail(self.V.ATTACH, self.S.DESTINATION_STATE,
                          self.A.MISSING_DEVICE)

    def test_timeout(self):
        """
        Assert that ``TimeoutException`` is thrown if volume state transition
        takes longer than configured timeout.
        """
        self._assert_timeout(self.V.ATTACH, self.S.DESTINATION_STATE)

    def test_create_success(self):
        """
        Assert that successful volume creation leads to valid volume end state.
        """
        volume = self._process_volume(self.V.CREATE, self.S.DESTINATION_STATE)
        self.assertEqual(volume.status, u'available')

    def test_destroy_success(self):
        """
        Assert that successful volume destruction leads to valid end state.
        """
        volume = self._process_volume(self.V.DESTROY, self.S.DESTINATION_STATE)
        # Fixed deprecated ``assertEquals`` alias -> ``assertEqual``.
        self.assertEqual(volume.status, u'')

    def test_attach_sucess(self):
        """
        Test if successful attach volume operation leads to expected state.

        NOTE: method name typo ("sucess") kept as-is -- renaming would change
        the externally visible test id.
        """
        volume = self._process_volume(self.V.ATTACH, self.S.DESTINATION_STATE)
        self.assertEqual([volume.status, volume.attach_data.device,
                          volume.attach_data.instance_id],
                         [u'in-use', u'/dev/sdf', u'i-xyz'])

    def test_detach_success(self):
        """
        Test if successful detach volume operation leads to expected state.
        """
        volume = self._process_volume(self.V.DETACH, self.S.DESTINATION_STATE,
                                      self.A.DETACH_SUCCESS)
        self.assertEqual(volume.status, u'available')
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Script to automate creating builds of projects."""
from __future__ import print_function
import argparse
import logging
import os
import subprocess
import sys
from l2tdevtools import build_helper
from l2tdevtools import dependencies
from l2tdevtools import download_helper
from l2tdevtools import source_helper
# Since os.path.abspath() resolves relative to the current working directory
# (cwd), os.path.abspath(__file__) would point to a different location after
# any chdir.  Hence we pin the absolute location of __file__ up front.
__file__ = os.path.abspath(__file__)
# TODO: look into merging functionality with update dependencies script.
class DependencyBuilder(object):
"""Class that helps in building dependencies."""
# TODO: add phases for building sleuthkit/pytsk.
# The distributions to build dpkg-source packages for.
_DPKG_SOURCE_DISTRIBUTIONS = frozenset([
u'precise', u'trusty', u'vivid', u'wily'])
_LIBYAL_LIBRARIES = frozenset([u'libewf'])
  def __init__(self, build_target):
    """Initializes the dependency builder.

    Args:
      build_target: a string containing the build target, e.g. u'download',
                    u'dpkg', u'dpkg-source', u'msi', u'pkg' or u'rpm'; it
                    selects the build helper used by the _Build* methods.
    """
    super(DependencyBuilder, self).__init__()
    self._build_target = build_target
  def _BuildDependency(self, download_helper_object, dependency_definition):
    """Builds a dependency.

    Downloads and cleans the source package, then dispatches to the build
    routine matching either the u'download' build target or the dependency's
    declared build system (configure_make or setup_py).

    Args:
      download_helper_object: the download helper (instance of DownloadHelper).
      dependency_definition: the dependency definition object (instance of
                             DependencyDefinition).

    Returns:
      True if the build is successful or False on error.
    """
    source_helper_object = source_helper.SourcePackageHelper(
        dependency_definition.name, download_helper_object)

    source_helper_object.Clean()

    # Unify http:// and https:// URLs for the download helper check.
    # NOTE(review): download_url is normalized here but never used below --
    # confirm whether this is leftover code.
    download_url = dependency_definition.download_url
    if download_url.startswith(u'https://'):
      download_url = u'http://{0:s}'.format(download_url[8:])

    if self._build_target == u'download':
      source_filename = source_helper_object.Download()

      # If available run the script post-download.sh after download.
      if os.path.exists(u'post-download.sh'):
        command = u'sh ./post-download.sh {0:s}'.format(source_filename)
        exit_code = subprocess.call(command, shell=True)
        if exit_code != 0:
          logging.error(u'Running: "{0:s}" failed.'.format(command))
          return False

    elif dependency_definition.build_system == u'configure_make':
      if not self._BuildConfigureMake(
          source_helper_object, dependency_definition):
        return False

    elif dependency_definition.build_system == u'setup_py':
      if not self._BuildSetupPy(
          source_helper_object, dependency_definition):
        return False

    else:
      logging.warning(u'Unable to determine how to build: {0:s}'.format(
          dependency_definition.name))
      return False

    return True
  def _BuildConfigureMake(self, source_helper_object, dependency_definition):
    """Builds a Python module that comes with configure and make.

    Args:
      source_helper_object: the source helper (instance of SourceHelper).
      dependency_definition: the dependency definition object (instance of
                             DependencyDefinition).

    Returns:
      True if the build is successful or False on error.
    """
    tools_path = os.path.dirname(__file__)
    data_path = os.path.join(os.path.dirname(tools_path), u'data')

    build_helper_object = None
    distributions = [None]
    # Select the build helper matching the requested build target;
    # dpkg-source additionally fans out over all supported distributions.
    if self._build_target == u'dpkg':
      build_helper_object = build_helper.ConfigureMakeDpkgBuildHelper(
          dependency_definition, data_path)

    elif self._build_target == u'dpkg-source':
      build_helper_object = build_helper.ConfigureMakeSourceDpkgBuildHelper(
          dependency_definition, data_path)
      distributions = self._DPKG_SOURCE_DISTRIBUTIONS

    elif self._build_target == u'msi':
      build_helper_object = build_helper.ConfigureMakeMsiBuildHelper(
          dependency_definition, data_path, tools_path)

    elif self._build_target == u'pkg':
      build_helper_object = build_helper.ConfigureMakePkgBuildHelper(
          dependency_definition, data_path)

    elif self._build_target == u'rpm':
      build_helper_object = build_helper.ConfigureMakeRpmBuildHelper(
          dependency_definition, data_path)

    # Unsupported build target for this build system.
    if not build_helper_object:
      return False

    build_dependencies = build_helper_object.CheckBuildDependencies()
    if build_dependencies:
      logging.warning(
          u'Missing build dependencies: {0:s}.'.format(
              u', '.join(build_dependencies)))
      return False

    for distribution in distributions:
      if distribution:
        build_helper_object.distribution = distribution

      output_filename = build_helper_object.GetOutputFilename(
          source_helper_object)
      build_helper_object.Clean(source_helper_object)

      # Skip the build when the output artifact already exists.
      if not os.path.exists(output_filename):
        if not build_helper_object.Build(source_helper_object):
          if not os.path.exists(build_helper_object.LOG_FILENAME):
            logging.warning(u'Build of: {0:s} failed.'.format(
                source_helper_object.project_name))
          else:
            log_filename = u'{0:s}_{1:s}'.format(
                source_helper_object.project_name,
                build_helper_object.LOG_FILENAME)

            # Remove older logfiles if they exists otherwise the rename
            # fails on Windows.
            if os.path.exists(log_filename):
              os.remove(log_filename)

            os.rename(build_helper_object.LOG_FILENAME, log_filename)
            logging.warning((
                u'Build of: {0:s} failed, for more information check '
                u'{1:s}').format(
                    source_helper_object.project_name, log_filename))
          return False

      # Successful builds leave no log behind.
      if os.path.exists(build_helper_object.LOG_FILENAME):
        logging.info(u'Removing: {0:s}'.format(
            build_helper_object.LOG_FILENAME))
        os.remove(build_helper_object.LOG_FILENAME)

    return True
def _BuildSetupPy(self, source_helper_object, dependency_definition):
  """Builds a Python module that comes with setup.py.

  Args:
    source_helper_object: the source helper (instance of SourceHelper).
    dependency_definition: the dependency definition object (instance of
                           DependencyDefinition).

  Returns:
    True if the build is successful or False on error.
  """
  tools_path = os.path.dirname(__file__)
  data_path = os.path.join(os.path.dirname(tools_path), u'data')

  # Map each build target onto the corresponding setup.py build helper class.
  helper_classes = {
      u'dpkg': build_helper.SetupPyDpkgBuildHelper,
      u'dpkg-source': build_helper.SetupPySourceDpkgBuildHelper,
      u'msi': build_helper.SetupPyMsiBuildHelper,
      u'pkg': build_helper.SetupPyPkgBuildHelper,
      u'rpm': build_helper.SetupPyRpmBuildHelper}

  helper_class = helper_classes.get(self._build_target, None)
  if not helper_class:
    return False

  build_helper_object = helper_class(dependency_definition, data_path)

  # Only the dpkg-source target builds for multiple distributions.
  if self._build_target == u'dpkg-source':
    distributions = self._DPKG_SOURCE_DISTRIBUTIONS
  else:
    distributions = [None]

  missing_packages = build_helper_object.CheckBuildDependencies()
  if missing_packages:
    logging.warning(
        u'Missing build dependencies: {0:s}.'.format(
            u', '.join(missing_packages)))
    return False

  for distribution in distributions:
    if distribution:
      build_helper_object.distribution = distribution

    output_filename = build_helper_object.GetOutputFilename(
        source_helper_object)
    build_helper_object.Clean(source_helper_object)

    # Skip the build when the output already exists.
    if os.path.exists(output_filename):
      continue

    if build_helper_object.Build(source_helper_object):
      continue

    # The build failed; preserve the build log when one was written.
    if not os.path.exists(build_helper_object.LOG_FILENAME):
      logging.warning(u'Build of: {0:s} failed.'.format(
          source_helper_object.project_name))
    else:
      log_filename = u'{0:s}_{1:s}'.format(
          source_helper_object.project_name,
          build_helper_object.LOG_FILENAME)

      # Remove an older logfile if it exists otherwise the rename
      # fails on Windows.
      if os.path.exists(log_filename):
        os.remove(log_filename)

      os.rename(build_helper_object.LOG_FILENAME, log_filename)
      logging.warning((
          u'Build of: {0:s} failed, for more information check '
          u'{1:s}').format(
              source_helper_object.project_name, log_filename))
    return False

  # On success remove the leftover build log.
  if os.path.exists(build_helper_object.LOG_FILENAME):
    logging.info(u'Removing: {0:s}'.format(
        build_helper_object.LOG_FILENAME))
    os.remove(build_helper_object.LOG_FILENAME)

  return True
def Build(self, dependency_definition):
  """Builds a dependency.

  Args:
    dependency_definition: the dependency definition object (instance of
                           DependencyDefinition).

  Returns:
    True if the build is successful or False on error.

  Raises:
    ValueError: if the project type is unsupported.
  """
  url = dependency_definition.download_url
  if url.endswith(u'/'):
    url = url[:-1]

  # Unify http:// and https:// URLs for the download helper check.
  if url.startswith(u'https://'):
    url = u'http://{0:s}'.format(url[8:])

  if (url.startswith(u'http://code.google.com/p/') and
      url.endswith(u'/downloads/list')):
    helper = download_helper.GoogleCodeWikiDownloadHelper()

  elif url.startswith(u'http://pypi.python.org/pypi/'):
    helper = download_helper.PyPiDownloadHelper()

  elif (url.startswith(u'http://sourceforge.net/projects/') and
        url.endswith(u'/files')):
    helper = download_helper.SourceForgeDownloadHelper()

  # TODO: make this a more generic github download helper when
  # when Google Drive support is no longer needed.
  elif (url.startswith(u'http://github.com/libyal/') or
        url.startswith(u'http://googledrive.com/host/')):
    helper = download_helper.LibyalGitHubDownloadHelper()

  elif (url.startswith(u'http://github.com/') and
        url.endswith(u'/releases')):
    # Strip the scheme and host (18 characters) and the trailing
    # "/releases" (9 characters) to isolate "<organization>/<project>".
    organization, _, _ = url[18:-9].rpartition(u'/')
    helper = download_helper.GithubReleasesDownloadHelper(organization)

  else:
    raise ValueError(u'Unsupported download URL: {0:s}.'.format(url))

  return self._BuildDependency(helper, dependency_definition)
def Main():
  """The main program function.

  Returns:
    True if all requested builds were successful, False otherwise.
  """
  build_targets = frozenset([
      u'download', u'dpkg', u'dpkg-source', u'msi', u'pkg', u'rpm'])

  argument_parser = argparse.ArgumentParser(description=(
      u'Downloads and builds the latest versions of projects.'))

  argument_parser.add_argument(
      u'build_target', choices=sorted(build_targets), action=u'store',
      metavar=u'BUILD_TARGET', default=None, help=u'The build target.')

  argument_parser.add_argument(
      u'--build-directory', u'--build_directory', action=u'store',
      metavar=u'DIRECTORY', dest=u'build_directory', type=unicode,
      default=u'build', help=u'The location of the build directory.')

  argument_parser.add_argument(
      u'-c', u'--config', dest=u'config_file', action=u'store',
      metavar=u'CONFIG_FILE', default=None,
      help=u'path of the build configuration file.')

  argument_parser.add_argument(
      u'--projects', dest=u'projects', action=u'store',
      metavar=u'PROJECT_NAME(S)', default=None,
      help=(
          u'comma separated list of specific project names to build. The '
          u'default is to build all project defined in the configuration '
          u'file.'))

  options = argument_parser.parse_args()

  if not options.build_target:
    print(u'Build target missing.')
    print(u'')
    argument_parser.print_help()
    print(u'')
    return False

  if options.build_target not in build_targets:
    print(u'Unsupported build target: {0:s}.'.format(options.build_target))
    print(u'')
    argument_parser.print_help()
    print(u'')
    return False

  if not options.config_file:
    # Default to the data/projects.ini file relative to this script.
    options.config_file = os.path.dirname(__file__)
    options.config_file = os.path.dirname(options.config_file)
    options.config_file = os.path.join(
        options.config_file, u'data', u'projects.ini')

  if not os.path.exists(options.config_file):
    print(u'No such config file: {0:s}.'.format(options.config_file))
    print(u'')
    return False

  logging.basicConfig(
      level=logging.INFO, format=u'[%(levelname)s] %(message)s')

  dependency_builder = DependencyBuilder(options.build_target)

  # TODO: package ipython.

  # TODO:
  # (u'protobuf', DependencyBuilder.PROJECT_TYPE_GOOGLE_CODE_WIKI),
  # ./configure
  # make
  # cd python
  # python setup.py build
  # python setup.py install --root $PWD/tmp
  #
  # Build of rpm fails:
  # python setup.py bdist_rpm
  #
  # Solution: use protobuf-python.spec to build

  # TODO: rpm build of psutil is broken, fix upstream or add patching.
  # (u'psutil', DependencyBuilder.PROJECT_TYPE_PYPI),

  if options.projects:
    projects = options.projects.split(u',')
  else:
    projects = []

  builds = []
  with open(options.config_file) as file_object:
    dependency_definition_reader = dependencies.DependencyDefinitionReader()
    for dependency_definition in dependency_definition_reader.Read(file_object):
      is_disabled = False
      if (options.build_target in dependency_definition.disabled or
          u'all' in dependency_definition.disabled):
        if dependency_definition.name not in projects:
          is_disabled = True
        else:
          # If a project is manually specified ignore the disabled status.
          logging.info(u'Ignoring disabled status for: {0:s}'.format(
              dependency_definition.name))

      if not is_disabled:
        builds.append(dependency_definition)

  if not os.path.exists(options.build_directory):
    os.mkdir(options.build_directory)

  current_working_directory = os.getcwd()
  os.chdir(options.build_directory)

  failed_builds = []
  for dependency_definition in builds:
    # An empty projects list means every project should be built; without
    # the "projects and" guard every build would be skipped whenever
    # --projects is not specified.
    if projects and dependency_definition.name not in projects:
      continue

    logging.info(u'Processing: {0:s}'.format(dependency_definition.name))

    # TODO: add support for dokan, bzip2
    # TODO: setup sqlite in build directory.
    if not dependency_builder.Build(dependency_definition):
      print(u'Failed building: {0:s}'.format(dependency_definition.name))
      failed_builds.append(dependency_definition.name)

  os.chdir(current_working_directory)

  if failed_builds:
    print(u'')
    print(u'Failed building:')
    for failed_build in failed_builds:
      print(u'\t{0:s}'.format(failed_build))

  return not failed_builds
if __name__ == '__main__':
  # Exit with status 0 on success, 1 on failure.
  sys.exit(0 if Main() else 1)
| |
'''
Text Markup
===========
.. versionadded:: 1.1.0
.. versionchanged:: 1.10.1
Added `font_context`, `font_features` and `text_language` (Pango only)
We provide a simple text-markup for inline text styling. The syntax looks the
same as the `BBCode <http://en.wikipedia.org/wiki/BBCode>`_.
A tag is defined as ``[tag]``, and should have a corresponding
``[/tag]`` closing tag. For example::
[b]Hello [color=ff0000]world[/color][/b]
The following tags are available:
``[b][/b]``
Activate bold text
``[i][/i]``
Activate italic text
``[u][/u]``
Underlined text
``[s][/s]``
Strikethrough text
``[font=<str>][/font]``
Change the font (note: this refers to a TTF file or registered alias)
``[font_context=<str>][/font_context]``
Change context for the font, use string value "none" for isolated context.
``[font_family=<str>][/font_family]``
Font family to request for drawing. This is only valid when using a
font context, and takes precedence over `[font]`. See
:class:`kivy.uix.label.Label` for details.
``[font_features=<str>][/font_features]``
OpenType font features, in CSS format, this is passed straight
through to Pango. The effects of requesting a feature depends on loaded
fonts, library versions, etc. Pango only, requires v1.38 or later.
``[size=<size>][/size]``
Change the font size. <size> should be an integer, optionally with a
unit (i.e. ``16sp``)
``[color=#<color>][/color]``
Change the text color
``[ref=<str>][/ref]``
    Add an interactive zone. The reference + all the word boxes inside the
    reference will be available in :attr:`MarkupLabel.refs`
``[anchor=<str>]``
Put an anchor in the text. You can get the position of your anchor within
the text with :attr:`MarkupLabel.anchors`
``[sub][/sub]``
Display the text at a subscript position relative to the text before it.
``[sup][/sup]``
Display the text at a superscript position relative to the text before it.
``[text_language=<str>][/text_language]``
Language of the text, this is an RFC-3066 format language tag (as string),
for example "en_US", "zh_CN", "fr" or "ja". This can impact font selection,
metrics and rendering. For example, the same bytes of text can look
different for `ur` and `ar` languages, though both use Arabic script.
Use the string `'none'` to revert to locale detection. Pango only.
If you need to escape the markup from the current text, use
:func:`kivy.utils.escape_markup`.
'''
__all__ = ('MarkupLabel', )
import re
from kivy.properties import dpi2px
from kivy.parser import parse_color
from kivy.logger import Logger
from kivy.core.text import Label, LabelBase
from kivy.core.text.text_layout import layout_text, LayoutWord, LayoutLine
from copy import copy
from functools import partial
# We need to do this trick when the documentation is generated: in that
# case `Label` is None, so fall back to `LabelBase` to keep this module
# importable.
MarkupLabelBase = Label
if Label is None:
    MarkupLabelBase = LabelBase
class MarkupLabel(MarkupLabelBase):
    '''Markup text label.

    See module documentation for more information.
    '''

    def __init__(self, *largs, **kwargs):
        # Per-option stacks of previous values, pushed/popped as markup
        # tags open and close during parsing.
        self._style_stack = {}
        # Bounding boxes of the [ref=...] zones, filled while rendering.
        self._refs = {}
        # Positions of the [anchor=...] markers, filled while rendering.
        self._anchors = {}
        super(MarkupLabel, self).__init__(*largs, **kwargs)
        # Size of the laid out text before it is clamped to text_size.
        self._internal_size = 0, 0
        # Result of the last layout pass (list of LayoutLine).
        self._cached_lines = []
    @property
    def refs(self):
        '''Get the bounding box of all the ``[ref=...]``::

            { 'refA': ((x1, y1, x2, y2), (x1, y1, x2, y2)), ... }

        These are recorded by :meth:`render_lines` during rendering.
        '''
        return self._refs

    @property
    def anchors(self):
        '''Get the position of all the ``[anchor=...]``::

            { 'anchorA': (x, y), 'anchorB': (x, y), ... }

        These are recorded by :meth:`render_lines` during rendering.
        '''
        return self._anchors
@property
def markup(self):
'''Return the text with all the markup split::
>>> MarkupLabel('[b]Hello world[/b]').markup
>>> ('[b]', 'Hello world', '[/b]')
'''
s = re.split(r'(\[.*?\])', self.label)
s = [x for x in s if x != '']
return s
def _push_style(self, k):
if k not in self._style_stack:
self._style_stack[k] = []
self._style_stack[k].append(self.options[k])
def _pop_style(self, k):
if k not in self._style_stack or len(self._style_stack[k]) == 0:
Logger.warning('Label: pop style stack without push')
return
v = self._style_stack[k].pop()
self.options[k] = v
def render(self, real=False):
options = copy(self.options)
if not real:
ret = self._pre_render()
else:
ret = self._render_real()
self.options = options
return ret
def _pre_render(self):
# split markup, words, and lines
# result: list of word with position and width/height
# during the first pass, we don't care about h/valign
self._cached_lines = lines = []
self._refs = {}
self._anchors = {}
clipped = False
w = h = 0
uw, uh = self.text_size
spush = self._push_style
spop = self._pop_style
options = self.options
options['_ref'] = None
options['_anchor'] = None
options['script'] = 'normal'
shorten = options['shorten']
# if shorten, then don't split lines to fit uw, because it will be
# flattened later when shortening and broken up lines if broken
# mid-word will have space mid-word when lines are joined
uw_temp = None if shorten else uw
xpad = options['padding_x']
uhh = (None if uh is not None and options['valign'] != 'top' or
options['shorten'] else uh)
options['strip'] = options['strip'] or options['halign'] == 'justify'
find_base_dir = Label.find_base_direction
base_dir = options['base_direction']
self._resolved_base_dir = None
for item in self.markup:
if item == '[b]':
spush('bold')
options['bold'] = True
self.resolve_font_name()
elif item == '[/b]':
spop('bold')
self.resolve_font_name()
elif item == '[i]':
spush('italic')
options['italic'] = True
self.resolve_font_name()
elif item == '[/i]':
spop('italic')
self.resolve_font_name()
elif item == '[u]':
spush('underline')
options['underline'] = True
self.resolve_font_name()
elif item == '[/u]':
spop('underline')
self.resolve_font_name()
elif item == '[s]':
spush('strikethrough')
options['strikethrough'] = True
self.resolve_font_name()
elif item == '[/s]':
spop('strikethrough')
self.resolve_font_name()
elif item[:6] == '[size=':
item = item[6:-1]
try:
if item[-2:] in ('px', 'pt', 'in', 'cm', 'mm', 'dp', 'sp'):
size = dpi2px(item[:-2], item[-2:])
else:
size = int(item)
except ValueError:
raise
size = options['font_size']
spush('font_size')
options['font_size'] = size
elif item == '[/size]':
spop('font_size')
elif item[:7] == '[color=':
color = parse_color(item[7:-1])
spush('color')
options['color'] = color
elif item == '[/color]':
spop('color')
elif item[:6] == '[font=':
fontname = item[6:-1]
spush('font_name')
options['font_name'] = fontname
self.resolve_font_name()
elif item == '[/font]':
spop('font_name')
self.resolve_font_name()
elif item[:13] == '[font_family=':
spush('font_family')
options['font_family'] = item[13:-1]
elif item == '[/font_family]':
spop('font_family')
elif item[:14] == '[font_context=':
fctx = item[14:-1]
if not fctx or fctx.lower() == 'none':
fctx = None
spush('font_context')
options['font_context'] = fctx
elif item == '[/font_context]':
spop('font_context')
elif item[:15] == '[font_features=':
spush('font_features')
options['font_features'] = item[15:-1]
elif item == '[/font_features]':
spop('font_features')
elif item[:15] == '[text_language=':
lang = item[15:-1]
if not lang or lang.lower() == 'none':
lang = None
spush('text_language')
options['text_language'] = lang
elif item == '[/text_language]':
spop('text_language')
elif item[:5] == '[sub]':
spush('font_size')
spush('script')
options['font_size'] = options['font_size'] * .5
options['script'] = 'subscript'
elif item == '[/sub]':
spop('font_size')
spop('script')
elif item[:5] == '[sup]':
spush('font_size')
spush('script')
options['font_size'] = options['font_size'] * .5
options['script'] = 'superscript'
elif item == '[/sup]':
spop('font_size')
spop('script')
elif item[:5] == '[ref=':
ref = item[5:-1]
spush('_ref')
options['_ref'] = ref
elif item == '[/ref]':
spop('_ref')
elif not clipped and item[:8] == '[anchor=':
options['_anchor'] = item[8:-1]
elif not clipped:
item = item.replace('&bl;', '[').replace(
'&br;', ']').replace('&', '&')
if not base_dir:
base_dir = self._resolved_base_dir = find_base_dir(item)
opts = copy(options)
extents = self.get_cached_extents()
opts['space_width'] = extents(' ')[0]
w, h, clipped = layout_text(
item, lines, (w, h), (uw_temp, uhh),
opts, extents,
append_down=True,
complete=False
)
if len(lines): # remove any trailing spaces from the last line
old_opts = self.options
self.options = copy(opts)
w, h, clipped = layout_text(
'', lines, (w, h), (uw_temp, uhh),
self.options, self.get_cached_extents(),
append_down=True,
complete=True
)
self.options = old_opts
self.is_shortened = False
if shorten:
options['_ref'] = None # no refs for you!
options['_anchor'] = None
w, h, lines = self.shorten_post(lines, w, h)
self._cached_lines = lines
# when valign is not top, for markup we layout everything (text_size[1]
# is temporarily set to None) and after layout cut to size if too tall
elif uh != uhh and h > uh and len(lines) > 1:
if options['valign'] == 'bottom':
i = 0
while i < len(lines) - 1 and h > uh:
h -= lines[i].h
i += 1
del lines[:i]
else: # middle
i = 0
top = int(h / 2. + uh / 2.) # remove extra top portion
while i < len(lines) - 1 and h > top:
h -= lines[i].h
i += 1
del lines[:i]
i = len(lines) - 1 # remove remaining bottom portion
while i and h > uh:
h -= lines[i].h
i -= 1
del lines[i + 1:]
# now justify the text
if options['halign'] == 'justify' and uw is not None:
# XXX: update refs to justified pos
# when justify, each line should've been stripped already
split = partial(re.split, re.compile('( +)'))
uww = uw - 2 * xpad
chr = type(self.text)
space = chr(' ')
empty = chr('')
for i in range(len(lines)):
line = lines[i]
words = line.words
# if there's nothing to justify, we're done
if (not line.w or int(uww - line.w) <= 0 or not len(words) or
line.is_last_line):
continue
done = False
parts = [None, ] * len(words) # contains words split by space
idxs = [None, ] * len(words) # indices of the space in parts
# break each word into spaces and add spaces until it's full
# do first round of split in case we don't need to split all
for w in range(len(words)):
word = words[w]
sw = word.options['space_width']
p = parts[w] = split(word.text)
idxs[w] = [v for v in range(len(p)) if
p[v].startswith(' ')]
# now we have the indices of the spaces in split list
for k in idxs[w]:
# try to add single space at each space
if line.w + sw > uww:
done = True
break
line.w += sw
word.lw += sw
p[k] += space
if done:
break
# there's not a single space in the line?
if not any(idxs):
continue
# now keep adding spaces to already split words until done
while not done:
for w in range(len(words)):
if not idxs[w]:
continue
word = words[w]
sw = word.options['space_width']
p = parts[w]
for k in idxs[w]:
# try to add single space at each space
if line.w + sw > uww:
done = True
break
line.w += sw
word.lw += sw
p[k] += space
if done:
break
# if not completely full, push last words to right edge
diff = int(uww - line.w)
if diff > 0:
# find the last word that had a space
for w in range(len(words) - 1, -1, -1):
if not idxs[w]:
continue
break
old_opts = self.options
self.options = word.options
word = words[w]
# split that word into left/right and push right till uww
l_text = empty.join(parts[w][:idxs[w][-1]])
r_text = empty.join(parts[w][idxs[w][-1]:])
left = LayoutWord(
word.options,
self.get_extents(l_text)[0],
word.lh,
l_text
)
right = LayoutWord(
word.options,
self.get_extents(r_text)[0],
word.lh,
r_text
)
left.lw = max(left.lw, word.lw + diff - right.lw)
self.options = old_opts
# now put words back together with right/left inserted
for k in range(len(words)):
if idxs[k]:
words[k].text = empty.join(parts[k])
words[w] = right
words.insert(w, left)
else:
for k in range(len(words)):
if idxs[k]:
words[k].text = empty.join(parts[k])
line.w = uww
w = max(w, uww)
self._internal_size = w, h
if uw:
w = uw
if uh:
h = uh
if h > 1 and w < 2:
w = 2
if w < 1:
w = 1
if h < 1:
h = 1
return int(w), int(h)
    def render_lines(self, lines, options, render_text, y, size):
        '''Render the laid out `lines` by calling `render_text` for every
        word, applying horizontal alignment and sub/superscript offsets,
        and record the ``[ref=...]`` and ``[anchor=...]`` zones encountered.

        Returns the y coordinate just below the last rendered line.
        '''
        xpad = options['padding_x']
        w = size[0]
        halign = options['halign']
        refs = self._refs
        anchors = self._anchors
        # 'auto' halign renders right-aligned for a right-to-left base
        # direction.
        base_dir = options['base_direction'] or self._resolved_base_dir
        auto_halign_r = halign == 'auto' and base_dir and 'rtl' in base_dir
        for layout_line in lines:  # for plain label each line has only one str
            lw, lh = layout_line.w, layout_line.h
            x = xpad
            if halign == 'center':
                x = int((w - lw) / 2.)
            elif halign == 'right' or auto_halign_r:
                x = max(0, int(w - lw - xpad))
            layout_line.x = x
            layout_line.y = y
            # psp/pph hold the previous script position/height so chained
            # sub/superscripts stack relative to each other.
            psp = pph = 0
            for word in layout_line.words:
                # Each word carries its own options snapshot from parsing.
                options = self.options = word.options
                # the word height is not scaled by line_height, only lh was
                wh = options['line_height'] * word.lh
                # calculate sub/super script pos
                if options['script'] == 'superscript':
                    script_pos = max(0, psp if psp else self.get_descent())
                    psp = script_pos
                    pph = wh
                elif options['script'] == 'subscript':
                    script_pos = min(lh - wh, ((psp + pph) - wh)
                                     if pph else (lh - wh))
                    pph = wh
                    psp = script_pos
                else:
                    script_pos = (lh - wh) / 1.25
                    psp = pph = 0
                if len(word.text):
                    render_text(word.text, x, y + script_pos)
                # should we record refs ?
                ref = options['_ref']
                if ref is not None:
                    if ref not in refs:
                        refs[ref] = []
                    refs[ref].append((x, y, x + word.lw, y + wh))
                # Should we record anchors?
                anchor = options['_anchor']
                if anchor is not None:
                    if anchor not in anchors:
                        anchors[anchor] = (x, y)
                x += word.lw
            y += lh
        return y
    def shorten_post(self, lines, w, h, margin=2):
        ''' Shortens the text to a single line according to the label options.

        This function operates on a text that has already been laid out because
        for markup, parts of text can have different size and options.

        If :attr:`text_size` [0] is None, the lines are returned unchanged.
        Otherwise, the lines are converted to a single line fitting within the
        constrained width, :attr:`text_size` [0].

        :params:

            `lines`: list of `LayoutLine` instances describing the text.
            `w`: int, the width of the text in lines, including padding.
            `h`: int, the height of the text in lines, including padding.
            `margin` int, the additional space left on the sides. This is in
            addition to :attr:`padding_x`.

        :returns:
            3-tuple of (xw, h, lines), where w, and h is similar to the input
            and contains the resulting width / height of the text, including
            padding. lines, is a list containing a single `LayoutLine`, which
            contains the words for the line.
        '''

        def n(line, c):
            ''' A function similar to text.find, except it's an iterator that
            returns successive occurrences of string c in list line. line is
            not a string, but a list of LayoutWord instances that we walk
            from left to right returning the indices of c in the words as we
            encounter them. Note that the options can be different among the
            words.

            :returns:
                3-tuple: the index of the word in line, the index of the
                occurrence in word, and the extents (width) of the combined
                words until this occurrence, not including the occurrence char.
                If no more are found it returns (-1, -1, total_w) where total_w
                is the full width of all the words.
            '''
            total_w = 0
            for w in range(len(line)):
                word = line[w]
                if not word.lw:
                    continue
                f = partial(word.text.find, c)
                i = f()
                while i != -1:
                    # extents depend on the word's own options
                    self.options = word.options
                    yield w, i, total_w + self.get_extents(word.text[:i])[0]
                    i = f(i + 1)
                self.options = word.options
                total_w += self.get_extents(word.text)[0]

            yield -1, -1, total_w  # this should never be reached, really

        def p(line, c):
            ''' Similar to the `n` function, except it returns occurrences of c
            from right to left in the list, line, similar to rfind.
            '''
            total_w = 0
            offset = 0 if len(c) else 1
            for w in range(len(line) - 1, -1, -1):
                word = line[w]
                if not word.lw:
                    continue
                f = partial(word.text.rfind, c)
                i = f()
                while i != -1:
                    self.options = word.options
                    yield (w, i, total_w +
                           self.get_extents(word.text[i + 1:])[0])
                    if i:
                        i = f(0, i - offset)
                    else:
                        if not c:
                            self.options = word.options
                            yield (w, -1, total_w +
                                   self.get_extents(word.text)[0])
                        break
                self.options = word.options
                total_w += self.get_extents(word.text)[0]

            yield -1, -1, total_w  # this should never be reached, really

        def n_restricted(line, uw, c):
            ''' Similar to the function `n`, except it only returns the first
            occurrence and it's not an iterator. Furthermore, if the first
            occurrence doesn't fit within width uw, it returns the index of
            whatever amount of text will still fit in uw.

            :returns:
                similar to the function `n`, except it's a 4-tuple, with the
                last element a boolean, indicating if we had to clip the text
                to fit in uw (True) or if the whole text until the first
                occurrence fitted in uw (False).
            '''
            total_w = 0
            if not len(line):
                # NOTE(review): this early exit returns a 3-tuple while the
                # caller below unpacks 4 values -- only reachable when line
                # is empty; confirm upstream.
                return 0, 0, 0
            for w in range(len(line)):
                word = line[w]
                f = partial(word.text.find, c)
                self.options = word.options
                extents = self.get_cached_extents()
                i = f()
                if i != -1:
                    ww = extents(word.text[:i])[0]
                if i != -1 and total_w + ww <= uw:  # found and it fits
                    return w, i, total_w + ww, False
                elif i == -1:
                    ww = extents(word.text)[0]
                    if total_w + ww <= uw:  # wasn't found and all fits
                        total_w += ww
                        continue
                    i = len(word.text)

                # now just find whatever amount of the word does fit
                e = 0
                while e != i and total_w + extents(word.text[:e])[0] <= uw:
                    e += 1
                e = max(0, e - 1)

                return w, e, total_w + extents(word.text[:e])[0], True

            return -1, -1, total_w, False

        def p_restricted(line, uw, c):
            ''' Similar to `n_restricted`, except it returns the first
            occurrence starting from the right, like `p`.
            '''
            total_w = 0
            if not len(line):
                # NOTE(review): 3-tuple vs the 4 values unpacked by the
                # caller -- same situation as in n_restricted.
                return 0, 0, 0
            for w in range(len(line) - 1, -1, -1):
                word = line[w]
                f = partial(word.text.rfind, c)
                self.options = word.options
                extents = self.get_cached_extents()
                i = f()
                if i != -1:
                    ww = extents(word.text[i + 1:])[0]
                if i != -1 and total_w + ww <= uw:  # found and it fits
                    return w, i, total_w + ww, False
                elif i == -1:
                    ww = extents(word.text)[0]
                    if total_w + ww <= uw:  # wasn't found and all fits
                        total_w += ww
                        continue

                # now just find whatever amount of the word does fit
                s = len(word.text) - 1
                while s >= 0 and total_w + extents(word.text[s:])[0] <= uw:
                    s -= 1

                return w, s, total_w + extents(word.text[s + 1:])[0], True

            return -1, -1, total_w, False

        textwidth = self.get_cached_extents()
        uw = self.text_size[0]
        if uw is None:
            return w, h, lines
        old_opts = copy(self.options)
        uw = max(0, int(uw - old_opts['padding_x'] * 2 - margin))
        chr = type(self.text)
        ssize = textwidth(' ')
        c = old_opts['split_str']
        line_height = old_opts['line_height']
        xpad, ypad = old_opts['padding_x'], old_opts['padding_y']
        dir = old_opts['shorten_from'][0]

        # flatten lines into single line
        line = []
        last_w = 0
        for l in range(len(lines)):
            # concatenate (non-empty) inside lines with a space
            this_line = lines[l]
            if last_w and this_line.w and not this_line.line_wrap:
                line.append(LayoutWord(old_opts, ssize[0], ssize[1], chr(' ')))
            last_w = this_line.w or last_w
            for word in this_line.words:
                if word.lw:
                    line.append(word)

        # if that fits, just return the flattened line
        lw = sum([word.lw for word in line])
        if lw <= uw:
            lh = max([word.lh for word in line] + [0]) * line_height
            self.is_shortened = False
            return (
                lw + 2 * xpad,
                lh + 2 * ypad,
                [LayoutLine(0, 0, lw, lh, 1, 0, line)]
            )

        elps_opts = copy(old_opts)
        if 'ellipsis_options' in old_opts:
            elps_opts.update(old_opts['ellipsis_options'])

        # Set new opts for ellipsis
        self.options = elps_opts

        # find the size of ellipsis that'll fit
        elps_s = textwidth('...')
        if elps_s[0] > uw:  # even ellipsis didn't fit...
            self.is_shortened = True
            s = textwidth('..')
            if s[0] <= uw:
                return (
                    s[0] + 2 * xpad,
                    s[1] * line_height + 2 * ypad,
                    [LayoutLine(
                        0, 0, s[0], s[1], 1, 0,
                        [LayoutWord(old_opts, s[0], s[1], '..')])]
                )
            else:
                s = textwidth('.')
                return (
                    s[0] + 2 * xpad,
                    s[1] * line_height + 2 * ypad,
                    [LayoutLine(
                        0, 0, s[0], s[1], 1, 0,
                        [LayoutWord(old_opts, s[0], s[1], '.')])]
                )

        elps = LayoutWord(elps_opts, elps_s[0], elps_s[1], '...')
        # reserve room for the ellipsis in the available width
        uw -= elps_s[0]

        # Restore old opts
        self.options = old_opts

        # now find the first left and right words that fit
        w1, e1, l1, clipped1 = n_restricted(line, uw, c)
        w2, s2, l2, clipped2 = p_restricted(line, uw, c)

        if dir != 'l':  # center or right
            line1 = None
            if clipped1 or clipped2 or l1 + l2 > uw:
                # if either was clipped or both don't fit, just take first
                if len(c):
                    # retry with an empty split_str (split anywhere)
                    self.options = old_opts
                    old_opts['split_str'] = ''
                    res = self.shorten_post(lines, w, h, margin)
                    self.options['split_str'] = c
                    self.is_shortened = True
                    return res
                line1 = line[:w1]
                last_word = line[w1]
                last_text = last_word.text[:e1]
                self.options = last_word.options
                s = self.get_extents(last_text)
                line1.append(LayoutWord(last_word.options, s[0], s[1],
                                        last_text))
            elif (w1, e1) == (-1, -1):  # this shouldn't occur
                line1 = line
            if line1:
                line1.append(elps)
                lw = sum([word.lw for word in line1])
                lh = max([word.lh for word in line1]) * line_height
                self.options = old_opts
                self.is_shortened = True
                return (
                    lw + 2 * xpad,
                    lh + 2 * ypad,
                    [LayoutLine(0, 0, lw, lh, 1, 0, line1)]
                )

            # now we know that both the first and last word fit, and that
            # there's at least one instances of the split_str in the line
            if (w1, e1) != (w2, s2):  # more than one split_str
                if dir == 'r':
                    f = n(line, c)  # iterator
                    assert next(f)[:-1] == (w1, e1)  # first word should match

                    ww1, ee1, l1 = next(f)
                    while l2 + l1 <= uw:
                        w1, e1 = ww1, ee1
                        ww1, ee1, l1 = next(f)
                        if (w1, e1) == (w2, s2):
                            break

                else:  # center
                    f = n(line, c)  # iterator
                    f_inv = p(line, c)  # iterator
                    assert next(f)[:-1] == (w1, e1)
                    assert next(f_inv)[:-1] == (w2, s2)

                    while True:
                        if l1 <= l2:
                            ww1, ee1, l1 = next(f)  # hypothesize that next fit
                            if l2 + l1 > uw:
                                break
                            w1, e1 = ww1, ee1
                            if (w1, e1) == (w2, s2):
                                break
                        else:
                            ww2, ss2, l2 = next(f_inv)
                            if l2 + l1 > uw:
                                break
                            w2, s2 = ww2, ss2
                            if (w1, e1) == (w2, s2):
                                break

        else:  # left
            line1 = [elps]
            if clipped1 or clipped2 or l1 + l2 > uw:
                # if either was clipped or both don't fit, just take last
                if len(c):
                    # retry with an empty split_str (split anywhere)
                    self.options = old_opts
                    old_opts['split_str'] = ''
                    res = self.shorten_post(lines, w, h, margin)
                    self.options['split_str'] = c
                    self.is_shortened = True
                    return res
                first_word = line[w2]
                first_text = first_word.text[s2 + 1:]
                self.options = first_word.options
                s = self.get_extents(first_text)
                line1.append(LayoutWord(first_word.options, s[0], s[1],
                                        first_text))
                line1.extend(line[w2 + 1:])
            elif (w1, e1) == (-1, -1):  # this shouldn't occur
                line1 = line
            if len(line1) != 1:
                lw = sum([word.lw for word in line1])
                lh = max([word.lh for word in line1]) * line_height
                self.options = old_opts
                self.is_shortened = True
                return (
                    lw + 2 * xpad,
                    lh + 2 * ypad,
                    [LayoutLine(0, 0, lw, lh, 1, 0, line1)]
                )

            # now we know that both the first and last word fit, and that
            # there's at least one instances of the split_str in the line
            if (w1, e1) != (w2, s2):  # more than one split_str
                f_inv = p(line, c)  # iterator
                assert next(f_inv)[:-1] == (w2, s2)  # last word should match

                ww2, ss2, l2 = next(f_inv)
                while l2 + l1 <= uw:
                    w2, s2 = ww2, ss2
                    ww2, ss2, l2 = next(f_inv)
                    if (w1, e1) == (w2, s2):
                        break

        # now add back the left half
        line1 = line[:w1]
        last_word = line[w1]
        last_text = last_word.text[:e1]
        self.options = last_word.options
        s = self.get_extents(last_text)
        if len(last_text):
            line1.append(LayoutWord(last_word.options, s[0], s[1], last_text))
        line1.append(elps)

        # now add back the right half
        first_word = line[w2]
        first_text = first_word.text[s2 + 1:]
        self.options = first_word.options
        s = self.get_extents(first_text)
        if len(first_text):
            line1.append(LayoutWord(first_word.options, s[0], s[1],
                                    first_text))
        line1.extend(line[w2 + 1:])

        lw = sum([word.lw for word in line1])
        lh = max([word.lh for word in line1]) * line_height
        self.options = old_opts
        if uw < lw:
            self.is_shortened = True
        return (
            lw + 2 * xpad,
            lh + 2 * ypad,
            [LayoutLine(0, 0, lw, lh, 1, 0, line1)]
        )
| |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import re
import logging
from datetime import datetime
from .. import base
logger = logging.getLogger(__name__)
# Module API
def extract_source(record):
    """Return the source descriptor for the WHO ICTRP register.

    The descriptor is constant; ``record`` is accepted for interface
    consistency with the other extractors and is not used.
    """
    return {
        'id': 'ictrp',
        'name': 'WHO ICTRP',
        'type': 'register',
        'source_url': 'http://www.who.int/trialsearch/',
        'terms_and_conditions_url': 'http://www.who.int/ictrp/search/download/en/',
    }
def extract_trial(record):
    """Extract a normalised trial dict from a raw ICTRP record.

    Maps the source register name to an identifier namespace, normalises
    the recruitment status, and parses the registration date.

    Raises KeyError if the record's register or recruitment status is not
    one of the known mappings below (deliberate: an unknown value should
    fail loudly so the mapping tables get extended).
    """
    # Get identifiers
    registries = {
        'ANZCTR': 'actrn', # Australia
        'ChiCTR': 'chictr', # China
        'ClinicalTrials.gov': 'nct',
        'EUCTR': 'euctr',
        'German Clinical Trials Register': 'drks', # German
        'IRCT': 'irct', # Iran
        'ISRCTN': 'isrctn',
        'JPRN': 'jprn', # Japan
        'KCT': 'kct', # Korea
        'Netherlands Trial Register': 'ntr', # Netherlands
        'PACTR': 'pactr', # Pan Africa
        'REBEC': 'rbr', # Brazil
        'RPCEC': 'rpcec', # Cuba
        'RPEC': 'per', # Peru
        'TCTR': 'tctr', # Thai
    }
    source_id = registries[record['register']]
    identifier = record['main_id']
    # Extract EUCTR master identifier: EUCTR ids longer than 19 chars carry a
    # country suffix (e.g. "...-DE") that is stripped to get the master id.
    if source_id == 'euctr' and len(identifier) > 19:
        identifier = identifier.rsplit('-', 1)[0]
    identifiers = base.helpers.clean_identifiers({
        source_id: identifier,
    })
    # Get public title
    public_title = base.helpers.get_optimal_title(
        record['public_title'],
        record['scientific_title'],
        record['main_id'],
    )
    # Get status and recruitment status
    statuses = {
        '': [None, None],
        'active, not recruiting': ['ongoing', 'not_recruiting'],
        'active': ['ongoing', 'unknown'],
        'approved for marketing': ['other', 'other'],
        'authorised-recruitment may be ongoing or finished': ['ongoing', 'unknown'],
        'available': ['ongoing', 'unknown'],
        'canceled': ['terminated', 'not_recruiting'],
        'closed: follow-up complete': ['complete', 'not_recruiting'],
        'closed: follow-up continuing': ['ongoing', 'not_recruiting'],
        'closed to recruitment: follow up complete': ['complete', 'not_recruiting'],
        'closed to recruitment: follow up continuing': ['ongoing', 'not_recruiting'],
        'complete': ['complete', 'not_recruiting'],
        'completed': ['complete', 'not_recruiting'],
        'completed: recruitment & data analysis complete': ['complete', 'not_recruiting'],
        'complete: follow-up complete': ['complete', 'not_recruiting'],
        'complete: follow-up continuing': ['ongoing', 'not_recruiting'],
        'data analysis completed': ['complete', 'not_recruiting'],
        'early termination': ['terminated', 'not_recruiting'],
        'enrolling by invitation': ['ongoing', 'recruiting'],
        'finished': ['complete', 'not_recruiting'],
        'interrupted': ['suspended', 'not_recruiting'],
        'main results already published': ['complete', 'not_recruiting'],
        'no longer available': ['other', 'other'],
        'no longer recruiting': ['ongoing', 'not_recruiting'],
        'non authorized': ['other', 'other'],
        'not recruiting': ['ongoing', 'not_recruiting'],
        'not yet recruiting': ['ongoing', 'not_recruiting'],
        'open public recruiting': ['ongoing', 'recruiting'],
        'open to recruitment: actively recruiting participa': ['ongoing', 'recruiting'],
        'other': ['other', 'other'],
        'pending (not yet recruiting)': ['ongoing', 'not_recruiting'],
        'pending': ['ongoing', 'unknown'],
        'recruiting': ['ongoing', 'recruiting'],
        'recruitment completed': ['ongoing', 'not_recruiting'],
        'stopped early': ['terminated', 'not_recruiting'],
        'suspended': ['suspended', 'not_recruiting'],
        'temporarily closed': ['suspended', 'not_recruiting'],
        'temporarily not available': ['other', 'other'],
        'temporary halt or suspension': ['suspended', 'not_recruiting'],
        'temporary halt': ['suspended', 'not_recruiting'],
        'terminated': ['terminated', 'not_recruiting'],
        'withdrawn': ['withdrawn', 'not_recruiting'],
        'withheld': ['other', 'other'],
    }
    # `or ''` also covers an explicit None value, which `.get(..., '')`
    # would pass through and crash on `.strip()`.
    key = (record.get('recruitment_status') or '').strip().lower()
    status, recruitment_status = statuses[key]
    # Get gender (not provided by ICTRP exports)
    gender = None
    # Get has_published_results (not provided by ICTRP exports)
    has_published_results = None
    # Registration date: try the known formats in order, keep None on failure.
    registration_date = None
    date_of_registration = record.get('date_of_registration')
    if date_of_registration:
        date_formats = [
            '%d/%m/%Y',
            '%Y-%m-%d',
        ]
        for fmt in date_formats:
            try:
                registration_date = datetime.strptime(date_of_registration, fmt).date()
                break
            except ValueError:
                pass
        if not registration_date:
            # logger.warn is a deprecated alias; use warning with lazy %-args.
            logger.warning("Failed to parse date '%s'", date_of_registration)
    # Get study phase
    study_phase = base.normalizers.get_normalized_phase(record['study_phase'])
    trial = {
        'identifiers': identifiers,
        'public_title': public_title,
        'scientific_title': record['scientific_title'],
        'status': status,
        'recruitment_status': recruitment_status,
        'eligibility_criteria': {'criteria': record['key_inclusion_exclusion_criteria']},
        'target_sample_size': record['target_sample_size'],
        'study_type': record['study_type'],
        'study_design': record['study_design'],
        'study_phase': study_phase,
        'primary_outcomes': record['primary_outcomes'],
        'secondary_outcomes': record['secondary_outcomes'],
        'gender': gender,
        'has_published_results': has_published_results,
        'registration_date': registration_date,
    }
    return trial
def extract_conditions(record):
    """Map each studied health condition to a condition dict."""
    elements = record['health_conditions_or_problems_studied'] or []
    return [{'name': element} for element in elements]
def extract_interventions(record):
    """Split intervention strings on enumeration markers into dicts.

    Markers look like "1.", "2)", "Intervention 1:" etc.; each resulting
    fragment becomes one intervention entry.
    """
    marker = re.compile(r'(?:Intervention\s*)?\d[.):]')
    fragments = []
    for element in record['interventions'] or []:
        fragments.extend(marker.split(element))
    return [{'name': fragment} for fragment in fragments]
def extract_locations(record):
    """Extract recruitment countries from the record.

    Country strings may list several countries separated by commas or
    semicolons.  The second fragment is dropped when it is a descriptor
    like "Bolivarian Republic of" belonging to the first country.
    """
    locations = []
    for element in record['countries_of_recruitment'] or []:
        parts = re.split(r'[,;]', element)
        for position, part in enumerate(parts):
            name = part.strip()
            # For cases like "Venezuela, Bolivarian Republic of"
            if position == 1 and 'republic of' in name.lower():
                continue
            locations.append({
                'name': name,
                'type': 'country',
                'trial_role': 'recruitment_countries',
            })
    return locations
def extract_organisations(record):
    """Return organisations for the record (none available from ICTRP)."""
    return []
def extract_persons(record):
    """Return persons for the record (none available from ICTRP)."""
    return []
| |
# Copyright (c) 2016 CORE Security Technologies
#
# This software is provided under under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
# Authors: Alberto Solino (@agsolino)
# Kacper Nowak (@kacpern)
#
# Description:
# RFC 4511 Minimalistic implementation. We don't need much functionality yet
# If we need more complex use cases we might opt to use a third party implementation
# Keep in mind the APIs are still unstable, might require to re-write your scripts
# as we change them.
# Adding [MS-ADTS] specific functionality
#
from pyasn1.codec.ber import encoder, decoder
from pyasn1.type import univ, namedtype, namedval, tag, constraint
# Public API of this RFC 4511 implementation.
__all__ = [
    'CONTROL_PAGEDRESULTS', 'KNOWN_CONTROLS', 'NOTIFICATION_DISCONNECT', 'KNOWN_NOTIFICATIONS',
    # classes
    'ResultCode', 'Scope', 'DerefAliases', 'Operation', 'MessageID', 'LDAPString', 'LDAPOID', 'LDAPDN',
    'RelativeLDAPDN', 'AttributeDescription', 'AttributeValue', 'AssertionValue', 'MatchingRuleID', 'URI',
    'AttributeValueAssertion', 'PartialAttribute', 'PartialAttributeList', 'Attribute', 'AttributeList',
    'AttributeSelection', 'Referral', 'LDAPResult', 'SaslCredentials', 'AuthenticationChoice', 'BindRequest',
    'BindResponse', 'UnbindRequest', 'SubstringFilter', 'MatchingRuleAssertion', 'Filter', 'SearchRequest',
    'SearchResultEntry', 'SearchResultReference', 'SearchResultDone', 'ModifyRequest', 'ModifyResponse', 'AddRequest',
    'AddResponse', 'DelRequest', 'DelResponse', 'ModifyDNRequest', 'ModifyDNResponse', 'CompareRequest',
    'CompareResponse', 'AbandonRequest', 'ExtendedRequest', 'ExtendedResponse', 'IntermediateResponse', 'Control',
    'Controls', 'SimplePagedResultsControlValue', 'SimplePagedResultsControl', 'LDAPMessage'
]
# Controls
CONTROL_PAGEDRESULTS = '1.2.840.113556.1.4.319'
KNOWN_CONTROLS = {}  # OID -> Control subclass; populated at module bottom
# Unsolicited notifications
NOTIFICATION_DISCONNECT = '1.3.6.1.4.1.1466.20036'
KNOWN_NOTIFICATIONS = {NOTIFICATION_DISCONNECT: 'Notice of Disconnection'}
# RFC 4511 "maxInt": upper bound (2^31 - 1) for integer fields.
maxInt = univ.Integer(2147483647)
class DefaultSequenceAndSetBaseMixin:
    """Mixin that auto-instantiates missing components on access.

    pyasn1 normally returns None for components that were never set; this
    mixin instead sets a default component at that position and returns it,
    so callers can build nested messages without pre-initialising fields.
    """
    def getComponentByPosition(self, idx):
        # Delegate to the first non-mixin base that implements the lookup.
        for cls in self.__class__.__bases__:
            if cls is not DefaultSequenceAndSetBaseMixin:
                try:
                    component = cls.getComponentByPosition(self, idx)
                except AttributeError:
                    continue
                if component is None:
                    # Component absent: instantiate a default in place and
                    # return the freshly created component.
                    return self.setComponentByPosition(idx).getComponentByPosition(idx)
                return component
class ResultCode(univ.Enumerated):
    """LDAP resultCode enumeration (RFC 4511 section 4.1.9)."""
    namedValues = namedval.NamedValues(
        ('success', 0),
        ('operationsError', 1),
        ('protocolError', 2),
        ('timeLimitExceeded', 3),
        ('sizeLimitExceeded', 4),
        ('compareFalse', 5),
        ('compareTrue', 6),
        ('authMethodNotSupported', 7),
        ('strongerAuthRequired', 8),
        ('referral', 10),
        ('adminLimitExceeded', 11),
        ('unavailableCriticalExtension', 12),
        ('confidentialityRequired', 13),
        ('saslBindInProgress', 14),
        ('noSuchAttribute', 16),
        ('undefinedAttributeType', 17),
        ('inappropriateMatching', 18),
        ('constraintViolation', 19),
        ('attributeOrValueExists', 20),
        ('invalidAttributeSyntax', 21),
        ('noSuchObject', 32),
        ('aliasProblem', 33),
        ('invalidDNSyntax', 34),
        ('aliasDereferencingProblem', 36),
        ('inappropriateAuthentication', 48),
        ('invalidCredentials', 49),
        ('insufficientAccessRights', 50),
        ('busy', 51),
        ('unavailable', 52),
        ('unwillingToPerform', 53),
        ('loopDetect', 54),
        ('namingViolation', 64),
        ('objectClassViolation', 65),
        ('notAllowedOnNonLeaf', 66),
        ('notAllowedOnRDN', 67),
        ('entryAlreadyExists', 68),
        ('objectClassModsProhibited', 69),
        ('affectsMultipleDSAs', 71),
        ('other', 80),
    )
class Scope(univ.Enumerated):
    """Search scope for SearchRequest (RFC 4511 4.5.1.2)."""
    namedValues = namedval.NamedValues(
        ('baseObject', 0),
        ('singleLevel', 1),
        ('wholeSubtree', 2),
    )
class DerefAliases(univ.Enumerated):
    """Alias dereferencing policy for SearchRequest (RFC 4511 4.5.1.3)."""
    namedValues = namedval.NamedValues(
        ('neverDerefAliases', 0),
        ('derefInSearching', 1),
        ('derefFindingBaseObj', 2),
        ('derefAlways', 3),
    )
class Operation(univ.Enumerated):
    """Modification operation used in ModifyRequest changes."""
    namedValues = namedval.NamedValues(
        ('add', 0),
        ('delete', 1),
        ('replace', 2),
    )
class MessageID(univ.Integer):
    """Message identifier, constrained to 0..maxInt (RFC 4511 4.1.1.1)."""
    subtypeSpec = constraint.ValueRangeConstraint(0, maxInt)
class LDAPString(univ.OctetString):
    """Octet string holding UTF-8 encoded text."""
    encoding = 'utf-8'
class LDAPOID(univ.OctetString):
    # Dotted-decimal OID carried as an octet string (controls, extensions).
    pass
class LDAPDN(LDAPString):
    # Distinguished name in its RFC 4514 string form.
    pass
class RelativeLDAPDN(LDAPString):
    # Single RDN component, used by ModifyDNRequest.
    pass
class AttributeDescription(LDAPString):
    # Attribute type name, optionally with options (e.g. "cn;binary").
    pass
class AttributeValue(univ.OctetString):
    # Raw attribute value; interpretation depends on the attribute syntax.
    pass
class AssertionValue(univ.OctetString):
    # Value used in comparisons/filters.
    pass
class MatchingRuleID(LDAPString):
    # Name/OID of a matching rule for extensibleMatch filters.
    pass
class URI(LDAPString):
    # Referral target, typically an LDAP URL.
    pass
class AttributeValueAssertion(univ.Sequence):
    """(attribute, value) pair used by equality/ordering filters and Compare."""
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('attributeDesc', AttributeDescription()),
        namedtype.NamedType('assertionValue', AssertionValue())
    )
class PartialAttribute(univ.Sequence):
    """Attribute with zero or more values, as returned in search entries."""
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('type', AttributeDescription()),
        namedtype.NamedType('vals', univ.SetOf(componentType=AttributeValue()))
    )
class PartialAttributeList(univ.SequenceOf):
    componentType = PartialAttribute()
class Attribute(univ.Sequence):
    """Attribute with at least one value (used in AddRequest)."""
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('type', AttributeDescription()),
        namedtype.NamedType(
            'vals',
            univ.SetOf(componentType=AttributeValue()).subtype(subtypeSpec=constraint.ValueSizeConstraint(1, maxInt))
        )
    )
class AttributeList(univ.SequenceOf):
    componentType = Attribute()
class AttributeSelection(univ.SequenceOf):
    # List of attribute names requested by a search.
    componentType = LDAPString()
class Referral(univ.SequenceOf):
    # Non-empty list of alternative URIs for continuing the operation.
    componentType = URI()
    subtypeSpec = constraint.ValueSizeConstraint(1, maxInt)
class LDAPResult(univ.Sequence):
    """Common result envelope shared by most responses (RFC 4511 4.1.9)."""
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('resultCode', ResultCode()),
        namedtype.NamedType('matchedDN', LDAPDN()),
        namedtype.NamedType('diagnosticMessage', LDAPString()),
        namedtype.OptionalNamedType(
            # Context tag [3], only present on referral results.
            'referral', Referral().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))
        )
    )
class SaslCredentials(univ.Sequence):
    """SASL mechanism name plus optional opaque credentials blob."""
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('mechanism', LDAPString()),
        namedtype.OptionalNamedType('credentials', univ.OctetString())
    )
class AuthenticationChoice(DefaultSequenceAndSetBaseMixin, univ.Choice):
    """Bind authentication alternatives.

    'simple' and 'sasl' are the RFC 4511 choices; the 'sicily*' entries
    are non-standard extensions (see the module header's [MS-ADTS] note)
    used for NTLM-style authentication against Active Directory.
    """
    componentType = namedtype.NamedTypes(
        namedtype.NamedType(
            'simple',
            univ.OctetString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))
        ),
        namedtype.NamedType(
            'sasl',
            SaslCredentials().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3))
        ),
        namedtype.NamedType(
            'sicilyPackageDiscovery',
            univ.OctetString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 9))
        ),
        namedtype.NamedType(
            'sicilyNegotiate',
            univ.OctetString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 10))
        ),
        namedtype.NamedType(
            'sicilyResponse',
            univ.OctetString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 11))
        )
    )
class BindRequest(DefaultSequenceAndSetBaseMixin, univ.Sequence):
    """Bind request, APPLICATION 0 (RFC 4511 4.2)."""
    tagSet = univ.Sequence.tagSet.tagImplicitly(tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 0))
    componentType = namedtype.NamedTypes(
        # Protocol version; LDAPv3 uses 3.
        namedtype.NamedType('version', univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(1, 127))),
        namedtype.NamedType('name', LDAPDN()),
        namedtype.NamedType('authentication', AuthenticationChoice())
    )
class BindResponse(univ.Sequence):
    """Bind response, APPLICATION 1; LDAPResult fields plus SASL creds."""
    tagSet = univ.Sequence.tagSet.tagImplicitly(tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 1))
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('resultCode', ResultCode()),
        namedtype.NamedType('matchedDN', LDAPDN()),
        namedtype.NamedType('diagnosticMessage', LDAPString()),
        namedtype.OptionalNamedType(
            'referral',
            Referral().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))
        ),
        namedtype.OptionalNamedType(
            'serverSaslCreds',
            univ.OctetString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 7))
        )
    )
class UnbindRequest(univ.Null):
    """Unbind request, APPLICATION 2; carries no payload."""
    tagSet = univ.Null.tagSet.tagImplicitly(tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 2))
class SubstringFilter(DefaultSequenceAndSetBaseMixin, univ.Sequence):
    """Substring filter: attribute plus initial/any/final fragments."""
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('type', AttributeDescription()),
        namedtype.NamedType(
            'substrings',
            # Each element is one fragment; RFC 4511 allows at most one
            # 'initial' and one 'final', with any number of 'any' parts.
            univ.SequenceOf(componentType=univ.Choice(componentType=namedtype.NamedTypes(
                namedtype.NamedType(
                    'initial',
                    AssertionValue().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))
                ),
                namedtype.NamedType(
                    'any',
                    AssertionValue().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))
                ),
                namedtype.NamedType(
                    'final',
                    AssertionValue().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))
                )
            )))
        )
    )
class MatchingRuleAssertion(univ.Sequence):
    """extensibleMatch filter body (RFC 4511 4.5.1.7.7)."""
    componentType = namedtype.NamedTypes(
        namedtype.OptionalNamedType(
            'matchingRule',
            MatchingRuleID().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))
        ),
        namedtype.OptionalNamedType(
            'type',
            AttributeDescription().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))
        ),
        namedtype.NamedType(
            'matchValue',
            AssertionValue().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))
        ),
        namedtype.DefaultedNamedType(
            # Whether DN attributes also participate in matching; default FALSE.
            'dnAttributes',
            univ.Boolean().subtype(value=False, implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4))
        )
    )
class Filter(DefaultSequenceAndSetBaseMixin, univ.Choice):
    """Search filter (RFC 4511 4.5.1.7).

    The componentType is assigned after the class statement because the
    filter grammar is recursive: 'and'/'or'/'not' contain nested Filters.
    """
    pass
Filter.componentType = namedtype.NamedTypes(
    namedtype.NamedType(
        'and',
        univ.SetOf(componentType=Filter()).subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))
    ),
    namedtype.NamedType(
        'or',
        univ.SetOf(componentType=Filter()).subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))
    ),
    namedtype.NamedType(
        'not',
        Filter().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))
    ),
    namedtype.NamedType(
        'equalityMatch',
        AttributeValueAssertion().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3))
    ),
    namedtype.NamedType(
        'substrings',
        SubstringFilter().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 4))
    ),
    namedtype.NamedType(
        'greaterOrEqual',
        AttributeValueAssertion().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 5))
    ),
    namedtype.NamedType(
        'lessOrEqual',
        AttributeValueAssertion().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 6))
    ),
    namedtype.NamedType(
        'present',
        AttributeDescription().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 7))
    ),
    namedtype.NamedType(
        'approxMatch',
        AttributeValueAssertion().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 8))
    ),
    namedtype.NamedType(
        'extensibleMatch',
        MatchingRuleAssertion().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 9))
    )
)
class SearchRequest(DefaultSequenceAndSetBaseMixin, univ.Sequence):
    """Search request, APPLICATION 3 (RFC 4511 4.5.1)."""
    tagSet = univ.Sequence.tagSet.tagImplicitly(tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 3))
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('baseObject', LDAPDN()),
        namedtype.NamedType('scope', Scope()),
        namedtype.NamedType('derefAliases', DerefAliases()),
        # 0 means "no client-requested limit" for both size and time.
        namedtype.NamedType(
            'sizeLimit', univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, maxInt))
        ),
        namedtype.NamedType(
            'timeLimit', univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, maxInt))
        ),
        namedtype.NamedType('typesOnly', univ.Boolean()),
        namedtype.NamedType('filter', Filter()),
        namedtype.NamedType('attributes', AttributeSelection())
    )
class SearchResultEntry(univ.Sequence):
    """One returned entry, APPLICATION 4."""
    tagSet = univ.Sequence.tagSet.tagImplicitly(tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 4))
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('objectName', LDAPDN()),
        namedtype.NamedType('attributes', PartialAttributeList())
    )
class SearchResultReference(univ.SequenceOf):
    """Continuation reference(s), APPLICATION 19; non-empty list of URIs."""
    tagSet = univ.SequenceOf.tagSet.tagImplicitly(tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 19))
    componentType = URI()
    subtypeSpec = constraint.ValueSizeConstraint(1, maxInt)
class SearchResultDone(LDAPResult):
    """Final search result, APPLICATION 5."""
    tagSet = LDAPResult.tagSet.tagImplicitly(tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 5))
class ModifyRequest(DefaultSequenceAndSetBaseMixin, univ.Sequence):
    """Modify request, APPLICATION 6: target DN plus a change list."""
    tagSet = univ.Sequence.tagSet.tagImplicitly(tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 6))
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('object', LDAPDN()),
        namedtype.NamedType(
            'changes',
            # Each change pairs an add/delete/replace operation with the
            # attribute (and values) it applies to.
            univ.SequenceOf(componentType=univ.Sequence(componentType=namedtype.NamedTypes(
                namedtype.NamedType('operation', Operation()),
                namedtype.NamedType('modification', PartialAttribute())
            )))
        )
    )
class ModifyResponse(LDAPResult):
    """Modify response, APPLICATION 7."""
    tagSet = LDAPResult.tagSet.tagImplicitly(tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 7))
class AddRequest(DefaultSequenceAndSetBaseMixin, univ.Sequence):
    """Add request, APPLICATION 8: new entry DN plus its attributes."""
    tagSet = univ.Sequence.tagSet.tagImplicitly(tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 8))
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('entry', LDAPDN()),
        namedtype.NamedType('attributes', AttributeList())
    )
class AddResponse(LDAPResult):
    """Add response, APPLICATION 9."""
    tagSet = LDAPResult.tagSet.tagImplicitly(tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 9))
class DelRequest(LDAPDN):
    """Delete request, APPLICATION 10; the payload is just the DN."""
    tagSet = LDAPDN.tagSet.tagImplicitly(tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 10))
class DelResponse(LDAPResult):
    """Delete response, APPLICATION 11."""
    tagSet = LDAPResult.tagSet.tagImplicitly(tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 11))
class ModifyDNRequest(univ.Sequence):
    """Modify DN (rename/move) request, APPLICATION 12."""
    tagSet = univ.Sequence.tagSet.tagImplicitly(tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 12))
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('entry', LDAPDN()),
        namedtype.NamedType('newrdn', RelativeLDAPDN()),
        namedtype.NamedType('deleteoldrdn', univ.Boolean()),
        namedtype.OptionalNamedType(
            # Present only when the entry is moved under a new parent.
            'newSuperior', LDAPDN().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))
        )
    )
class ModifyDNResponse(LDAPResult):
    """Modify DN response, APPLICATION 13."""
    tagSet = LDAPResult.tagSet.tagImplicitly(tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 13))
class CompareRequest(DefaultSequenceAndSetBaseMixin, univ.Sequence):
    """Compare request, APPLICATION 14."""
    tagSet = univ.Sequence.tagSet.tagImplicitly(tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 14))
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('entry', LDAPDN()),
        namedtype.NamedType('ava', AttributeValueAssertion())
    )
class CompareResponse(LDAPResult):
    """Compare response, APPLICATION 15 (compareTrue/compareFalse)."""
    tagSet = LDAPResult.tagSet.tagImplicitly(tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 15))
class AbandonRequest(MessageID):
    """Abandon request, APPLICATION 16; payload is the target messageID."""
    tagSet = MessageID.tagSet.tagImplicitly(tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 16))
class ExtendedRequest(univ.Sequence):
    """Extended operation request, APPLICATION 23 (RFC 4511 4.12)."""
    tagSet = univ.Sequence.tagSet.tagImplicitly(tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 23))
    componentType = namedtype.NamedTypes(
        namedtype.NamedType(
            'requestName', LDAPOID().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))
        ),
        namedtype.OptionalNamedType(
            'requestValue', univ.OctetString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))
        )
    )
class ExtendedResponse(univ.Sequence):
    """Extended operation response, APPLICATION 24."""
    tagSet = univ.Sequence.tagSet.tagImplicitly(tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 24))
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('resultCode', ResultCode()),
        namedtype.NamedType('matchedDN', LDAPDN()),
        namedtype.NamedType('diagnosticMessage', LDAPString()),
        namedtype.OptionalNamedType(
            'referral',
            Referral().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))
        ),
        namedtype.OptionalNamedType(
            'responseName',
            LDAPOID().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 10))
        ),
        namedtype.OptionalNamedType(
            'responseValue',
            univ.OctetString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 11))
        )
    )
class IntermediateResponse(univ.Sequence):
    """Intermediate response, APPLICATION 25 (RFC 4511 4.13)."""
    tagSet = univ.Sequence.tagSet.tagImplicitly(tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 25))
    componentType = namedtype.NamedTypes(
        namedtype.OptionalNamedType(
            'responseName',
            LDAPOID().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))
        ),
        namedtype.OptionalNamedType(
            'responseValue',
            univ.OctetString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))
        )
    )
class Control(univ.Sequence):
    """LDAP Control (RFC 4511 4.1.11) with automatic subclass dispatch.

    When the controlType component is assigned an OID registered in
    KNOWN_CONTROLS, the instance is morphed into the specialised subclass
    so control-specific encode/decode logic applies transparently.
    """
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('controlType', LDAPOID()),
        namedtype.DefaultedNamedType('criticality', univ.Boolean().subtype(value=False)),
        namedtype.OptionalNamedType('controlValue', univ.OctetString())
    )
    def setComponentByPosition(self, idx, value=None,
                               verifyConstraints=True,
                               exactTypes=False,
                               matchTags=True,
                               matchConstraints=True):
        if idx == 0: # controlType
            try:
                # Swap in the registered subclass for this control OID, if any.
                cls = KNOWN_CONTROLS[value]
                if self.__class__ is not cls:
                    self.__class__ = cls
            except KeyError:
                pass
        return univ.Sequence.setComponentByPosition(self, idx, value=value,
                                                    verifyConstraints=verifyConstraints,
                                                    exactTypes=exactTypes,
                                                    matchTags=matchTags,
                                                    matchConstraints=matchConstraints)
    def encodeControlValue(self):
        # Hook: subclasses serialise their state into self['controlValue'].
        pass
    def decodeControlValue(self):
        # Hook: subclasses parse self['controlValue']; returns the decoded
        # value or None (base class has nothing to decode).
        return
    def prettyPrint(self, scope=0):
        # Append the decoded control value (when available) to the generic
        # sequence dump, replacing the raw octet-string rendering.
        r = univ.Sequence.prettyPrint(self, scope)
        decodedControlValue = self.decodeControlValue()
        if decodedControlValue is not None:
            r = r[:r.rindex('=') + 1] + '%s\n' % decodedControlValue.prettyPrint(scope + 1)
        return r
class Controls(univ.SequenceOf):
    # Sequence of controls attached to an LDAPMessage.
    componentType = Control()
class SimplePagedResultsControlValue(univ.Sequence):
    """Value payload of the paged-results control (RFC 2696): page size + cookie."""
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('size', univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, maxInt))),
        namedtype.NamedType('cookie', univ.OctetString()),
    )
class SimplePagedResultsControl(Control):
    """Simple Paged Results control (RFC 2696).

    Keeps a python-side page size and cookie; every setter re-encodes the
    BER controlValue, and every getter re-decodes it first, so the two
    representations stay in sync.
    """
    def __init__(self, criticality=None, size=1000, cookie='', **kwargs):
        Control.__init__(self, **kwargs)
        self['controlType'] = CONTROL_PAGEDRESULTS
        if criticality is not None:
            self['criticality'] = criticality
        self._size = size
        self._cookie = cookie
        # Materialise controlValue from the initial size/cookie.
        self.encodeControlValue()
    def encodeControlValue(self):
        self['controlValue'] = encoder.encode(SimplePagedResultsControlValue().setComponents(self._size, self._cookie))
    def decodeControlValue(self):
        decodedControlValue, _ = decoder.decode(self['controlValue'], asn1Spec=SimplePagedResultsControlValue())
        self._size, self._cookie = decodedControlValue[0], decodedControlValue[1]
        return decodedControlValue
    def getCriticality(self):
        return self['criticality']
    def setCriticality(self, value):
        self['criticality'] = value
    def getSize(self):
        self.decodeControlValue()
        return self._size
    def setSize(self, value):
        self._size = value
        self.encodeControlValue()
    def getCookie(self):
        self.decodeControlValue()
        return self._cookie
    def setCookie(self, value):
        self._cookie = value
        self.encodeControlValue()
# Register the control so Control.setComponentByPosition dispatches to it.
KNOWN_CONTROLS[CONTROL_PAGEDRESULTS] = SimplePagedResultsControl
class LDAPMessage(DefaultSequenceAndSetBaseMixin, univ.Sequence):
    """Top-level LDAP PDU (RFC 4511 4.1.1): id + operation + controls.

    The trailing optional responseName/responseValue components are not in
    RFC 4511; they accommodate Active Directory emitting those fields at
    the message level (see inline comment).
    """
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('messageID', MessageID()),
        namedtype.NamedType('protocolOp', univ.Choice(componentType=namedtype.NamedTypes(
            namedtype.NamedType('bindRequest', BindRequest()),
            namedtype.NamedType('bindResponse', BindResponse()),
            namedtype.NamedType('unbindRequest', UnbindRequest()),
            namedtype.NamedType('searchRequest', SearchRequest()),
            namedtype.NamedType('searchResEntry', SearchResultEntry()),
            namedtype.NamedType('searchResDone', SearchResultDone()),
            namedtype.NamedType('searchResRef', SearchResultReference()),
            namedtype.NamedType('modifyRequest', ModifyRequest()),
            namedtype.NamedType('modifyResponse', ModifyResponse()),
            namedtype.NamedType('addRequest', AddRequest()),
            namedtype.NamedType('addResponse', AddResponse()),
            namedtype.NamedType('delRequest', DelRequest()),
            namedtype.NamedType('delResponse', DelResponse()),
            namedtype.NamedType('modDNRequest', ModifyDNRequest()),
            namedtype.NamedType('modDNResponse', ModifyDNResponse()),
            namedtype.NamedType('compareRequest', CompareRequest()),
            namedtype.NamedType('compareResponse', CompareResponse()),
            namedtype.NamedType('abandonRequest', AbandonRequest()),
            namedtype.NamedType('extendedReq', ExtendedRequest()),
            namedtype.NamedType('extendedResp', ExtendedResponse()),
            namedtype.NamedType('intermediateResponse', IntermediateResponse())
        ))),
        namedtype.OptionalNamedType(
            'controls',
            Controls().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))
        ),
        # fix AD nonconforming to RFC4511
        namedtype.OptionalNamedType(
            'responseName',
            LDAPOID().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 10))
        ),
        namedtype.OptionalNamedType(
            'responseValue',
            univ.OctetString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 11))
        )
    )
| |
"""
===========================
Formaldehyde mm-line fitter
===========================
This is a formaldehyde 3_03-2_02 / 3_22-221 and 3_03-2_02/3_21-2_20 fitter.
It is based entirely on RADEX models.
Module API
^^^^^^^^^^
"""
from __future__ import print_function
import numpy as np
from . import hyperfine
from . import fitter,model#,modelgrid
from six.moves import xrange
# Optional-dependency probes: each flag records whether the dependency is
# importable so later code can degrade gracefully.
try: # for model grid reading
    import astropy.io.fits as pyfits
except ImportError:
    import pyfits
try:
    import scipy.interpolate
    import scipy.ndimage
    scipyOK = True
except ImportError:
    scipyOK=False
try:
    from despotic import cloud
    # Democracy=False means despotic IS available (the pun: not despotic).
    Democracy=False
except ImportError:
    Democracy=True # Because it's not despotic :D
from astropy.utils.console import ProgressBar
from astropy.table import Table
import warnings
# Names of the three 218 GHz p-H2CO transitions modelled here.
line_names = ['threeohthree','threetwotwo','threetwoone']
# http://adsabs.harvard.edu/abs/1971ApJ...169..429T has the most accurate freqs
# http://adsabs.harvard.edu/abs/1972ApJ...174..463T [twotwo]
# Rest frequencies in Hz.
central_freq_dict = {
    'threeohthree': 218.222192e9,
    'threetwotwo': 218.475632e9,
    'threetwoone': 218.760066e9,
    }
# All relative strengths/degeneracies are unity: the three lines are
# treated as independent components, not a hyperfine multiplet.
line_strength_dict={
    'threeohthree': 1.,
    'threetwotwo': 1.,
    'threetwoone': 1.,
    }
relative_strength_total_degeneracy={
    'threeohthree': 1.,
    'threetwotwo': 1.,
    'threetwoone': 1.,
    }
freq_dict = central_freq_dict
# Einstein A coefficients (s^-1) per transition.
aval_dict = {
    'threeohthree': 2.818e-4,
    'threetwotwo': 1.571e-4,
    'threetwoone': 1.577e-4,
    }
# No velocity offsets between components.
voff_lines_dict = {
    'threeohthree': 0.,
    'threetwotwo': 0.,
    'threetwoone': 0.,
    }
# Build the hyperfine-style model and expose its fitters at module level.
formaldehyde_mm_vtau = hyperfine.hyperfinemodel(line_names, voff_lines_dict,
        freq_dict, line_strength_dict, relative_strength_total_degeneracy)
formaldehyde_mm_vtau_fitter = formaldehyde_mm_vtau.fitter
formaldehyde_mm_vtau_vheight_fitter = formaldehyde_mm_vtau.vheight_fitter
def build_despotic_grids(gridfile='ph2co_grid_despotic.fits', ph2coAbund=1e-8,
                         nDens=21, logDensLower=2.0, logDensUpper=6.0,
                         nCol=21, logColLower=11.0, logColUpper=15.0,
                         nTemp=51, Tlower=10.0, Tupper=300.0,
                         nDv=5, DvLower=1.0, DvUpper=5.0):
    """
    Generates grids of p-H2CO line intensities using Despotic. Outputs a astropy Table.

    Parameters
    ----------
    gridfile : string
        Name of grid file to output.
    ph2coAbund : float
        Fractional abundance of p-H2CO
    nDens : int
        Number of grid points in the volume density
    logDensLower : float
        log of volume density at lower bound of grid (log(n/cm**-3))
    logDensUpper : float
        log of volume density at upper bound of grid (log(n/cm**-3))
    nCol : int
        Number of grid points in the column density
    logColLower : float
        log of column density of p-H2CO at lower bound of grid (log(N/cm**-2))
    logColUpper : float
        log of column density of p-H2CO at upper bound of grid (log(N/cm**-2))
    nTemp : int
        Number of grid points in the temperature grid
    Tlower : float
        temperature at lower bound of grid (K)
    Tupper : float
        temperature at upper bound of grid (K)
    nDv : int
        Number of grid points in the line width
    DvLower : float
        line width (non-thermal) at lower bound of grid (km/s)
    DvUpper : float
        line width (non-thermal) at upper bound of grid (km/s)
    """
    if Democracy:
        # despotic failed to import at module load; cannot proceed.
        raise Exception("No despotic install found. Cannot build grids")
    # Template cloud; must be present in the working directory.
    core = cloud(fileName="protostellarCore.desp", verbose=True)
    nlower = logDensLower
    nupper = logDensUpper
    Nlower = logColLower
    Nupper = logColUpper
    Temps = np.linspace(Tlower, Tupper, nTemp)
    Cols = 1e1**np.linspace(Nlower, Nupper, nCol)
    Densities = 1e1**(np.linspace(nlower, nupper, nDens))
    LineWidth = np.linspace(DvLower, DvUpper, nDv)
    outtable = Table(names = ['Tex_303_202', 'Tex_322_221', 'Tex_321_220',
                              'tau_303_202', 'tau_322_221', 'tau_321_220',
                              'Temperature', 'Column', 'nH2', 'sigmaNT'])
    # Full Cartesian product of the four axes; rows are written flattened.
    TempArr, ColArr, DensArr, DvArr = np.meshgrid(Temps,
                                                  Cols,
                                                  Densities,
                                                  LineWidth)
    for T, N, n, dv in ProgressBar(zip(TempArr.flatten(),
                                       ColArr.flatten(),
                                       DensArr.flatten(),
                                       DvArr.flatten())):
        # Gas and dust temperatures are set equal.
        core.colDen = N/ph2coAbund
        core.Tg = T
        core.Td = T
        core.nH = n
        core.sigmaNT = dv
        lines = core.lineLum('p-h2co')
        outtable.add_row()
        # NOTE(review): indices 2/9/12 assume a fixed ordering of despotic's
        # p-H2CO line list for the 303-202/322-221/321-220 transitions —
        # confirm against the despotic emitter data file.
        outtable[-1]['Tex_303_202'] = lines[2]['Tex']
        outtable[-1]['tau_303_202'] = lines[2]['tau']
        outtable[-1]['Tex_322_221'] = lines[9]['Tex']
        outtable[-1]['tau_322_221'] = lines[9]['tau']
        outtable[-1]['Tex_321_220'] = lines[12]['Tex']
        outtable[-1]['tau_321_220'] = lines[12]['tau']
        outtable[-1]['Temperature'] = T
        outtable[-1]['Column'] = N
        outtable[-1]['nH2'] = n
        outtable[-1]['sigmaNT'] = dv
    outtable.write(gridfile, format='fits',overwrite=True)
def formaldehyde_mm_despotic_functions(gridtable, order=1):
    """
    This builds interpolation functions for use in fitting.

    Parameters
    ----------
    gridtable : str
        Name of grid in astropy table
    order : int
        Spline order passed to `scipy.ndimage.map_coordinates`
        (1 = linear interpolation).

    Returns
    -------
    h2co_303_202, h2co_322_221, h2co_321_220 : function
        Functions that return the excitation temperature and optical depth given input density,
        temperature, column density and line width.
    """
    if gridtable is None:
        warnings.warn("No gridfile found. Building grids using despotic")
        try:
            build_despotic_grids('ph2co_grid_despotic.fits')
            gridtable = Table.read('ph2co_grid_despotic.fits')
        except Exception:  # TODO -- make this more specific (was a bare except)
            warnings.warn("Failed to build functions because no grids available")
            return
    # Sorted axis vectors of the grid.
    DensArr = np.sort(np.unique(gridtable['nH2']))
    ColArr = np.sort(np.unique(gridtable['Column']))
    TempArr = np.sort(np.unique(gridtable['Temperature']))
    DvArr = np.sort(np.unique(gridtable['sigmaNT']))
    shape = (len(DensArr), len(ColArr), len(TempArr), len(DvArr))
    # Map each table row onto integer grid indices.  Rows lie exactly on the
    # grid, so np.interp yields integral values; `np.int` was removed in
    # NumPy 1.20, so use the builtin `int` instead.
    ii = np.interp(gridtable['nH2'], DensArr, np.arange(len(DensArr))).astype(int)
    jj = np.interp(gridtable['Column'], ColArr, np.arange(len(ColArr))).astype(int)
    kk = np.interp(gridtable['Temperature'], TempArr, np.arange(len(TempArr))).astype(int)
    ll = np.interp(gridtable['sigmaNT'], DvArr, np.arange(len(DvArr))).astype(int)

    def _scatter(column):
        # Scatter one table column into a dense 4D grid (NaN where missing).
        grid = np.zeros(shape) + np.nan
        grid[ii, jj, kk, ll] = gridtable[column]
        return grid

    def _make_line_function(name, tex_grid, tau_grid):
        # Build the (Tex, tau) interpolator for one transition.  Density and
        # column are interpolated in log space; temperature and line width
        # in linear space.
        def line_function(logdensity=4, logcolumn=13, temperature=25, sigmav=2.0):
            iidx = np.interp(logdensity, np.log10(DensArr), np.arange(len(DensArr)))
            jidx = np.interp(logcolumn, np.log10(ColArr), np.arange(len(ColArr)))
            kidx = np.interp(temperature, TempArr, np.arange(len(TempArr)))
            lidx = np.interp(sigmav, DvArr, np.arange(len(DvArr)))
            xvec = np.array([iidx, jidx, kidx, lidx])
            xvec.shape += (1,)
            # scipy.ndimage.interpolation is deprecated; use the public name.
            Tex = scipy.ndimage.map_coordinates(tex_grid, xvec, order=order)
            tau = scipy.ndimage.map_coordinates(tau_grid, xvec, order=order)
            return (Tex, tau)
        line_function.__name__ = name
        return line_function

    h2co_303_202 = _make_line_function('h2co_303_202',
                                       _scatter('Tex_303_202'),
                                       _scatter('tau_303_202'))
    h2co_322_221 = _make_line_function('h2co_322_221',
                                       _scatter('Tex_322_221'),
                                       _scatter('tau_322_221'))
    h2co_321_220 = _make_line_function('h2co_321_220',
                                       _scatter('Tex_321_220'),
                                       _scatter('tau_321_220'))
    return (h2co_303_202, h2co_322_221, h2co_321_220)
def formaldehyde_mm_despotic(xarr,
                             temperature=25,
                             column=13,
                             density=4,
                             xoff_v=0.0,
                             width=1.0,
                             grid_vwidth=1.0,
                             h2co_303_202=None,
                             h2co_322_221=None,
                             h2co_321_220=None,
                             debug=False,
                             verbose=False,
                             **kwargs):
    """
    Fitter to p-H2CO using despotic grids.  Requires building grids and
    passing in functions for interpolating the h2co transition optical
    depth and excitation temperatures.

    Parameters
    ----------
    xarr : SpectroscopicAxis
        Spectral axis; converted to GHz internally to mask each line window.
    temperature : float
        Kinetic temperature (K).
    column : float
        log10 of the p-H2CO column density (passed as ``logcolumn``).
    density : float
        log10 of the H2 volume density (passed as ``logdensity``).
    xoff_v, width : float
        Line-center offset and line width, both in km/s.
    grid_vwidth : float
        Velocity width assumed when the grid was computed (km/s).
    h2co_303_202, h2co_322_221, h2co_321_220 : callable
        Interpolator functions returning ``(Tex, tau)`` for each transition.

    Returns
    -------
    spec : ndarray
        Model spectrum: the sum of the three transitions, each restricted
        to its own frequency window.
    """
    Tex303_202, tau303_202 = h2co_303_202(logdensity=density,
                                          logcolumn=column,
                                          temperature=temperature,
                                          sigmav=width)
    Tex322_221, tau322_221 = h2co_322_221(logdensity=density,
                                          logcolumn=column,
                                          temperature=temperature,
                                          sigmav=width)
    Tex321_220, tau321_220 = h2co_321_220(logdensity=density,
                                          logcolumn=column,
                                          temperature=temperature,
                                          sigmav=width)
    tex = [Tex303_202, Tex322_221, Tex321_220]
    tau = [tau303_202, tau322_221, tau321_220]
    # Frequency windows (GHz) isolating each of the three transitions.
    minfreq = [218.15, 218.40, 218.7]
    maxfreq = [218.25, 218.55, 218.8]
    # `range` replaces the Python-2-only `xrange` for Python 3 compatibility.
    spec = np.sum([
        (formaldehyde_mm_vtau(xarr, Tex=float(tex[ii]), tau=float(tau[ii]),
                              xoff_v=xoff_v, width=width, **kwargs)
         * (xarr.as_unit('GHz').value > minfreq[ii]) *
         (xarr.as_unit('GHz').value < maxfreq[ii])) for ii in range(len(tex))],
        axis=0)
    return spec
def formaldehyde_mm_radex(xarr,
                          temperature=25,
                          column=13,
                          density=4,
                          xoff_v=0.0,
                          width=1.0,
                          grid_vwidth=1.0,
                          texgrid=None,
                          taugrid=None,
                          hdr=None,
                          path_to_texgrid='',
                          path_to_taugrid='',
                          debug=False,
                          verbose=False,
                          **kwargs):
    """
    Use a grid of RADEX-computed models to make a model line spectrum

    The RADEX models have to be available somewhere.
    OR they can be passed as arrays.  If as arrays, the form should be:
    texgrid = ((minfreq1,maxfreq1,texgrid1),(minfreq2,maxfreq2,texgrid2))

    xarr must be a SpectroscopicAxis instance
    xoff_v, width are both in km/s

    Parameters
    ----------
    grid_vwidth : float
        the velocity assumed when computing the grid in km/s
        this is important because tau = modeltau / width (see, e.g.,
        Draine 2011 textbook pgs 219-230)
    density : float
        Density!

    Raises
    ------
    IOError
        If neither in-memory grids nor grid file paths are provided.
    ValueError
        If the grid specification is inconsistent, or the requested
        column/density falls outside the grid.
    ImportError
        If scipy is unavailable (interpolation is impossible).
    """
    if texgrid is None and taugrid is None:
        if path_to_texgrid == '' or path_to_taugrid == '':
            raise IOError("Must specify model grids to use.")
        else:
            taugrid = [pyfits.getdata(path_to_taugrid)]
            texgrid = [pyfits.getdata(path_to_texgrid)]
            hdr = pyfits.getheader(path_to_taugrid)
            zinds, yinds, xinds = np.indices(taugrid[0].shape)
            # Support both CDn_n and CDELTn WCS keyword conventions.
            if 'CD1_1' in hdr:
                cd11 = 'CD1_1'
                cd22 = 'CD2_2'
            else:
                cd11 = 'CDELT1'
                cd22 = 'CDELT2'
            densityarr = (xinds+hdr['CRPIX1']-1)*hdr[cd11]+hdr['CRVAL1'] # log density
            columnarr = (yinds+hdr['CRPIX2']-1)*hdr[cd22]+hdr['CRVAL2'] # log column
            temparr = (zinds+hdr['CRPIX3']-1)*hdr['CDELT3']+hdr['CRVAL3'] # lin temperature
            minfreq = (218.,)
            maxfreq = (219.,)
    elif len(taugrid) == len(texgrid) and hdr is not None:
        # Grids passed as ((minfreq, maxfreq, grid), ...) tuples.
        minfreq, maxfreq, texgrid = zip(*texgrid)
        minfreq, maxfreq, taugrid = zip(*taugrid)
        zinds, yinds, xinds = np.indices(taugrid[0].shape)
        if 'CD1_1' in hdr:
            cd11 = 'CD1_1'
            cd22 = 'CD2_2'
        else:
            cd11 = 'CDELT1'
            cd22 = 'CDELT2'
        densityarr = (xinds+hdr['CRPIX1']-1)*hdr[cd11]+hdr['CRVAL1'] # log density
        columnarr = (yinds+hdr['CRPIX2']-1)*hdr[cd22]+hdr['CRVAL2'] # log column
        temparr = (zinds+hdr['CRPIX3']-1)*hdr['CDELT3']+hdr['CRVAL3'] # lin temperature
    else:
        # Previously a bare `raise Exception`; give the caller a diagnosis.
        # ValueError is a subclass of Exception, so existing broad handlers
        # still catch it.
        raise ValueError("Invalid grid specification: texgrid/taugrid must "
                         "have matching lengths and hdr must be provided.")
    # Convert X-units to frequency in GHz
    xarr = xarr.as_unit('Hz', quiet=True)
    # Fractional grid coordinates of the requested physical parameters.
    gridval1 = np.interp(density, densityarr[0, 0, :], xinds[0, 0, :])
    gridval2 = np.interp(column, columnarr[0, :, 0], yinds[0, :, 0])
    gridval3 = np.interp(temperature, temparr[:, 0, 0], zinds[:, 0, 0])
    if np.isnan(gridval1) or np.isnan(gridval2) or np.isnan(gridval3):
        raise ValueError("Invalid column/density")
    if scipyOK:
        # Speed trick: slice out a 2-cell-thick cube around the target point
        # so map_coordinates only interpolates between two thin layers.
        # int(np.floor(...)) is required because slice bounds must be
        # integers (np.floor returns a float, rejected on Python 3 /
        # modern numpy); the slices are a tuple, since indexing with a
        # list is deprecated advanced indexing in numpy.
        gv1 = int(np.floor(gridval1))
        gv2 = int(np.floor(gridval2))
        gv3 = int(np.floor(gridval3))
        slices = (slice(gv3, gv3 + 2), slice(gv2, gv2 + 2), slice(gv1, gv1 + 2))
        tau = [scipy.ndimage.map_coordinates(tg[slices],
               np.array([[gridval3 % 1], [gridval2 % 1], [gridval1 % 1]]),
               order=1) for tg in taugrid]
        tex = [scipy.ndimage.map_coordinates(tg[slices],
               np.array([[gridval3 % 1], [gridval2 % 1], [gridval1 % 1]]),
               order=1) for tg in texgrid]
    else:
        raise ImportError("Couldn't import scipy, therefore cannot interpolate")
    if verbose:
        for ta, tk in zip(tau, tex):
            print("density %20.12g temperature %20.12g column %20.12g: tau %20.12g tex %20.12g" % (density, temperature, column, ta, tk))
    if debug:
        import pdb; pdb.set_trace()
    # Sum the per-line models, each masked to its own frequency window.
    # `range` replaces the Python-2-only `xrange`.
    spec = np.sum([
        (formaldehyde_mm_vtau(xarr, Tex=float(tex[ii]), tau=float(tau[ii]),
                              xoff_v=xoff_v, width=width, **kwargs)
         * (xarr.as_unit('GHz').value > minfreq[ii]) *
         (xarr.as_unit('GHz').value < maxfreq[ii])) for ii in range(len(tex))],
        axis=0)
    return spec
def formaldehyde_mm(xarr, amp=1.0, xoff_v=0.0, width=1.0,
                    return_components=False):
    """
    Generate a model formaldehyde spectrum from simple gaussian parameters.

    The "amplitude" is an essentially arbitrary parameter: it is defined
    as the excitation temperature obtained for tau=0.01 when passed to the
    fitter, and the final spectrum is rescaled so its peak equals ``amp``.
    The components are independent, but with offsets set by frequency...
    in principle.
    """
    spectrum = formaldehyde_vtau(xarr, Tex=amp * 0.01, tau=0.01,
                                 xoff_v=xoff_v, width=width,
                                 return_components=return_components)
    # Peak of the unscaled model; per-component output needs a sum first.
    if return_components:
        peak = np.abs(spectrum).squeeze().sum(axis=0).max()
    else:
        peak = np.abs(spectrum).max()
    # Rescale in place so the model peaks at the requested amplitude.
    if peak > 0:
        spectrum *= amp / peak
    return spectrum
class formaldehyde_mm_model(model.SpectralModel):
    """SpectralModel subclass used for the mm-line formaldehyde fitters
    below; adds no behavior beyond the base class."""
    pass
# Three-parameter (amplitude, center, width) fitter for the mm H2CO lines,
# working in Hz; width is constrained to be non-negative via parlimited.
formaldehyde_mm_fitter = formaldehyde_mm_model(formaldehyde_mm, 3,
        parnames=['amp','center','width'],
        parlimited=[(False,False),(False,False), (True,False)],
        parlimits=[(0,0), (0,0), (0,0)],
        shortvarnames=("A","v","\\sigma"), # specify the parameter names (TeX is OK)
        fitunit='Hz' )
# Same fitter with an additional baseline-height parameter prepended,
# wrapping formaldehyde_mm via fitter.vheightmodel.
formaldehyde_mm_vheight_fitter = formaldehyde_mm_model(fitter.vheightmodel(formaldehyde_mm), 4,
        parnames=['height','amp','center','width'],
        parlimited=[(False,False),(False,False),(False,False), (True,False)],
        parlimits=[(0,0), (0,0), (0,0), (0,0)],
        shortvarnames=("H","A","v","\\sigma"), # specify the parameter names (TeX is OK)
        fitunit='Hz' )
# Optional pymodelfit integration: expose the formaldehyde models through
# pymodelfit's FunctionModel1DAuto interface when pymodelfit is installed;
# silently skipped otherwise.
try:
    import pymodelfit

    class pmfFormaldehydeModel(pymodelfit.FunctionModel1DAuto):
        """pymodelfit wrapper around the simple-amplitude formaldehyde model."""
        def f(self, x, amp0=1.0, xoff_v0=0.0,width0=1.0):
            return formaldehyde(x,
                    amp=amp0,
                    xoff_v=xoff_v0,width=width0)

    class pmfFormaldehydeModelVtau(pymodelfit.FunctionModel1DAuto):
        """pymodelfit wrapper around the Tex/tau-parameterized model."""
        def f(self, x, Tex0=1.0, tau0=0.01, xoff_v0=0.0, width0=1.0):
            return formaldehyde_vtau(x,
                    Tex=Tex0, tau=tau0,
                    xoff_v=xoff_v0,width=width0)
except ImportError:
    pass
| |
import time as real_time
import unittest
import jwt as jwt_lib
from mock import patch
from twilio.jwt import Jwt, JwtDecodeError
class DummyJwt(Jwt):
    """Test double for Jwt: lets a test inject arbitrary payload and header
    dicts instead of generating them from real credentials."""

    ALGORITHM = 'HS256'

    def __init__(self, secret_key, issuer, subject=None, algorithm=None,
                 nbf=Jwt.GENERATE, ttl=3600, valid_until=None, headers=None,
                 payload=None):
        # Fall back to the class default algorithm when none is given.
        if algorithm is None:
            algorithm = self.ALGORITHM
        super(DummyJwt, self).__init__(
            secret_key=secret_key,
            issuer=issuer,
            subject=subject,
            algorithm=algorithm,
            nbf=nbf,
            ttl=ttl,
            valid_until=valid_until
        )
        # Empty dicts when no payload/headers were supplied.
        self._payload = payload if payload else {}
        self._headers = headers if headers else {}

    def _generate_payload(self):
        # Return exactly what the test injected.
        return self._payload

    def _generate_headers(self):
        return self._headers
class JwtTest(unittest.TestCase):
    """Tests for twilio.jwt.Jwt encoding/decoding.

    Uses DummyJwt to control payload/header contents and mock.patch on
    time.time() to pin nbf/exp values in the encode tests.
    """

    def assertIn(self, foo, bar, msg=None):
        """backport for 2.6"""
        assert foo in bar, (msg or "%s not found in %s" % (foo, bar))

    def now(self):
        # Current wall-clock time as an integer UNIX timestamp.
        return int(real_time.time())

    def assertJwtsEqual(self, jwt, key, expected_payload=None, expected_headers=None):
        # Decode without signature verification and compare headers/payload
        # dicts exactly against the expected values.
        expected_headers = expected_headers or {}
        expected_payload = expected_payload or {}
        decoded_payload = jwt_lib.decode(jwt, key, algorithms=["HS256"], options={"verify_signature": False})
        decoded_headers = jwt_lib.get_unverified_header(jwt)
        self.assertEqual(expected_headers, decoded_headers)
        self.assertEqual(expected_payload, decoded_payload)

    @patch('time.time')
    def test_basic_encode(self, time_mock):
        # With time pinned to 0, exp == ttl (3600) and nbf == 0.
        time_mock.return_value = 0.0
        jwt = DummyJwt('secret_key', 'issuer', headers={}, payload={})
        self.assertJwtsEqual(
            jwt.to_jwt(), 'secret_key',
            expected_headers={'typ': 'JWT', 'alg': 'HS256'},
            expected_payload={'iss': 'issuer', 'exp': 3600, 'nbf': 0},
        )

    @patch('time.time')
    def test_encode_with_subject(self, time_mock):
        time_mock.return_value = 0.0
        jwt = DummyJwt('secret_key', 'issuer', subject='subject', headers={}, payload={})
        self.assertJwtsEqual(
            jwt.to_jwt(), 'secret_key',
            expected_headers={'typ': 'JWT', 'alg': 'HS256'},
            expected_payload={'iss': 'issuer', 'exp': 3600, 'nbf': 0, 'sub': 'subject'},
        )

    @patch('time.time')
    def test_encode_without_nbf(self, time_mock):
        # nbf=None omits the claim entirely.
        time_mock.return_value = 0.0
        jwt = DummyJwt('secret_key', 'issuer', subject='subject', headers={}, payload={}, nbf=None)
        self.assertJwtsEqual(
            jwt.to_jwt(), 'secret_key',
            expected_headers={'typ': 'JWT', 'alg': 'HS256'},
            expected_payload={'iss': 'issuer', 'exp': 3600, 'sub': 'subject'},
        )

    @patch('time.time')
    def test_encode_custom_ttl(self, time_mock):
        time_mock.return_value = 0.0
        jwt = DummyJwt('secret_key', 'issuer', ttl=10, headers={}, payload={})
        self.assertJwtsEqual(
            jwt.to_jwt(), 'secret_key',
            expected_headers={'typ': 'JWT', 'alg': 'HS256'},
            expected_payload={'iss': 'issuer', 'exp': 10, 'nbf': 0},
        )

    @patch('time.time')
    def test_encode_ttl_added_to_current_time(self, time_mock):
        # exp is now + ttl, not an absolute ttl.
        time_mock.return_value = 50.0
        jwt = DummyJwt('secret_key', 'issuer', ttl=10, headers={}, payload={})
        self.assertJwtsEqual(
            jwt.to_jwt(), 'secret_key',
            expected_headers={'typ': 'JWT', 'alg': 'HS256'},
            expected_payload={'iss': 'issuer', 'exp': 60, 'nbf': 50},
        )

    @patch('time.time')
    def test_encode_override_ttl(self, time_mock):
        # A ttl passed to to_jwt() wins over the constructor's ttl.
        time_mock.return_value = 0.0
        jwt = DummyJwt('secret_key', 'issuer', ttl=10, headers={}, payload={})
        self.assertJwtsEqual(
            jwt.to_jwt(ttl=20),
            'secret_key',
            expected_headers={'typ': 'JWT', 'alg': 'HS256'},
            expected_payload={'iss': 'issuer', 'exp': 20, 'nbf': 0},
        )

    @patch('time.time')
    def test_encode_valid_until_overrides_ttl(self, time_mock):
        # An explicit valid_until timestamp takes precedence over ttl.
        time_mock.return_value = 0.0
        jwt = DummyJwt('secret_key', 'issuer', ttl=10, valid_until=70, headers={}, payload={})
        self.assertJwtsEqual(
            jwt.to_jwt(), 'secret_key',
            expected_headers={'typ': 'JWT', 'alg': 'HS256'},
            expected_payload={'iss': 'issuer', 'exp': 70, 'nbf': 0},
        )

    @patch('time.time')
    def test_encode_custom_nbf(self, time_mock):
        time_mock.return_value = 0.0
        jwt = DummyJwt('secret_key', 'issuer', ttl=10, nbf=5, headers={}, payload={})
        self.assertJwtsEqual(
            jwt.to_jwt(), 'secret_key',
            expected_headers={'typ': 'JWT', 'alg': 'HS256'},
            expected_payload={'iss': 'issuer', 'exp': 10, 'nbf': 5},
        )

    @patch('time.time')
    def test_encode_with_headers(self, time_mock):
        # Custom headers are merged with the standard typ/alg headers.
        time_mock.return_value = 0.0
        jwt = DummyJwt('secret_key', 'issuer', headers={'sooper': 'secret'}, payload={})
        self.assertJwtsEqual(
            jwt.to_jwt(), 'secret_key',
            expected_headers={'typ': 'JWT', 'alg': 'HS256', 'sooper': 'secret'},
            expected_payload={'iss': 'issuer', 'exp': 3600, 'nbf': 0},
        )

    @patch('time.time')
    def test_encode_with_payload(self, time_mock):
        time_mock.return_value = 0.0
        jwt = DummyJwt('secret_key', 'issuer', payload={'root': 'true'})
        self.assertJwtsEqual(
            jwt.to_jwt(), 'secret_key',
            expected_headers={'typ': 'JWT', 'alg': 'HS256'},
            expected_payload={'iss': 'issuer', 'exp': 3600, 'nbf': 0, 'root': 'true'},
        )

    @patch('time.time')
    def test_encode_with_payload_and_headers(self, time_mock):
        time_mock.return_value = 0.0
        jwt = DummyJwt('secret_key', 'issuer', headers={'yes': 'oui'}, payload={'pay': 'me'})
        self.assertJwtsEqual(
            jwt.to_jwt(), 'secret_key',
            expected_headers={'typ': 'JWT', 'alg': 'HS256', 'yes': 'oui'},
            expected_payload={'iss': 'issuer', 'exp': 3600, 'nbf': 0, 'pay': 'me'},
        )

    def test_encode_no_key_fails(self):
        # Signing requires a secret key.
        jwt = DummyJwt(None, 'issuer')
        self.assertRaises(ValueError, jwt.to_jwt)

    def test_encode_decode(self):
        # Round-trip: everything encoded must come back out of from_jwt.
        test_start = self.now()
        jwt = DummyJwt('secret_key', 'issuer', subject='hey', payload={'sick': 'sick'})
        decoded_jwt = Jwt.from_jwt(jwt.to_jwt(), 'secret_key')
        self.assertGreaterEqual(decoded_jwt.valid_until, self.now() + 3600)
        self.assertGreaterEqual(decoded_jwt.nbf, test_start)
        self.assertEqual(decoded_jwt.issuer, 'issuer')
        self.assertEqual(decoded_jwt.secret_key, 'secret_key')
        self.assertEqual(decoded_jwt.algorithm, 'HS256')
        self.assertEqual(decoded_jwt.subject, 'hey')
        self.assertEqual(decoded_jwt.headers, {'typ': 'JWT', 'alg': 'HS256'})
        self.assertDictContainsSubset({
            'iss': 'issuer',
            'sub': 'hey',
            'sick': 'sick',
        }, decoded_jwt.payload)

    def test_encode_decode_mismatched_algorithms(self):
        # A token signed with HS512 must not decode under the default algorithm.
        jwt = DummyJwt('secret_key', 'issuer', algorithm='HS512', subject='hey', payload={'sick': 'sick'})
        self.assertRaises(JwtDecodeError, Jwt.from_jwt, jwt.to_jwt())

    def test_decode_bad_secret(self):
        jwt = DummyJwt('secret_key', 'issuer')
        self.assertRaises(JwtDecodeError, Jwt.from_jwt, jwt.to_jwt(), 'letmeinplz')

    def test_decode_modified_jwt_fails(self):
        # Tampering with the encoded token invalidates the signature.
        jwt = DummyJwt('secret_key', 'issuer')
        example_jwt = jwt.to_jwt()
        example_jwt = 'ABC' + example_jwt[3:]
        self.assertRaises(JwtDecodeError, Jwt.from_jwt, example_jwt, 'secret_key')

    def test_decode_validates_expiration(self):
        # Sleep past the exp claim so the token is expired at decode time.
        expired_jwt = DummyJwt('secret_key', 'issuer', valid_until=self.now())
        real_time.sleep(1)
        self.assertRaises(JwtDecodeError, Jwt.from_jwt, expired_jwt.to_jwt(), 'secret_key')

    def test_decode_validates_nbf(self):
        expired_jwt = DummyJwt('secret_key', 'issuer', nbf=self.now() + 3600)  # valid 1hr from now
        self.assertRaises(JwtDecodeError, Jwt.from_jwt, expired_jwt.to_jwt(), 'secret_key')

    def test_decodes_valid_jwt(self):
        # A token produced directly by the jwt library decodes correctly.
        expiry_time = self.now() + 1000
        example_jwt = jwt_lib.encode(
            {'hello': 'world', 'iss': 'me', 'sub': 'being awesome', 'exp': expiry_time},
            'secret'
        )
        decoded_jwt = Jwt.from_jwt(example_jwt, 'secret')
        self.assertEqual(decoded_jwt.issuer, 'me')
        self.assertEqual(decoded_jwt.subject, 'being awesome')
        self.assertEqual(decoded_jwt.valid_until, expiry_time)
        self.assertIn('hello', decoded_jwt.payload)
        self.assertEqual(decoded_jwt.payload['hello'], 'world')

    def test_decode_allows_skip_verification(self):
        # key=None skips signature verification but still parses claims.
        jwt = DummyJwt('secret', 'issuer', payload={'get': 'rekt'})
        decoded_jwt = Jwt.from_jwt(jwt.to_jwt(), key=None)
        self.assertEqual(decoded_jwt.issuer, 'issuer')
        self.assertEqual(decoded_jwt.payload['get'], 'rekt')
        self.assertIsNone(decoded_jwt.secret_key)
| |
###############################################################################
#
# Copyright 2012 Pants Developers (see AUTHORS.txt)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###############################################################################
import errno
import socket
import sys
import unittest
from mock import MagicMock
from pants.engine import Engine
from pants._channel import _Channel, HAS_UNIX, HAS_IPV6, InvalidAddressFormatError
import pants._channel
class TestChannelConstructorArguments(unittest.TestCase):
    """Constructor wiring: engine and socket defaults/overrides."""

    def test_channel_constructor_no_args(self):
        # Default construction binds the global engine and no socket.
        chan = _Channel()
        self.assertIs(chan.engine, Engine.instance())
        self.assertIsNone(chan._socket)

    def test_channel_constructor_socket_arg(self):
        sock = socket.socket()
        chan = _Channel(socket=sock)
        self.assertIs(chan._socket, sock)

    def test_channel_constructor_engine_arg(self):
        custom_engine = Engine()
        chan = _Channel(engine=custom_engine)
        self.assertIs(chan.engine, custom_engine)
class TestChannelEngineInteraction(unittest.TestCase):
    """Channels register with their engine on creation and unregister on close."""

    def test_channel_gets_added_to_engine(self):
        mock_engine = Engine()
        mock_engine.add_channel = MagicMock()
        chan = _Channel(socket=socket.socket(), engine=mock_engine)
        mock_engine.add_channel.assert_called_once_with(chan)
        chan.close()

    def test_channel_gets_removed_from_engine(self):
        mock_engine = Engine()
        mock_engine.remove_channel = MagicMock()
        chan = _Channel(socket=socket.socket(), engine=mock_engine)
        chan.close()
        mock_engine.remove_channel.assert_called_once_with(chan)
class TestChannelFileno(unittest.TestCase):
    """fileno mirrors the underlying socket's descriptor (or None)."""

    def test_channel_fileno_with_no_socket(self):
        self.assertIsNone(_Channel().fileno)

    def test_channel_fileno_with_socket(self):
        sock = socket.socket()
        chan = _Channel(socket=sock)
        self.assertEqual(chan.fileno, sock.fileno())
class TestChannelClose(unittest.TestCase):
    """close() on a socketless channel is a safe no-op."""

    def setUp(self):
        self.chan = _Channel()

    def test_channel_close_does_not_raise_an_exception_with_no_socket(self):
        try:
            self.chan.close()
        except TypeError:
            self.fail("Attempted to remove a socketless channel from the engine.")

    def test_channel_close_does_not_call_on_close(self):
        # on_close must not fire when there was nothing to close.
        self.chan.on_close = MagicMock()
        self.chan.close()
        self.assertRaises(AssertionError, self.chan.on_close.assert_any_call)
class TestChannelSocketSet(unittest.TestCase):
    """_socket_set(): family/type validation and one-time assignment."""

    def setUp(self):
        self.chan = _Channel()

    def _make_sock(self, family=socket.AF_INET, sock_type=socket.SOCK_STREAM):
        # Build a mock socket with the given family/type attributes.
        mock_sock = MagicMock()
        mock_sock.family = family
        mock_sock.type = sock_type
        return mock_sock

    def test_socket_set_with_acceptable_socket(self):
        mock_sock = self._make_sock()
        mock_sock.setblocking = MagicMock()
        self.chan._socket_set(mock_sock)
        self.assertIs(self.chan._socket, mock_sock)
        # The socket must be switched to non-blocking mode.
        mock_sock.setblocking.assert_called_once_with(False)

    def test_socket_set_with_preexisting_socket(self):
        # Re-assigning over an existing socket is an error.
        self.chan._socket = MagicMock()
        self.assertRaises(RuntimeError, self.chan._socket_set, None)

    def test_socket_set_with_unsupported_family(self):
        bad_sock = self._make_sock(family=9001)
        self.assertRaises(ValueError, self.chan._socket_set, bad_sock)

    def test_socket_set_with_unsupported_type(self):
        bad_sock = self._make_sock(sock_type=9001)
        self.assertRaises(ValueError, self.chan._socket_set, bad_sock)
class TestChannelSocketConnect(unittest.TestCase):
    """_socket_connect() outcome for each connect_ex() result code."""

    def setUp(self):
        self.chan = _Channel()
        self.mock_sock = MagicMock()
        self.chan._socket = self.mock_sock

    def _connect_with_code(self, code):
        # Stub connect_ex to report `code`, then run the connect attempt.
        self.mock_sock.connect_ex = MagicMock(return_value=code)
        return self.chan._socket_connect(None)

    def test_connect_ex_returns_success(self):
        self.assertTrue(self._connect_with_code(0))

    def test_connect_ex_returns_EISCONN(self):
        # Already connected counts as success.
        self.assertTrue(self._connect_with_code(errno.EISCONN))

    def test_connect_ex_returns_EAGAIN(self):
        # Would-block codes mean "in progress", reported as False.
        self.assertFalse(self._connect_with_code(errno.EAGAIN))

    def test_connect_ex_returns_EWOULDBLOCK(self):
        self.assertFalse(self._connect_with_code(errno.EWOULDBLOCK))

    def test_connect_ex_returns_EINPROGRESS(self):
        self.assertFalse(self._connect_with_code(errno.EINPROGRESS))

    def test_connect_ex_returns_EALREADY(self):
        self.assertFalse(self._connect_with_code(errno.EALREADY))

    def test_connect_ex_returns_unknown(self):
        # An unrecognized code is converted into a socket.error.
        self.mock_sock.connect_ex = MagicMock(return_value=-1)
        self.assertRaises(socket.error, self.chan._socket_connect, None)

    def test_connect_ex_raises_unknown(self):
        self.mock_sock.connect_ex = MagicMock(side_effect=Exception)
        self.assertRaises(Exception, self.chan._socket_connect, None)

    def test_reraises_unknown_socket_error(self):
        self.mock_sock.connect_ex = MagicMock(side_effect=socket.error(-1))
        self.assertRaises(socket.error, self.chan._socket_connect, None)
class TestChannelSocketBind(unittest.TestCase):
    """_socket_bind() delegates straight to the socket's bind()."""

    def test_bind_is_called(self):
        chan = _Channel()
        mock_sock = MagicMock()
        chan._socket = mock_sock
        chan._socket_bind(None)
        mock_sock.bind.assert_called_once_with(None)
class TestChannelSocketListen(unittest.TestCase):
    """_socket_listen() backlog handling, including the Windows SOMAXCONN cap."""
    def setUp(self):
        self.channel = _Channel()
        self.sock = MagicMock()
        self.sock.listen = MagicMock()
        self.channel._socket = self.sock
    def test_listen_is_called(self):
        self.channel._socket_listen(1)
        self.sock.listen.assert_called_once_with(1)
    # Windows rejects backlogs above SOMAXCONN, so the channel clamps them.
    @unittest.skipUnless(sys.platform.startswith("win"), "Windows-specific functionality.")
    def test_listen_backlog_is_corrected_on_windows(self):
        self.channel._socket_listen(socket.SOMAXCONN+1)
        self.sock.listen.assert_called_once_with(socket.SOMAXCONN)
    @unittest.skipIf(sys.platform.startswith("win"), "Non-Windows-specific functionality.")
    def test_listen_backlog_is_not_corrected_on_other_platforms(self):
        self.channel._socket_listen(socket.SOMAXCONN+1)
        self.sock.listen.assert_called_once_with(socket.SOMAXCONN+1)
class TestChannelSocketClose(unittest.TestCase):
    """_socket_close() teardown: shutdown, close, and state reset."""

    def setUp(self):
        self.chan = _Channel()
        self.mock_sock = MagicMock()
        self.chan._socket = self.mock_sock

    def test_socket_close(self):
        self.chan._socket_close()
        self.assertIsNone(self.chan._socket)
        self.assertTrue(self.chan._closed)

    def test_shutdown_is_called(self):
        self.chan._socket_close()
        self.mock_sock.shutdown.assert_called_once_with(socket.SHUT_RDWR)

    def test_close_is_called(self):
        self.chan._socket_close()
        self.mock_sock.close.assert_called_once_with()

    def test_socket_error_is_raised(self):
        # shutdown() errors must be swallowed by _socket_close().
        self.mock_sock.shutdown = MagicMock(side_effect=socket.error)
        try:
            self.chan._socket_close()
        except socket.error:
            self.fail("socket.error was not caught.")
class TestChannelSocketAccept(unittest.TestCase):
    """_socket_accept() passthrough and would-block translation."""

    def setUp(self):
        self.chan = _Channel()
        self.mock_sock = MagicMock()
        self.chan._socket = self.mock_sock

    def test_socket_accept(self):
        self.mock_sock.accept = MagicMock(return_value=(1, 2, 3))
        self.assertEqual((1, 2, 3), self.chan._socket_accept())

    def test_accept_raises_EAGAIN(self):
        # Would-block errors are reported as an empty (None, None) pair.
        self.mock_sock.accept = MagicMock(side_effect=socket.error(errno.EAGAIN))
        self.assertEqual((None, None), self.chan._socket_accept())

    def test_accept_raises_EWOULDBLOCK(self):
        self.mock_sock.accept = MagicMock(side_effect=socket.error(errno.EWOULDBLOCK))
        self.assertEqual((None, None), self.chan._socket_accept())

    def test_accept_raises_unknown(self):
        self.mock_sock.accept = MagicMock(side_effect=socket.error(-1))
        self.assertRaises(socket.error, self.chan._socket_accept)
class TestChannelSocketRecv(unittest.TestCase):
    """_socket_recv() data passthrough and errno translation."""

    def setUp(self):
        self.chan = _Channel()
        self.mock_sock = MagicMock()
        self.chan._socket = self.mock_sock

    def _recv_with_error(self, err):
        # Make recv raise socket.error(err) and run a receive attempt.
        self.mock_sock.recv = MagicMock(side_effect=socket.error(err))
        return self.chan._socket_recv()

    def test_socket_recv(self):
        self.mock_sock.recv = MagicMock(return_value="foo")
        self.assertEqual("foo", self.chan._socket_recv())
        self.mock_sock.recv.assert_called_once_with(self.chan._recv_amount)

    def test_recv_returns_no_data(self):
        # An empty string from recv means the peer closed: reported as None.
        self.mock_sock.recv = MagicMock(return_value="")
        self.assertIsNone(self.chan._socket_recv())

    def test_recv_raises_EAGAIN(self):
        # Would-block: nothing available yet, reported as "".
        self.assertEqual("", self._recv_with_error(errno.EAGAIN))

    def test_recv_raises_EWOULDBLOCK(self):
        self.assertEqual("", self._recv_with_error(errno.EWOULDBLOCK))

    def test_recv_raises_ECONNRESET(self):
        # Connection reset is treated like a close: None.
        self.assertIsNone(self._recv_with_error(errno.ECONNRESET))

    def test_recv_raises_unknown(self):
        self.mock_sock.recv = MagicMock(side_effect=socket.error(-1))
        self.assertRaises(socket.error, self.chan._socket_recv)
class TestChannelSocketRecvFrom(unittest.TestCase):
    """_socket_recvfrom() data passthrough and errno translation."""

    def setUp(self):
        self.chan = _Channel()
        self.mock_sock = MagicMock()
        self.chan._socket = self.mock_sock

    def _recvfrom_with_error(self, err):
        # Make recvfrom raise socket.error(err) and run a receive attempt.
        self.mock_sock.recvfrom = MagicMock(side_effect=socket.error(err))
        return self.chan._socket_recvfrom()

    def test_socket_recvfrom(self):
        chunk = ("foo", None)
        self.mock_sock.recvfrom = MagicMock(return_value=chunk)
        self.assertEqual(chunk, self.chan._socket_recvfrom())
        self.mock_sock.recvfrom.assert_called_once_with(self.chan._recv_amount)

    def test_recvfrom_returns_no_data(self):
        # Empty data is reported as (None, None).
        self.mock_sock.recvfrom = MagicMock(return_value=("", None))
        self.assertEqual((None, None), self.chan._socket_recvfrom())

    def test_recvfrom_raises_EAGAIN(self):
        # Would-block and reset errors all surface as ("", None).
        self.assertEqual(("", None), self._recvfrom_with_error(errno.EAGAIN))

    def test_recvfrom_raises_EWOULDBLOCK(self):
        self.assertEqual(("", None), self._recvfrom_with_error(errno.EWOULDBLOCK))

    def test_recvfrom_raises_ECONNRESET(self):
        self.assertEqual(("", None), self._recvfrom_with_error(errno.ECONNRESET))

    def test_recvfrom_raises_unknown(self):
        self.mock_sock.recvfrom = MagicMock(side_effect=socket.error(-1))
        self.assertRaises(socket.error, self.chan._socket_recvfrom)
class TestChannelSocketSend(unittest.TestCase):
    """_socket_send() return values and error handling."""
    def setUp(self):
        self.channel = _Channel()
        self.sock = MagicMock()
        self.channel._socket = self.sock
    def test_socket_send(self):
        chunk = "foo"
        self.sock.send = MagicMock(return_value=len(chunk))
        self.assertEqual(self.channel._socket_send(chunk), len(chunk))
        self.sock.send.assert_called_once_with(chunk)
    def test_send_raises_EAGAIN(self):
        # Temporarily unwritable: report 0 bytes and wait for writability.
        self.sock.send = MagicMock(side_effect=socket.error(errno.EAGAIN))
        self.channel._start_waiting_for_write_event = MagicMock()
        result = self.channel._socket_send(None)
        self.assertEqual(result, 0)
        self.channel._start_waiting_for_write_event.assert_called_once_with()
    def test_send_raises_EWOULDBLOCK(self):
        self.sock.send = MagicMock(side_effect=socket.error(errno.EWOULDBLOCK))
        self.channel._start_waiting_for_write_event = MagicMock()
        result = self.channel._socket_send(None)
        self.assertEqual(result, 0)
        self.channel._start_waiting_for_write_event.assert_called_once_with()
    def test_send_raises_EPIPE(self):
        # Broken pipe: the channel is closed without flushing.
        self.sock.send = MagicMock(side_effect=Exception(errno.EPIPE))
        self.channel.close = MagicMock()
        result = self.channel._socket_send(None)
        self.assertEqual(result, 0)
        self.channel.close.assert_called_once_with(flush=False)
    def test_send_raises_unknown(self):
        self.sock.send = MagicMock(side_effect=Exception(-1))
        # Bug fix: pass the data argument so assertRaises exercises send()
        # rather than passing via a missing-parameter TypeError.
        self.assertRaises(Exception, self.channel._socket_send, None)
class TestChannelSocketSendTo(unittest.TestCase):
    """_socket_sendto() return values and error handling.

    Bug fix: the EAGAIN/EWOULDBLOCK/EPIPE/unknown tests previously stubbed
    ``sock.send`` and called ``_socket_send`` (copy-paste from the send
    tests), so they never exercised the sendto path at all.  They now
    stub ``sock.sendto`` and call ``_socket_sendto``.
    """
    def setUp(self):
        self.channel = _Channel()
        self.sock = MagicMock()
        self.channel._socket = self.sock
    def test_socket_sendto(self):
        chunk = "foo"
        args = (chunk, None, None)
        self.sock.sendto = MagicMock(return_value=len(chunk))
        self.assertEqual(self.channel._socket_sendto(*args), len(chunk))
        self.sock.sendto.assert_called_once_with(*args)
    def test_sendto_raises_EAGAIN(self):
        # Temporarily unwritable: report 0 bytes and wait for writability.
        self.sock.sendto = MagicMock(side_effect=socket.error(errno.EAGAIN))
        self.channel._start_waiting_for_write_event = MagicMock()
        result = self.channel._socket_sendto(None, None, None)
        self.assertEqual(result, 0)
        self.channel._start_waiting_for_write_event.assert_called_once_with()
    def test_sendto_raises_EWOULDBLOCK(self):
        self.sock.sendto = MagicMock(side_effect=socket.error(errno.EWOULDBLOCK))
        self.channel._start_waiting_for_write_event = MagicMock()
        result = self.channel._socket_sendto(None, None, None)
        self.assertEqual(result, 0)
        self.channel._start_waiting_for_write_event.assert_called_once_with()
    def test_sendto_raises_EPIPE(self):
        # Broken pipe: the channel is closed without flushing.
        self.sock.sendto = MagicMock(side_effect=Exception(errno.EPIPE))
        self.channel.close = MagicMock()
        result = self.channel._socket_sendto(None, None, None)
        self.assertEqual(result, 0)
        self.channel.close.assert_called_once_with(flush=False)
    def test_sendto_raises_unknown(self):
        self.sock.sendto = MagicMock(side_effect=Exception(-1))
        self.assertRaises(Exception, self.channel._socket_sendto, None, None, None)
class TestChannelSocketSendfile(unittest.TestCase):
    """_socket_sendfile() delegation to pants._channel.sendfile and errors."""
    def setUp(self):
        # Save the module-level sendfile so each test can stub it freely.
        self._sendfile = pants._channel.sendfile
        self.channel = _Channel()
    def tearDown(self):
        pants._channel.sendfile = self._sendfile
    def test_socket_sendfile(self):
        chunk = "foo"
        args = (chunk, None, None, False)
        pants._channel.sendfile = MagicMock(return_value=len(chunk))
        self.assertEqual(self.channel._socket_sendfile(*args), len(chunk))
        pants._channel.sendfile.assert_called_once_with(chunk, self.channel, None, None, False)
    def test_sendfile_raises_EAGAIN(self):
        # Temporarily unwritable: 0 bytes reported, channel waits for write.
        chunk = "foo"
        args = (chunk, None, None, False)
        err = socket.error(errno.EAGAIN)
        err.nbytes = 0  # See issue #43
        pants._channel.sendfile = MagicMock(side_effect=err)
        self.channel._start_waiting_for_write_event = MagicMock()
        result = self.channel._socket_sendfile(*args)
        self.assertEqual(result, 0)
        self.channel._start_waiting_for_write_event.assert_called_once_with()
    def test_sendfile_raises_EWOULDBLOCK(self):
        chunk = "foo"
        args = (chunk, None, None, False)
        err = socket.error(errno.EWOULDBLOCK)
        err.nbytes = 0  # See issue #43
        pants._channel.sendfile = MagicMock(side_effect=err)
        self.channel._start_waiting_for_write_event = MagicMock()
        result = self.channel._socket_sendfile(*args)
        self.assertEqual(result, 0)
        self.channel._start_waiting_for_write_event.assert_called_once_with()
    def test_sendfile_raises_EPIPE(self):
        # Broken pipe: the channel is closed without flushing.
        chunk = "foo"
        args = (chunk, None, None, False)
        pants._channel.sendfile = MagicMock(side_effect=Exception(errno.EPIPE))
        self.channel.close = MagicMock()
        result = self.channel._socket_sendfile(*args)
        self.assertEqual(result, 0)
        self.channel.close.assert_called_once_with(flush=False)
    def test_sendfile_raises_unknown(self):
        pants._channel.sendfile = MagicMock(side_effect=Exception((-1,)))
        # Bug fix: supply the required arguments so the raised error comes
        # from sendfile(), not from a missing-parameter TypeError.
        self.assertRaises(Exception, self.channel._socket_sendfile,
                          "foo", None, None, False)
class TestChannelStartWaitingForWriteEvent(unittest.TestCase):
    """_start_waiting_for_write_event() only notifies the engine on a change."""
    def setUp(self):
        self.channel = _Channel()
    def test_when_write_needs_to_be_added(self):
        self.channel._events = Engine.NONE
        self.channel.engine.modify_channel = MagicMock()
        self.channel._start_waiting_for_write_event()
        self.assertEqual(self.channel._events, Engine.WRITE)
        self.channel.engine.modify_channel.assert_called_once_with(self.channel)
    def test_when_write_doesnt_need_to_be_added(self):
        # Already waiting for write: the engine must not be re-notified.
        self.channel._events = Engine.WRITE
        self.channel.engine.modify_channel = MagicMock()
        self.channel._start_waiting_for_write_event()
        self.assertEqual(self.channel._events, Engine.WRITE)
        # NOTE(review): this asserts only that modify_channel was not called
        # exactly once *with no arguments*; it would still pass if it had
        # been called with arguments.  assertFalse(mock.called) would be a
        # stronger check -- confirm intent before changing.
        self.assertRaises(AssertionError, self.channel.engine.modify_channel.assert_called_once_with)
class TestChannelStopWaitingForWriteEvent(unittest.TestCase):
    """_stop_waiting_for_write_event() only notifies the engine on a change."""
    def setUp(self):
        self.channel = _Channel()
    def test_when_write_needs_to_be_removed(self):
        self.channel._events = Engine.WRITE
        self.channel.engine.modify_channel = MagicMock()
        self.channel._stop_waiting_for_write_event()
        self.assertEqual(self.channel._events, Engine.NONE)
        self.channel.engine.modify_channel.assert_called_once_with(self.channel)
    def test_when_write_doesnt_need_to_be_removed(self):
        # Not waiting for write: the engine must not be re-notified.
        self.channel._events = Engine.NONE
        self.channel.engine.modify_channel = MagicMock()
        self.channel._stop_waiting_for_write_event()
        self.assertEqual(self.channel._events, Engine.NONE)
        # NOTE(review): same weak negative assertion as in the Start test
        # above -- only checks "not called once with no args".
        self.assertRaises(AssertionError, self.channel.engine.modify_channel.assert_called_once_with)
class TestChannelSafelyCall(unittest.TestCase):
    """Tests for _Channel._safely_call."""

    def setUp(self):
        self.channel = _Channel()

    def test_with_no_error(self):
        # Arguments must be forwarded unchanged to the callback.
        positional = (1, 2, 3)
        keyword = {"foo": "bar"}
        callback = MagicMock()
        self.channel._safely_call(callback, *positional, **keyword)
        callback.assert_called_once_with(*positional, **keyword)

    def test_with_an_error(self):
        # A raising callback is still invoked exactly once, and the
        # exception must not escape _safely_call.
        positional = (1, 2, 3)
        keyword = {"foo": "bar"}
        callback = MagicMock(side_effect=Exception())
        self.channel._safely_call(callback, *positional, **keyword)
        callback.assert_called_once_with(*positional, **keyword)
class TestChannelGetSocketError(unittest.TestCase):
    """Tests for _Channel._get_socket_error (SO_ERROR retrieval)."""

    def setUp(self):
        self.channel = _Channel()
        self.sock = MagicMock()
        self.channel._socket = self.sock

    def test_with_no_error(self):
        # getsockopt returning 0 means no pending socket error.
        self.sock.getsockopt = MagicMock(return_value=0)
        err, errstr = self.channel._get_socket_error()
        self.sock.getsockopt.assert_called_once_with(socket.SOL_SOCKET, socket.SO_ERROR)
        self.assertEqual(err, 0)
        self.assertEqual(errstr, "")

    def test_with_an_error(self):
        self.sock.getsockopt = MagicMock(return_value=errno.EAGAIN)
        err, errstr = self.channel._get_socket_error()
        self.sock.getsockopt.assert_called_once_with(socket.SOL_SOCKET, socket.SO_ERROR)
        self.assertEqual(err, errno.EAGAIN)
        # A nonzero errno must come with a human-readable message.
        self.assertNotEqual(errstr, "")
class TestChannelFormatAddress(unittest.TestCase):
    """Tests for _Channel._format_address.

    _format_address is expected to return an (address, family, resolved)
    triple, or raise InvalidAddressFormatError for malformed input.
    """

    def setUp(self):
        self.channel = _Channel()

    @unittest.skipUnless(HAS_UNIX, "Requires support for UNIX sockets.")
    def test_with_unix_address(self):
        path = "/home/example/socket"
        address, family, resolved = self.channel._format_address(path)
        self.assertEqual(address, path)
        self.assertEqual(family, socket.AF_UNIX)
        self.assertEqual(resolved, True)

    @unittest.skipIf(HAS_UNIX, "Requires no support for UNIX sockets.")
    def test_when_unix_address_is_invalid(self):
        path = "/home/example/socket"
        self.assertRaises(InvalidAddressFormatError, self.channel._format_address, path)

    def test_with_port_number(self):
        # A bare port means "listen on all IPv4 interfaces".
        port = 8080
        address, family, resolved = self.channel._format_address(port)
        self.assertEqual(address, ("", port))
        self.assertEqual(family, socket.AF_INET)
        self.assertEqual(resolved, True)

    def test_inaddr_any(self):
        addr = ('', 80)
        address, family, resolved = self.channel._format_address(addr)
        self.assertEqual(address, addr)
        self.assertEqual(family, socket.AF_INET)
        self.assertEqual(resolved, True)

    def test_inaddr6_any(self):
        # A 4-tuple selects the IPv6 address family.
        addr = ('', 80, 1, 2)
        address, family, resolved = self.channel._format_address(addr)
        self.assertEqual(address, addr)
        self.assertEqual(family, socket.AF_INET6)
        self.assertEqual(resolved, True)

    def test_broadcast(self):
        addr = ('<broadcast>', 80)
        address, family, resolved = self.channel._format_address(addr)
        self.assertEqual(address, addr)
        self.assertEqual(family, socket.AF_INET)
        self.assertEqual(resolved, True)

    def test_broadcast6(self):
        addr = ('<broadcast>', 80, 1, 2)
        address, family, resolved = self.channel._format_address(addr)
        self.assertEqual(address, addr)
        self.assertEqual(family, socket.AF_INET6)
        self.assertEqual(resolved, True)

    def test_with_invalid_ipv4_address(self):
        # A non-string host in a 2-tuple is rejected.
        addr = (1, 2)
        self.assertRaises(InvalidAddressFormatError, self.channel._format_address, addr)

    def test_with_ipv4_address(self):
        addr = ('8.8.8.8', 2)
        address, family, resolved = self.channel._format_address(addr)
        self.assertEqual(address, ('8.8.8.8', 2))
        self.assertEqual(family, socket.AF_INET)
        self.assertEqual(resolved, True)

    @unittest.skipUnless(HAS_IPV6, "Requires support for IPv6 sockets.")
    def test_with_invalid_ipv6_address(self):
        addr = (1, 2, 3, 4)
        # BUG FIX: this test previously built `addr` and asserted nothing,
        # so it passed vacuously; it must check that the error is raised,
        # mirroring test_when_ipv6_address_is_invalid below.
        self.assertRaises(InvalidAddressFormatError, self.channel._format_address, addr)

    @unittest.skipUnless(HAS_IPV6, "Requires support for IPv6 sockets.")
    def test_with_ipv6_address(self):
        addr = ('::1', 2, 3, 4)
        address, family, resolved = self.channel._format_address(addr)
        self.assertEqual(address, addr)
        self.assertEqual(family, socket.AF_INET6)
        self.assertEqual(resolved, True)

    @unittest.skipIf(HAS_IPV6, "Requires no support for IPv6 sockets.")
    def test_when_ipv6_address_is_invalid(self):
        addr = (1, 2, 3, 4)
        self.assertRaises(InvalidAddressFormatError, self.channel._format_address, addr)

    def test_with_invalid_addresses(self):
        # None and a 3-tuple match no supported address shape.
        self.assertRaises(InvalidAddressFormatError, self.channel._format_address, None)
        self.assertRaises(InvalidAddressFormatError, self.channel._format_address, (1, 2, 3))
@unittest.skip("Not yet implemented.")
class TestChannelResolveAddress(unittest.TestCase):
@unittest.skipUnless(HAS_UNIX, "Requires support for UNIX sockets.")
def test_resolve_unix_address(self):
self.fail("Not yet implemented.")
def test_resolve_ipv4_address(self):
self.fail("Not yet implemented.")
@unittest.skipUnless(HAS_IPV6, "Requires support for IPv6 sockets.")
def test_resolve_inet6_address(self):
self.fail("Not yet implemented.")
class TestChannelHandleEvents(unittest.TestCase):
    """Tests for _Channel._handle_events dispatching to per-event handlers."""

    def setUp(self):
        self.channel = _Channel()
        self.channel._handle_read_event = MagicMock()
        self.channel._handle_write_event = MagicMock()
        self.channel._handle_error_event = MagicMock()
        self.channel._handle_hangup_event = MagicMock()

    def test_new_events_modify_engine(self):
        # If a handler registers new events while running, _handle_events
        # must push the updated mask back to the engine.
        self.channel.engine.modify_channel = MagicMock()
        def add_events():
            # BUG FIX: this closure previously wrote `self._events`, which
            # set the attribute on the TestCase instance — the channel under
            # test never gained new events and the scenario was not tested.
            self.channel._events = Engine.ALL_EVENTS
        self.channel._handle_read_event = add_events
        self.channel._events = Engine.NONE
        self.channel._handle_events(Engine.READ)
        self.channel.engine.modify_channel.assert_called_once_with(self.channel)

    def test_when_channel_is_closed(self):
        # A closed channel must ignore incoming events entirely.
        self.channel._closed = True
        self.channel._handle_events(Engine.READ)
        self.assertRaises(AssertionError, self.channel._handle_read_event.assert_called_once_with)

    def test_with_no_events(self):
        self.channel._handle_events(Engine.NONE)
        self.assertRaises(AssertionError, self.channel._handle_read_event.assert_called_once_with)
        self.assertRaises(AssertionError, self.channel._handle_write_event.assert_called_once_with)
        self.assertRaises(AssertionError, self.channel._handle_error_event.assert_called_once_with)
        self.assertRaises(AssertionError, self.channel._handle_hangup_event.assert_called_once_with)

    def test_with_all_events(self):
        self.channel._handle_events(Engine.ALL_EVENTS)
        self.channel._handle_read_event.assert_called_once_with()
        self.channel._handle_write_event.assert_called_once_with()
        self.channel._handle_error_event.assert_called_once_with()
        self.channel._handle_hangup_event.assert_called_once_with()

    def test_with_abrupt_close(self):
        # Closing during the error handler must suppress the hangup handler.
        self.channel._handle_error_event = MagicMock(side_effect=self.channel.close)
        self.channel._handle_events(Engine.ALL_EVENTS)
        self.channel._handle_read_event.assert_called_once_with()
        self.channel._handle_write_event.assert_called_once_with()
        self.channel._handle_error_event.assert_called_once_with()
        self.assertRaises(AssertionError, self.channel._handle_hangup_event.assert_called_once_with)
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-argument
"""Generic nn operators"""
from __future__ import absolute_import as _abs
import tvm
from .. import cpp
def _default_schedule(outs, auto_inline):
    """Default schedule for llvm.

    Builds a plain schedule over every output op.  When ``auto_inline`` is
    set, injective ops are inlined and the first output's axes are fused.
    Only valid on the "llvm" and "c" targets.
    """
    target = tvm.target.current_target(allow_none=False)
    if isinstance(outs, tvm.tensor.Tensor):
        outs = [outs]
    if target.target_name not in ("llvm", "c"):
        raise RuntimeError("schedule not registered for '%s'" % target)
    sch = tvm.create_schedule([out.op for out in outs])
    if auto_inline:
        first = outs[0]
        tvm.schedule.AutoInlineInjective(sch)
        sch[first].fuse(sch[first].op.axis)
    return sch
@tvm.target.generic_func
def schedule_conv2d_nchw(outs):
"""Schedule for conv2d_nchw
Parameters
----------
outs: Array of Tensor
The computation graph description of conv2d_nchw
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
@tvm.target.generic_func
def schedule_conv2d_nhwc_pack(outs):
"""Schedule for conv2d_nhwc_pack
Parameters
----------
outs: Array of Tensor
The computation graph description of conv2d_nhwc_pack
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
@tvm.target.generic_func
def schedule_conv2d_nhwc(outs):
    """Schedule for conv2d_nhwc
    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of conv2d_nhwc
        in the format of an array of tensors.
    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    return _default_schedule(outs, False)
@tvm.target.generic_func
def schedule_conv2d_NCHWc(outs):
"""Schedule for conv2d_NCHW[x]c
Parameters
----------
outs : Array of Tensor
The computation graph description of conv2d_NCHWc
in the format of an array of tensors.
The number of filter, i.e., the output channel.
Returns
-------
sch : Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
@tvm.target.generic_func
def schedule_conv2d_NCHWc_int8(outs):
"""Schedule for conv2d_NCHW[x]c_int8
Parameters
----------
outs : Array of Tensor
The computation graph description of conv2d_NCHWc_int8
in the format of an array of tensors.
The number of filter, i.e., the output channel.
Returns
-------
sch : Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
@tvm.target.generic_func
def schedule_conv2d_winograd_weight_transform(outs):
    """Schedule for weight transformation of winograd
    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of this operator
        in the format of an array of tensors.
    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    # Typically this is computed in nnvm PreCompute pass
    # so we make a schedule here for cpu llvm
    s = tvm.create_schedule([x.op for x in outs])
    output = outs[0]
    # The transform op has two inputs; the second (G) is the constant
    # transform matrix, which is inlined into the output computation.
    _, G = s[output].op.input_tensors
    s[G].compute_inline()
    eps, nu, co, ci = s[output].op.axis
    r_kh, r_kw = s[output].op.reduce_axis
    # Put the channel axes outermost so `co` can be parallelized, and
    # fully unroll the small inner transform loops.
    s[output].reorder(co, ci, r_kh, r_kw, eps, nu)
    for axis in [r_kh, r_kw, eps, nu]:
        s[output].unroll(axis)
    s[output].parallel(co)
    return s
@tvm.target.generic_func
def schedule_conv2d_winograd_without_weight_transform(outs):
"""Schedule for winograd without weight transformation
Parameters
----------
outs: Array of Tensor
The computation graph description of this operator
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
@tvm.target.generic_func
def schedule_conv2d_winograd_nnpack_weight_transform(outs):
"""Schedule for weight transformation of winograd
Parameters
----------
outs: Array of Tensor
The computation graph description of this operator
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
# Typically this is computed in nnvm PreCompute pass
s = tvm.create_schedule([x.op for x in outs])
return s
@tvm.target.generic_func
def schedule_conv2d_winograd_nnpack_without_weight_transform(outs):
"""Schedule for winograd without weight transformation
Parameters
----------
outs: Array of Tensor
The computation graph description of this operator
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
@tvm.target.generic_func
def schedule_conv2d_transpose_nchw(outs):
"""Schedule for conv2d_transpose_nchw
Parameters
----------
outs: Array of Tensor
The computation graph description of conv2d_transpose_nchw
in the format of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
@tvm.target.generic_func
def schedule_depthwise_conv2d_nchw(outs):
"""Schedule for depthwise_conv2d_nchw
Parameters
----------
outs: Array of Tensor
The computation graph description of depthwise_conv2d_nchw
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
@tvm.target.generic_func
def schedule_depthwise_conv2d_nhwc(outs):
"""Schedule for depthwise_conv2d_nhwc
Parameters
----------
outs: Array of Tensor
The computation graph description of depthwise_conv2d_nhwc
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
@tvm.target.generic_func
def schedule_depthwise_conv2d_NCHWc(outs):
    """Schedule for depthwise_conv2d_NCHWc
    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of depthwise_conv2d_NCHWc
        in the format of an array of tensors.
    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    return _default_schedule(outs, False)
@tvm.target.generic_func
def schedule_group_conv2d_nchw(outs):
"""Schedule for group_conv2d_nchw
Parameters
----------
outs: Array of Tensor
The computation graph description of group_conv2d_nchw
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
@tvm.target.generic_func
def schedule_deformable_conv2d_nchw(outs):
"""Schedule for deformable_conv2d_nchw
Parameters
----------
outs: Array of Tensor
The computation graph description of deformable_conv2d_nchw
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
@tvm.target.generic_func
def schedule_bitserial_conv2d_nchw(outs):
"""Schedule for bitserial_conv2d_nchw
Parameters
----------
outs: Array of Tensor
The computation graph description of bitserial_conv2d_nchw
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
@tvm.target.generic_func
def schedule_bitserial_conv2d_nhwc(outs):
    """Schedule for bitserial_conv2d_nhwc
    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of bitserial_conv2d_nhwc
        in the format of an array of tensors.
    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    return _default_schedule(outs, False)
@tvm.target.generic_func
def schedule_bitserial_dense(outs):
"""Schedule for bitserial_dense
Parameters
----------
outs: Array of Tensor
The computation graph description of bitserial_dense
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
@tvm.target.override_native_generic_func("schedule_reduce")
def schedule_reduce(outs):
"""Schedule for reduction
Parameters
----------
outs: Array of Tensor
The computation graph description of reduce
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, True)
@tvm.target.override_native_generic_func("schedule_softmax")
def schedule_softmax(outs):
"""Schedule for softmax
Parameters
----------
outs: Array of Tensor
The computation graph description of softmax
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
@tvm.target.override_native_generic_func("schedule_dense")
def schedule_dense(outs):
"""Schedule for dense
Parameters
----------
outs: Array of Tensor
The computation graph description of dense
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
@tvm.target.override_native_generic_func("schedule_pool")
def schedule_pool(outs, layout):
"""Schedule for pool
Parameters
----------
outs: Array of Tensor
The computation graph description of pool
in the format of an array of tensors.
layout: str
Data layout.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
@tvm.target.generic_func
def schedule_pool_grad(outs):
"""Schedule for pool_grad
Parameters
----------
outs: Array of Tensor
The computation graph description of pool
in the format of an array of tensors.
"""
return _default_schedule(outs, False)
@tvm.target.override_native_generic_func("schedule_adaptive_pool")
def schedule_adaptive_pool(outs):
"""Schedule for adaptive pool
Parameters
----------
outs: Array of Tensor
The computation graph description of adaptive pool
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
@tvm.target.override_native_generic_func("schedule_binarize_pack")
def schedule_binarize_pack(outs):
"""Schedule for binarize_pack
Parameters
----------
outs: Array of Tensor
The computation graph description of binarize_pack
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
@tvm.target.override_native_generic_func("schedule_bitpack")
def schedule_bitpack(outs):
"""Schedule for bitpack
Parameters
----------
outs: Array of Tensor
The computation graph description of bitpack
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
@tvm.target.override_native_generic_func("schedule_binary_dense")
def schedule_binary_dense(outs):
"""Schedule for binary_dense
Parameters
----------
outs: Array of Tensor
The computation graph description of binary_dense
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
@tvm.target.generic_func
def schedule_lrn(outs):
"""Schedule for lrn
Parameters
----------
outs: Array of Tensor
The computation graph description of lrn
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
target = tvm.target.current_target(allow_none=False)
cpp_target = cpp.TEST_create_target(target.target_name)
return cpp.generic.default_schedule(cpp_target, outs, False)
@tvm.target.generic_func
def schedule_l2_normalize(outs):
"""Schedule for l2 normalize
Parameters
----------
outs: Array of Tensor
The computation graph description of l2 normalize
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
target = tvm.target.current_target(allow_none=False)
cpp_target = cpp.TEST_create_target(target.target_name)
return cpp.generic.default_schedule(cpp_target, outs, False)
@tvm.target.generic_func
def schedule_sparse_dense(outs):
"""Schedule for sparse_dense
Parameters
----------
outs: Array of Tensor
The computation graph description of sparse_dense
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
@tvm.target.generic_func
def schedule_sparse_transpose(outs):
"""Schedule for sparse_transpose
Parameters
----------
outs: Array of Tensor
The computation graph description of sparse_transpose
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
@tvm.target.generic_func
def schedule_batch_matmul(outs):
    """Schedule for batch_matmul
    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of batch_matmul
        in the format of an array of tensors.
    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    # Unlike the _default_schedule wrappers above, this delegates to the
    # C++ generic default schedule.
    target = tvm.target.current_target(allow_none=False)
    cpp_target = cpp.TEST_create_target(target.target_name)
    return cpp.generic.default_schedule(cpp_target, outs, False)
| |
import json
import pytest
from bson.objectid import ObjectId
from tests import clear_db
from . import jrequest, get_jwt_auth_header
unauthorized_scenarios = [
['GET', '/api/users', 'Authorization Required', 401],
]
@pytest.mark.parametrize(
'method, url, error, status_code ', unauthorized_scenarios)
def test_unauthorized_request(method, url, error, status_code, client):
response = jrequest(method, url, client)
assert response.status_code == status_code
assert json.loads(response.data.decode('utf-8'))['error'] == error
def test_get_users_without_username(client, mock_user):
    """GET /api/users with no filter returns all users (here just one)."""
    clear_db()
    user = mock_user('user', 'password')
    jwt_header = get_jwt_auth_header('user', 'password', client)
    response = json.loads(jrequest(
        'GET', '/api/users', client, jwt_header).data.decode('utf-8'))
    # The endpoint apparently returns a JSON-encoded string, hence the
    # second decode — same pattern as the other tests in this module.
    response = json.loads(response)
    expected = {
        'status_code': 200,
        'data': [{
            'id': str(user.id),
            'username': user.username
        }],
        'description': 'Successful Operation',
    }
    assert sorted(response.items()) == sorted(expected.items())
def test_get_users_specifing_username(client, mock_user):
    """GET /api/users filtered by username.

    NOTE(review): this test is currently byte-identical to
    test_get_users_without_username — it never sends a username filter
    (e.g. a query parameter), so it does not exercise what its name
    claims. Confirm the API's filter syntax and pass the username here.
    """
    clear_db()
    user = mock_user('user', 'password')
    jwt_header = get_jwt_auth_header('user', 'password', client)
    response = json.loads(jrequest(
        'GET', '/api/users', client, jwt_header).data.decode('utf-8'))
    response = json.loads(response)
    expected = {
        'status_code': 200,
        'data': [{
            'id': str(user.id),
            'username': user.username
        }],
        'description': 'Successful Operation',
    }
    assert sorted(response.items()) == sorted(expected.items())
def test_create_an_user_invalid_username(client, mock_user):
clear_db()
user = mock_user('user', 'password')
jwt_header = get_jwt_auth_header('user', 'password', client)
payload = json.dumps({'username': user.username, 'password': 'foo'})
response = jrequest('POST', '/api/users', client, jwt_header, data=payload)
response = json.loads(response.data.decode('utf-8'))
response = json.loads(response)
expected = {
'status_code': 400,
'data': "The user {!r} already exists.".format(user.username),
'error': 'Bad Request'
}
assert sorted(response.items()) == sorted(expected.items())
def test_create_an_user_valid_username(client, mock_user):
clear_db()
mock_user('auth', 'auth')
jwt_header = get_jwt_auth_header('auth', 'auth', client)
payload = json.dumps({'username': 'valid', 'password': 'valid'})
response = jrequest('POST', '/api/users', client, jwt_header, data=payload)
response = json.loads(response.data.decode('utf-8'))
response = json.loads(response)
expected = {
'status_code': 201,
'data': "Created the user 'valid'.",
'description': 'Successfully created'
}
assert sorted(response.items()) == sorted(expected.items())
def test_update_an_user_invalid_username(client, mock_user):
clear_db()
# user to auth
user_to_auth = mock_user('auth', 'auth')
# to test if an username is really unique
user_to_test = mock_user('user', 'password')
jwt_header = get_jwt_auth_header('auth', 'auth', client)
payload = json.dumps({
'user_id': str(user_to_auth.id),
'username': user_to_test.username,
'password': 'password'
})
response = jrequest('PUT', '/api/user', client, jwt_header, data=payload)
response = json.loads(response.data.decode('utf-8'))
response = json.loads(response)
expected = {
'status_code': 400,
'data': "The user {!r} already exists.".format(user_to_test.username),
'error': 'Bad Request'
}
assert sorted(response.items()) == sorted(expected.items())
def test_update_an_user_valid_username(client, mock_user):
clear_db()
user = mock_user('user', 'password')
jwt_header = get_jwt_auth_header('user', 'password', client)
payload = json.dumps({
'user_id': str(user.id),
'username': 'it works',
'password': 'password'
})
response = jrequest('PUT', '/api/user', client, jwt_header, data=payload)
response = json.loads(response.data.decode('utf-8'))
response = json.loads(response)
expected = {
'status_code': 200,
'data': "Updated the user 'it works'.",
'description': 'Successfully updated'
}
assert sorted(response.items()) == sorted(expected.items())
def test_delete_an_user_invalid_user_id(client, mock_user):
    """Deleting a nonexistent user id must yield a 400 Bad Request."""
    clear_db()
    mock_user('user', 'password')
    jwt_header = get_jwt_auth_header('user', 'password', client)
    raw = jrequest(
        'DELETE', '/api/user/{}'.format(ObjectId()), client, jwt_header)
    # The endpoint returns a JSON-encoded string, hence the double decode.
    body = json.loads(json.loads(raw.data.decode('utf-8')))
    expected = {
        'status_code': 400,
        'data': 'Invalid user id.',
        'error': 'Bad Request'
    }
    assert sorted(body.items()) == sorted(expected.items())
def test_delete_an_user_valid_user_id(client, mock_user):
clear_db()
user_to_delete = mock_user('delete', 'delete')
# user_to_auth
mock_user('user', 'password')
jwt_header = get_jwt_auth_header('user', 'password', client)
response = jrequest(
'DELETE', '/api/user/{}'.format(user_to_delete.id), client, jwt_header)
response = json.loads(response.data.decode('utf-8'))
response = json.loads(response)
expected = {
'status_code': 200,
'data': 'User deleted',
'description': 'Successfully deleted'
}
assert sorted(response.items()) == sorted(expected.items())
| |
"""Negative tests for the parametrization."""
import logging
import re
import pytest
from dvc.parsing import ResolveError
from dvc.parsing.context import Context
from dvc.parsing.interpolate import embrace
from dvc.utils.humanize import join
from . import make_entry_definition, make_foreach_def
def escape_ansi(line):
    """Return *line* with ANSI/CSI terminal escape sequences stripped."""
    return re.sub(r"(\x9B|\x1B\[)[0-?]*[ -\/]*[@-~]", "", line)
# Tests for the interpolated entries
@pytest.mark.parametrize("vars_", ["${file}_params.yaml", {"foo": "${foo}"}])
def test_vars_interpolation_errors(tmp_dir, dvc, vars_):
definition = make_entry_definition(tmp_dir, "build", {"vars": [vars_]})
with pytest.raises(ResolveError) as exc_info:
definition.resolve()
assert (
str(exc_info.value)
== "failed to parse 'stages.build.vars' in 'dvc.yaml': "
"interpolating is not allowed"
)
def test_failed_to_interpolate(tmp_dir, dvc):
    # A trailing dot inside ${...} is a syntax error; the resolver must
    # surface pyparsing's message with a caret pointing at the bad char.
    context = Context(models={"foo": "bar"})
    definition = make_entry_definition(
        tmp_dir, "build", {"cmd": "echo ${models.foo.}"}, context
    )
    with pytest.raises(ResolveError) as exc_info:
        definition.resolve()
    assert escape_ansi(str(exc_info.value)) == (
        "failed to parse 'stages.build.cmd' in 'dvc.yaml':\n"
        "${models.foo.}\n"
        " ^\n"
        "ParseException: Expected end of text, found '.'"
        " (at char 12), (line:1, col:13)"
    )
    # The context must be left untouched by the failed resolve.
    assert definition.context == {"models": {"foo": "bar"}}
def test_local_vars_params_file_not_exist(tmp_dir, dvc):
definition = make_entry_definition(
tmp_dir,
"build",
{"vars": ["not_existing_params.yaml"], "cmd": "echo ${models.foo}"},
)
with pytest.raises(ResolveError) as exc_info:
definition.resolve()
assert str(exc_info.value) == (
"failed to parse stage 'build' in 'dvc.yaml': "
"'not_existing_params.yaml' does not exist"
)
assert not definition.context
def test_specified_key_does_not_exist(tmp_dir, dvc):
definition = make_entry_definition(
tmp_dir,
"build",
{"cmd": "echo ${models.foobar}"},
Context(models={"foo": "foo"}),
)
with pytest.raises(ResolveError) as exc_info:
definition.resolve()
assert str(exc_info.value) == (
"failed to parse 'stages.build.cmd' in 'dvc.yaml': "
"Could not find 'models.foobar'"
)
assert definition.context == {"models": {"foo": "foo"}}
@pytest.mark.parametrize(
"wdir, expected_msg",
[
("${models[foobar]}", " Could not find 'models.foobar'"),
(
"${models.foo]}",
"\n${models.foo]}\n"
" ^\n"
"ParseException: Expected end of text, found ']'"
" (at char 12), (line:1, col:13)",
),
],
)
def test_wdir_failed_to_interpolate(tmp_dir, dvc, wdir, expected_msg):
definition = make_entry_definition(
tmp_dir,
"build",
{"wdir": wdir, "cmd": "echo ${models.bar}"},
Context(models={"bar": "bar"}),
)
with pytest.raises(ResolveError) as exc_info:
definition.resolve()
assert escape_ansi(str(exc_info.value)) == (
"failed to parse 'stages.build.wdir' in 'dvc.yaml':" + expected_msg
)
assert definition.context == {"models": {"bar": "bar"}}
def test_interpolate_non_string(tmp_dir, dvc):
definition = make_entry_definition(
tmp_dir, "build", {"cmd": "echo ${models}"}, Context(models={})
)
with pytest.raises(ResolveError) as exc_info:
definition.resolve()
assert str(exc_info.value) == (
"failed to parse 'stages.build.cmd' in 'dvc.yaml':\n"
"Cannot interpolate data of type 'dict'"
)
assert definition.context == {"models": {}}
def test_partial_vars_doesnot_exist(tmp_dir, dvc):
(tmp_dir / "test_params.yaml").dump({"sub1": "sub1", "sub2": "sub2"})
definition = make_entry_definition(
tmp_dir,
"build",
{"vars": ["test_params.yaml:sub3"], "cmd": "echo ${sub1} ${sub2}"},
)
with pytest.raises(ResolveError) as exc_info:
definition.resolve()
assert str(exc_info.value) == (
"failed to parse stage 'build' in 'dvc.yaml': "
"could not find 'sub3' in 'test_params.yaml'"
)
assert not definition.context
# Tests foreach generated stages and their error messages
def test_foreach_data_syntax_error(tmp_dir, dvc):
definition = make_foreach_def(tmp_dir, "build", "${syntax.[error}", {})
with pytest.raises(ResolveError) as exc_info:
definition.resolve_all()
assert escape_ansi(str(exc_info.value)) == (
"failed to parse 'stages.build.foreach' in 'dvc.yaml':\n"
"${syntax.[error}\n"
" ^\n"
"ParseException: Expected end of text, found '.'"
" (at char 8), (line:1, col:9)"
)
@pytest.mark.parametrize("key", ["modelss", "modelss.123"])
def test_foreach_data_key_does_not_exists(tmp_dir, dvc, key):
definition = make_foreach_def(tmp_dir, "build", embrace(key), {})
with pytest.raises(ResolveError) as exc_info:
definition.resolve_all()
assert str(exc_info.value) == (
"failed to parse 'stages.build.foreach' in 'dvc.yaml': "
f"Could not find '{key}'"
)
@pytest.mark.parametrize(
"foreach_data", ["${foo}", "${dct.model1}", "${lst.0}", "foobar"]
)
def test_foreach_data_expects_list_or_dict(tmp_dir, dvc, foreach_data):
context = Context(
{"foo": "bar", "dct": {"model1": "a-out"}, "lst": ["foo", "bar"]}
)
definition = make_foreach_def(tmp_dir, "build", foreach_data, {}, context)
with pytest.raises(ResolveError) as exc_info:
definition.resolve_all()
assert str(exc_info.value) == (
"failed to resolve 'stages.build.foreach' in 'dvc.yaml': "
"expected list/dictionary, got str"
)
@pytest.mark.parametrize(
"global_data, where",
[
({"item": 10, "key": 10}, "item and key are"),
({"item": 10}, "item is"),
({"key": 5}, "key is"),
],
)
def test_foreach_overwriting_item_in_list(
tmp_dir, dvc, caplog, global_data, where
):
context = Context(global_data)
definition = make_foreach_def(
tmp_dir, "build", {"model1": 10, "model2": 5}, {}, context
)
with caplog.at_level(logging.WARNING, logger="dvc.parsing"):
definition.resolve_all()
assert caplog.messages == [
f"{where} already specified, "
"will be overwritten for stages generated from 'build'"
]
def test_foreach_do_syntax_errors(tmp_dir, dvc):
definition = make_foreach_def(
tmp_dir, "build", ["foo", "bar"], {"cmd": "echo ${syntax.[error}"}
)
with pytest.raises(ResolveError) as exc_info:
definition.resolve_all()
assert escape_ansi(str(exc_info.value)) == (
"failed to parse 'stages.build.cmd' in 'dvc.yaml':\n"
"${syntax.[error}\n"
" ^\n"
"ParseException: Expected end of text, found '.'"
" (at char 8), (line:1, col:9)"
)
@pytest.mark.parametrize(
    "key, loc",
    [
        (
            "item.thresh",  # `thresh` is not available on model2
            "stages.build@1.cmd",
        ),
        ("foo.bar", "stages.build@0.cmd"),  # not available on any stages
    ],
)
def test_foreach_do_definition_item_does_not_exist(tmp_dir, dvc, key, loc):
    # Each generated stage resolves `item` independently, so the reported
    # location points at the first stage where the key is missing.
    context = Context(foo="bar")
    definition = make_foreach_def(
        tmp_dir,
        "build",
        [{"thresh": "10"}, {}],
        {"cmd": embrace(key)},
        context,
    )
    with pytest.raises(ResolveError) as exc_info:
        definition.resolve_all()
    assert str(exc_info.value) == (
        f"failed to parse '{loc}' in 'dvc.yaml': Could not find '{key}'"
    )
    # should have no `item` and `key` even though it failed to resolve.
    assert context == {"foo": "bar"}
@pytest.mark.parametrize(
    "redefine",
    [
        {"item": 5},
        {"key": 5},
        {"item": 5, "key": 10},
        {"item": {"epochs": 10}},
    ],
)
@pytest.mark.parametrize("from_file", [True, False])
def test_item_key_in_generated_stage_vars(tmp_dir, dvc, redefine, from_file):
    """Redefining reserved `item`/`key` via stage vars must be rejected."""
    ctx = Context(foo="bar")
    vars_ = [redefine]
    if from_file:
        (tmp_dir / "test_params.yaml").dump(redefine)
        vars_ = ["test_params.yaml"]
    stage_def = make_foreach_def(
        tmp_dir,
        "build",
        {"model1": {"thresh": "10"}, "model2": {"thresh": 5}},
        {"vars": vars_, "cmd": "${item}"},
        ctx,
    )
    with pytest.raises(ResolveError) as exc_info:
        stage_def.resolve_all()
    message = str(exc_info.value)
    expected_prefix = (
        "failed to parse stage 'build@model1' in 'dvc.yaml': "
        "attempted to modify reserved"
    )
    assert expected_prefix in message
    key_or_keys = "key" if len(redefine) == 1 else "keys"
    assert f"{key_or_keys} {join(redefine)}" in message
    if from_file:
        assert "in 'test_params.yaml'" in message
    # The context itself must remain untouched.
    assert ctx == {"foo": "bar"}
def test_foreach_wdir_key_does_not_exist(tmp_dir, dvc):
    """A bad key in `wdir` fails, and the original context stays intact."""
    stage_def = make_foreach_def(
        tmp_dir,
        "build",
        "${models}",
        {"wdir": "${ite}", "cmd": "echo ${item}"},
        Context(models=["foo", "bar"]),
    )
    with pytest.raises(ResolveError) as exc_info:
        stage_def.resolve_all()
    expected = (
        "failed to parse 'stages.build@foo.wdir' in 'dvc.yaml': "
        "Could not find 'ite'"
    )
    assert str(exc_info.value) == expected
    assert stage_def.context == {"models": ["foo", "bar"]}
| |
from pyramid.view import view_config
from confluent_kafka import Producer
from copy import deepcopy
import os
import urllib.request
import requests
import json
import sys
import clincoded.messaging.templates.gci_to_dx, clincoded.messaging.templates.vci_to_dx
# Module-level cache of affiliation records loaded from the UI's
# affiliations.json file (populated lazily by load_affiliation_data)
affiliation_data = []
# Most recently matched affiliation record, memoized by lookup_affiliation_data
# to avoid rescanning affiliation_data for repeated lookups of the same ID
saved_affiliation = []
def includeme(config):
    """Register the messaging routes on the Pyramid configurator and scan
    this module so the @view_config decorated views are picked up."""
    routes = (
        ('publish', '/publish'),
        ('generate-clinvar-data', '/generate-clinvar-data'),
        ('track-data', '/track-data'),
        ('publish-gdm', '/publish-gdm'),
    )
    for route_name, route_pattern in routes:
        config.add_route(route_name, route_pattern)
    config.scan(__name__)
# Retrieve data from search result(s) using a path (list of keys)
def get_data_by_path(data, path, return_no_data=None):
    """Walk `data` (a dict) along `path` (a list of keys) and return the value
    found there, or `return_no_data` when the path cannot be followed."""
    # Path is only usable on a dict, must be a non-empty list, and its first
    # key must exist at the top level.
    usable = isinstance(data, dict) and isinstance(path, list) and path and path[0] in data
    if not usable:
        return return_no_data
    node = data
    for key in path:
        if key not in node:
            return return_no_data
        node = node[key]
    return node
# Check if data (evidence, score, etc.) was created by the target affiliation (from the classification)
def check_data_ownership(data, affiliation):
    """Return True when `data` carries a string 'affiliation' equal to the
    (string) target `affiliation`; False in every other case."""
    if 'affiliation' not in data:
        return False
    owner = data['affiliation']
    return isinstance(owner, str) and isinstance(affiliation, str) and affiliation == owner
# Check if article should be added to evidence dictionary
def is_article_new(evidence, category, annotation):
    """Return True when the annotation's article (identified by PMID) is not
    yet recorded under `category` in `evidence`.

    Side effect: creates an empty list for `category` on first use so the
    caller can append the article metadata afterwards."""
    if 'article' not in annotation or 'pmid' not in annotation['article']:
        return False
    if category not in evidence:
        evidence[category] = []
        return True
    pmid = annotation['article']['pmid']
    return all(publication['pmid'] != pmid for publication in evidence[category])
# Check if scored individual evidence should be added to evidence dictionary
def check_individual_scoring(score, evidence, annotation):
    """Decide whether a scored individual should be recorded in `evidence`.

    Returns (True, category) when the annotation's article is new for the
    derived category, otherwise (False,). 'Score' statuses file under the
    score's caseInfoType; 'Contradicts' statuses file under 'contradicts'."""
    status = score.get('scoreStatus')
    if status == 'Score' and 'caseInfoType' in score:
        category = score['caseInfoType']
        if is_article_new(evidence, category, annotation):
            return (True, category)
    elif status == 'Contradicts':
        if is_article_new(evidence, 'contradicts', annotation):
            return (True, 'contradicts')
    return (False, )
# Check if scored segregation evidence should be added to evidence dictionary
def check_segregation_scoring(family, evidence, annotation):
    """Decide whether a family's segregation evidence should be recorded.

    The LOD score must be flagged for aggregate calculation and a published
    or estimated LOD score must exist; the category is derived from the
    sequencing method. Returns (True, category) or (False,)."""
    segregation = get_data_by_path(family, ['segregation'], {})
    include_lod = segregation.get('includeLodScoreInAggregateCalculation')
    has_lod = 'publishedLodScore' in segregation or 'estimatedLodScore' in segregation
    if include_lod and has_lod:
        method_to_category = {
            'Candidate gene sequencing': 'segregation-candidate-sequencing',
            'Exome/genome or all genes sequenced in linkage region': 'segregation-exome-sequencing',
        }
        category = method_to_category.get(segregation.get('sequencingMethod'))
        if category and is_article_new(evidence, category, annotation):
            return (True, category)
    return (False, )
# Check if scored case control evidence should be added to evidence dictionary
def check_case_control_scoring(case_control, score, evidence, annotation):
    """Decide whether scored case-control evidence should be recorded.

    Side effect: whenever the study type matches and a score is present, the
    per-category '-count' and '-points' totals in `evidence` are updated,
    regardless of whether the article itself is new.
    Returns (True, category) when the article is new, otherwise (False,)."""
    study_type_to_category = {
        'Single variant analysis': 'case-control-single',
        'Aggregate variant analysis': 'case-control-aggregate',
    }
    category = study_type_to_category.get(case_control.get('studyType'))
    if category is not None and 'score' in score:
        count_key = category + '-count'
        evidence[count_key] = evidence.get(count_key, 0) + 1
        points_key = category + '-points'
        evidence[points_key] = evidence.get(points_key, 0) + score['score']
        if is_article_new(evidence, category, annotation):
            return (True, category)
    return (False, )
# Check if scored experimental evidence should be added to evidence dictionary
def check_experimental_scoring(experimental, score, evidence, annotation):
    """Decide whether scored experimental evidence should be recorded.

    The evidence category is derived from the experimental evidenceType and,
    for 'Functional Alteration'/'Model Systems'/'Rescue', from the sub-type
    recorded on the corresponding nested object. Returns (True, category)
    when the annotation's article is new for that category, otherwise (False,).

    Side effect: Model Systems and Rescue matches also increment the shared
    'exp-model-systems-and-rescue-count' total in `evidence`.
    """
    # Maps evidenceType (and, where needed, its sub-type) to the evidence
    # dictionary category key.
    experimental_evidence_types = {
        'Biochemical Function': 'exp-biochemical-function',
        'Protein Interactions': 'exp-protein-interactions',
        'Expression': 'exp-expression',
        'Functional Alteration': {
            'Patient cells': 'exp-functional-alteration-patient-cells',
            'Non-patient cells': 'exp-functional-alteration-non-patient-cells'
        },
        'Model Systems': {
            'Non-human model organism': 'exp-model-systems-non-human-model-organism',
            'Cell culture model': 'exp-model-systems-cell-culture-model'
        },
        'Rescue': {
            'Human': 'exp-rescue-human',
            'Non-human model organism': 'exp-rescue-non-human-model-organism',
            'Cell culture model': 'exp-rescue-cell-culture-model',
            'Patient cells': 'exp-rescue-patient-cells'
        }
    }
    evidence_category = None
    if 'scoreStatus' in score:
        if score['scoreStatus'] == 'Score':
            if 'evidenceType' in experimental:
                if experimental['evidenceType'] in experimental_evidence_types:
                    if experimental['evidenceType'] == 'Functional Alteration':
                        if 'functionalAlteration' in experimental:
                            if 'functionalAlterationType' in experimental['functionalAlteration']:
                                if experimental['functionalAlteration']['functionalAlterationType'] in experimental_evidence_types['Functional Alteration']:
                                    evidence_category = experimental_evidence_types['Functional Alteration'][experimental['functionalAlteration']['functionalAlterationType']]
                    elif experimental['evidenceType'] == 'Model Systems':
                        if 'modelSystems' in experimental:
                            if 'modelSystemsType' in experimental['modelSystems']:
                                if experimental['modelSystems']['modelSystemsType'] in experimental_evidence_types['Model Systems']:
                                    evidence_category = experimental_evidence_types['Model Systems'][experimental['modelSystems']['modelSystemsType']]
                                    # Model Systems and Rescue share one combined count
                                    if 'exp-model-systems-and-rescue-count' not in evidence:
                                        evidence['exp-model-systems-and-rescue-count'] = 0
                                    evidence['exp-model-systems-and-rescue-count'] += 1
                    elif experimental['evidenceType'] == 'Rescue':
                        if 'rescue' in experimental:
                            if 'rescueType' in experimental['rescue']:
                                if experimental['rescue']['rescueType'] in experimental_evidence_types['Rescue']:
                                    evidence_category = experimental_evidence_types['Rescue'][experimental['rescue']['rescueType']]
                                    # Model Systems and Rescue share one combined count
                                    if 'exp-model-systems-and-rescue-count' not in evidence:
                                        evidence['exp-model-systems-and-rescue-count'] = 0
                                    evidence['exp-model-systems-and-rescue-count'] += 1
                    else:
                        # Simple types map straight to a category string
                        evidence_category = experimental_evidence_types[experimental['evidenceType']]
                    if evidence_category is not None:
                        if is_article_new(evidence, evidence_category, annotation):
                            return (True, evidence_category)
        elif score['scoreStatus'] == 'Contradicts':
            if is_article_new(evidence, 'contradicts', annotation):
                return (True, 'contradicts')
    return (False, )
# Retrieve article metadata that will be added to evidence dictionary
def save_article(annotation):
    """Extract publication metadata (title, first author, publication date,
    journal, pmid) from an annotation's article record.

    Returns a dict with whichever of those fields are present; an empty dict
    when the annotation carries no article."""
    publication = {}
    if 'article' not in annotation:
        return publication
    article = annotation['article']
    if 'title' in article:
        publication['title'] = article['title']
    if article.get('authors'):
        # Only the first author is carried into the message
        publication['author'] = article['authors'][0]
    if isinstance(article.get('date'), str):
        # Keep the portion before the first ';' (e.g. "2020 Jan;5(2)" -> "2020 Jan")
        publication['pubdate'] = article['date'].split(';', 1)[0]
    if 'journal' in article:
        publication['source'] = article['journal']
    if 'pmid' in article:
        publication['pmid'] = article['pmid']
    return publication
# Build evidence dictionary
def _gather_individual_scores(individuals, user_affiliation, evidence_publications, annotation):
    """For each individual, record the annotation's article for the first
    affiliation-owned score that qualifies (only the first owned score per
    individual is considered, matching the original inline loops)."""
    for individual in individuals:
        scores = get_data_by_path(individual, ['scores'], [])
        for score in scores:
            if check_data_ownership(score, user_affiliation):
                individual_score = check_individual_scoring(score, evidence_publications, annotation)
                if individual_score[0]:
                    evidence_publications[individual_score[1]].append(save_article(annotation))
                # Stop after the first affiliation-owned score
                break


def _gather_family_evidence(families, user_affiliation, evidence_publications, annotation):
    """Process each family's included individuals, then the family's own
    segregation scoring when the family is owned by the target affiliation."""
    for family in families:
        individuals = get_data_by_path(family, ['individualIncluded'], [])
        _gather_individual_scores(individuals, user_affiliation, evidence_publications, annotation)
        if check_data_ownership(family, user_affiliation):
            segregation_score = check_segregation_scoring(family, evidence_publications, annotation)
            if segregation_score[0]:
                evidence_publications[segregation_score[1]].append(save_article(annotation))


def gather_evidence(data, user_affiliation):
    """Build a dictionary mapping evidence categories to lists of publication
    metadata for all evidence owned by `user_affiliation` within the GDM data.

    Walks every annotation's groups (with nested families/individuals),
    directly-attached families/individuals, case-control studies and
    experimental data. Returns None when no affiliation is provided.
    """
    if not user_affiliation:
        return None
    evidence_publications = {}
    annotations = get_data_by_path(data, ['annotations'], [])
    for annotation in annotations:
        # Families and individuals nested inside groups
        for group in get_data_by_path(annotation, ['groups'], []):
            _gather_family_evidence(get_data_by_path(group, ['familyIncluded'], []),
                                    user_affiliation, evidence_publications, annotation)
            _gather_individual_scores(get_data_by_path(group, ['individualIncluded'], []),
                                      user_affiliation, evidence_publications, annotation)
        # Families and individuals attached directly to the annotation
        _gather_family_evidence(get_data_by_path(annotation, ['families'], []),
                                user_affiliation, evidence_publications, annotation)
        _gather_individual_scores(get_data_by_path(annotation, ['individuals'], []),
                                  user_affiliation, evidence_publications, annotation)
        # Case-control studies (first owned score per study)
        for case_control in get_data_by_path(annotation, ['caseControlStudies'], []):
            scores = get_data_by_path(case_control, ['scores'], [])
            for score in scores:
                if check_data_ownership(score, user_affiliation):
                    case_control_score = check_case_control_scoring(case_control, score, evidence_publications, annotation)
                    if case_control_score[0]:
                        evidence_publications[case_control_score[1]].append(save_article(annotation))
                    break
        # Experimental evidence (first owned score per experiment)
        for experimental in get_data_by_path(annotation, ['experimentalData'], []):
            scores = get_data_by_path(experimental, ['scores'], [])
            for score in scores:
                if check_data_ownership(score, user_affiliation):
                    experimental_score = check_experimental_scoring(experimental, score, evidence_publications, annotation)
                    if experimental_score[0]:
                        evidence_publications[experimental_score[1]].append(save_article(annotation))
                    break
    return evidence_publications
# Build evidence counts dictionary (trimmed from provisional classification points object)
def gather_evidence_counts(points, return_result=False):
    """Prune `points` in place so only positive numeric values whose key
    contains 'evidenceCount' remain; nested dicts are pruned recursively and
    dropped entirely when they end up empty.

    Returns the pruned dict when `return_result` is True, otherwise None."""
    # Iterate over a snapshot of the keys so entries can be deleted in place.
    for key in list(points.keys()):
        value = points[key]
        if isinstance(value, dict):
            gather_evidence_counts(value)
            # Drop sub-dicts that were emptied by the recursive prune
            if not value:
                del points[key]
        elif isinstance(value, (int, float)) and 'evidenceCount' in key and value > 0:
            continue
        else:
            # Non-dict, non-qualifying values (wrong key, zero/negative, strings)
            del points[key]
    return points if return_result else None
# Add a yes/no value and all contradictory evidence to the message template
def add_contradictory_evidence(data, evidence, template):
    """Set template['Value'] to 'YES'/'NO' depending on whether the resource
    reports any contradicting evidence, and attach the gathered contradicting
    publications under template['Evidence'] when available."""
    contradicting = get_data_by_path(data, ['resource', 'contradictingEvidence'], {})
    has_contradiction = any(
        contradicting.get(flag) for flag in ('proband', 'experimental', 'caseControl')
    )
    if has_contradiction:
        template['Value'] = 'YES'
        if 'contradicts' in evidence:
            template['Evidence'] = {
                'Publications': evidence['contradicts']
            }
    else:
        template['Value'] = 'NO'
# Load affiliation data from a JSON file maintained for the UI
def load_affiliation_data():
    """Lazily populate the module-level affiliation_data cache from the
    affiliations JSON file maintained for the UI.

    Best-effort: any failure (missing file, bad JSON) is swallowed and the
    cache is left as-is, since callers tolerate an empty affiliation list.
    """
    global affiliation_data
    if not affiliation_data:
        try:
            # with-statement ensures the file handle is closed; the original
            # json.load(open(...)) leaked it.
            with open('src/clincoded/static/components/affiliation/affiliations.json') as affiliation_file:
                affiliation_data = json.load(affiliation_file)
        except Exception:
            pass
# Add dictionary containing secondary contributors/approver to the message template
def add_secondary_contributors_approver(data, template):
    """Add secondary classification contributors and the additional approver
    (if any) to template['contributors'], resolving names via the cached
    affiliation data. Contributors are sorted by name; the approver entry is
    appended after them. Missing/odd affiliation records are skipped.
    """
    global affiliation_data
    contributors = get_data_by_path(data, ['resource', 'classificationContributors'], [])
    approver = get_data_by_path(data, ['resource', 'additionalApprover'])
    if len(contributors) > 0 or approver:
        load_affiliation_data()
        template['contributors'] = []
        if len(contributors) > 0:
            for affiliation in affiliation_data:
                # try/except guards against affiliation records lacking keys
                try:
                    if affiliation['affiliation_id'] in contributors:
                        template['contributors'].append({
                            'id': affiliation['affiliation_id'],
                            'name': affiliation['affiliation_fullname'],
                            'role': 'secondary contributor'
                        })
                except Exception:
                    pass
            try:
                template['contributors'].sort(key = lambda contributor: contributor['name'])
            except Exception:
                pass
        if approver:
            # The approver ID may match either the GCEP or VCEP subgroup of an
            # affiliation; each check is wrapped separately so a record missing
            # one subgroup is still checked for the other.
            for affiliation in affiliation_data:
                try:
                    if approver == affiliation['subgroups']['gcep']['id']:
                        template['contributors'].append({
                            'id': approver,
                            'name': affiliation['subgroups']['gcep']['fullname'],
                            'role': 'secondary approver'
                        })
                        break
                except Exception:
                    pass
                try:
                    if approver == affiliation['subgroups']['vcep']['id']:
                        template['contributors'].append({
                            'id': approver,
                            'name': affiliation['subgroups']['vcep']['fullname'],
                            'role': 'secondary approver'
                        })
                        break
                except Exception:
                    pass
# Lookup affiliation data associated with a provided ID
def lookup_affiliation_data(affiliation_id, affiliation_key, affiliation_subgroup=None):
    """Return the value of `affiliation_key` for the affiliation matching
    `affiliation_id`, optionally scoped to a subgroup (e.g. 'gcep'/'vcep').

    The matched affiliation record is memoized in the module-level
    saved_affiliation so repeated lookups for the same ID skip the scan.
    Returns None when arguments are falsy or the lookup fails.
    """
    global affiliation_data
    global saved_affiliation
    if affiliation_id and affiliation_key:
        # Rescan only when the memoized record doesn't match the requested ID
        if not saved_affiliation or 'affiliation_id' not in saved_affiliation or affiliation_id != saved_affiliation['affiliation_id']:
            load_affiliation_data()
            for affiliation in affiliation_data:
                # try/except guards against malformed affiliation records
                try:
                    if affiliation_id == affiliation['affiliation_id']:
                        saved_affiliation = affiliation
                        break
                except Exception:
                    pass
        try:
            if affiliation_subgroup:
                return saved_affiliation['subgroups'][affiliation_subgroup][affiliation_key]
            else:
                return saved_affiliation[affiliation_key]
        except Exception:
            pass
        return None
    else:
        return None
# Traverse message template, performing various data retrieval/update operations
def add_data_to_msg_template(data, evidence, evidence_counts, template):
    """Recursively fill `template` in place by interpreting directive lists.

    A template value that is a list starting with a '$...' directive string
    is replaced by the corresponding lookup/transformation against `data`,
    `evidence` or `evidence_counts`; nested dicts (and dicts inside plain
    lists) are processed recursively. Keys whose final value is falsy are
    removed, unless a $SCORE_DATA/$EVIDENCE_DATA directive flagged the value
    to be kept (keep_falsy_data).
    """
    # keep_falsy_data is set by $SCORE_DATA/$EVIDENCE_DATA and consumed by the
    # next falsy-value check; it spares one falsy result from deletion.
    keep_falsy_data = False
    keys_to_delete = []
    for key, value in template.items():
        if isinstance(value, str):
            if value == '':
                keys_to_delete.append(key)
        elif isinstance(value, list):
            value_length = len(value)
            if value_length > 0:
                # Retrieve data using data path lists
                if value[0] == '$PATH_TO_DATA':
                    template[key] = get_data_by_path(data, value[1:])
                # Keep first, non-excluded data found (using data path lists)
                elif value[0] == '$USE_FIRST_DATA':
                    # value[1] is the "excluded" sentinel; value[2:] are paths
                    if value_length > 2:
                        for data_path in value[2:]:
                            temp_result = get_data_by_path(data, data_path)
                            if temp_result not in {value[1], None}:
                                break
                        if temp_result != value[1]:
                            template[key] = temp_result
                        else:
                            template[key] = ''
                    else:
                        template[key] = ''
                # Use one of two provided values, based on data (from a data path list)
                elif value[0] == '$CHECK_FOR_DATA':
                    if value_length == 4:
                        if get_data_by_path(data, value[1]):
                            template[key] = value[2]
                        else:
                            template[key] = value[3]
                    else:
                        template[key] = ''
                # Replace data (from a data path list) using the provided strings
                elif value[0] == '$REPLACE_DATA':
                    if value_length == 4:
                        temp_result = get_data_by_path(data, value[1])
                        if isinstance(temp_result, str):
                            template[key] = temp_result.replace(value[2], value[3])
                        else:
                            template[key] = ''
                    else:
                        template[key] = ''
                # Convert data (from a data path list) using the provided map
                elif value[0] == '$CONVERT_DATA':
                    if value_length == 3:
                        temp_result = get_data_by_path(data, value[1])
                        default_result_key = '$DEFAULT'
                        if temp_result in value[2]:
                            template[key] = value[2][temp_result]
                        elif default_result_key in value[2]:
                            template[key] = value[2][default_result_key]
                        else:
                            template[key] = ''
                    else:
                        template[key] = ''
                # Combine data (from dictionary of data path lists) with a separator
                elif value[0] == '$COMBINE_DATA':
                    if value_length == 3:
                        # Resolve the sub-template first, then join its values
                        add_data_to_msg_template(data, evidence, evidence_counts, value[2])
                        template[key] = value[1].join(value[2].values())
                    else:
                        template[key] = ''
                # Lookup an affiliation name by ID (from a data path list)
                elif value[0] == '$LOOKUP_AFFILIATION_DATA':
                    if value_length == 4:
                        template[key] = lookup_affiliation_data(get_data_by_path(data, value[1]), value[3], value[2])
                    elif value_length == 3:
                        template[key] = lookup_affiliation_data(get_data_by_path(data, value[1]), value[2])
                    else:
                        template[key] = ''
                # Add evidence count (using a data path list)
                elif value[0] == '$EVIDENCE_COUNT':
                    if value_length == 2:
                        template[key] = get_data_by_path(evidence_counts, value[1])
                    else:
                        template[key] = ''
                # Add score (using a data path list)
                elif value[0] == '$SCORE_DATA':
                    if value_length >= 3:
                        template[key] = get_data_by_path(data, value[1])
                        # If score is zero, check if it should be included in message (e.g. if evidence count is non-zero)
                        if template[key] == 0:
                            if value[2] == True:
                                keep_falsy_data = True
                            else:
                                for data_path in value[2:]:
                                    if get_data_by_path(evidence_counts, data_path):
                                        keep_falsy_data = True
                                        break
                    else:
                        template[key] = ''
                # Add evidence (articles, counts or points) based on information type
                elif value[0] == '$EVIDENCE_DATA':
                    if value_length in (2, 3) and value[1] in evidence:
                        template[key] = evidence[value[1]]
                        if not template[key] and value_length == 3 and value[2] == True:
                            keep_falsy_data = True
                    else:
                        template[key] = ''
                else:
                    # Plain list: recurse into any dict elements
                    for element in value:
                        if isinstance(element, dict):
                            add_data_to_msg_template(data, evidence, evidence_counts, element)
            # Save keys with falsy values for later deletion
            if not template[key]:
                if keep_falsy_data:
                    keep_falsy_data = False
                else:
                    keys_to_delete.append(key)
        elif isinstance(value, dict):
            add_data_to_msg_template(data, evidence, evidence_counts, value)
            # Special handling to incorporate contradictory evidence (articles)
            if key == 'ValidContradictoryEvidence':
                add_contradictory_evidence(data, evidence, value)
            # Special handling to incorporate secondary contributors/approver
            elif key == 'summary':
                add_secondary_contributors_approver(data, value)
            if not template[key]:
                keys_to_delete.append(key)
    # Remove keys with no values
    for key in keys_to_delete:
        del template[key]
# Remove unnecessary data from interpretation (before sending it to transformation service)
def remove_data_from_msg_template(delete_list, template):
    """Delete the data addressed by each path in `delete_list` from
    `template` in place.

    Each path is a list of keys; a path ending in a list means "for every
    element of the addressed list, recursively apply that nested delete
    list". Problems with any single path are ignored so the rest of the
    list is still processed.
    """
    for data_path in delete_list:
        try:
            data_to_delete = template
            data_to_delete_check = True
            # Subsequent for loop expects a path (list of keys), not a string
            # NOTE(review): when an element is a string, the WHOLE delete_list
            # is used as the path — i.e. a flat list of strings is treated as
            # one single path of keys. Looks intentional for flat key-lists
            # (repeat iterations become no-ops), but confirm; a per-element
            # wrap like [data_path] may have been meant.
            if isinstance(data_path, str):
                data_path = delete_list
            # Check if data exists at specified path (up to second-to-last element)
            for key in data_path[:-1]:
                if key in data_to_delete:
                    data_to_delete = data_to_delete[key]
                else:
                    data_to_delete_check = False
                    break
            if data_to_delete_check:
                # If last path element is a list, expect remaining data to be structured as a list of dictionaries
                if isinstance(data_path[-1], list):
                    for element in data_to_delete:
                        remove_data_from_msg_template(data_path[-1], element)
                elif data_path[-1] in data_to_delete:
                    del data_to_delete[data_path[-1]]
        # Continue processing deletion list if/when a single path has problems
        except (IndexError, KeyError):
            pass
# Transform interpretation to SEPIO format (via transformation service)
def transform_interpretation(source_data, request_host):
    """Send an interpretation to the VCI-to-SEPIO transformation service and
    return the transformed result as parsed JSON.

    Uses the local service when running on localhost:6543, otherwise the
    hosted AWS endpoint. Raises Exception with a user-facing message on any
    serialization, connectivity, status or decoding failure.
    """
    # Prepare interpretation to be sent to transformation service
    try:
        payload = json.dumps(source_data, separators=(',', ':'))
    except Exception:
        raise Exception('Failed to build complete message')
    # Pick the service endpoint based on the requesting host
    if request_host != 'localhost:6543':
        service_url = 'https://g3xjft14o7.execute-api.us-west-2.amazonaws.com/default/VCI-to-CG_SEPIO'
    else:
        service_url = 'http://localhost:3000'
    # Send interpretation to transformation service
    try:
        transform_result = requests.post(
            '{}/vci2cgsepio'.format(service_url),
            headers={'Content-Type': 'application/json'},
            data=payload,
            timeout=10,
        )
    except Exception:
        raise Exception('Data transformation service unavailable')
    if transform_result.status_code != requests.codes.ok:
        raise Exception('Data transformation failed')
    # Return result of transformation service as JSON-encoded content
    try:
        return transform_result.json()
    except Exception:
        raise Exception('Result of data transformation not in expected format')
# Generate ClinVar submission data for interpretation (via ClinVar submitter service)
def request_clinvar_data(source_data):
    """Send an interpretation to the ClinVar submitter service and return the
    generated submission data as parsed JSON.

    Raises Exception with a user-facing message on any serialization,
    connectivity, status or decoding failure.
    """
    # Prepare interpretation to be sent to ClinVar submitter service
    try:
        payload = json.dumps(source_data, separators=(',', ':'))
    except Exception:
        raise Exception('Preparation of source data for generation service failed')
    # Send interpretation to ClinVar submitter service
    service_url = 'http://clinvar-submitter.clinicalgenome.org/api/v1/submission'
    try:
        clinvar_result = requests.post(
            '{}'.format(service_url),
            headers={'Content-Type': 'application/json'},
            data=payload,
            timeout=10,
        )
    except Exception:
        raise Exception('Data generation service unavailable')
    if clinvar_result.status_code != requests.codes.ok:
        raise Exception('Data generation failed')
    # Return result of ClinVar submitter service as JSON-encoded content
    try:
        return clinvar_result.json()
    except Exception:
        raise Exception('Result of data generation not in expected format')
# Publish the message
@view_config(route_name='publish', request_method='GET')
def publish(request):
    """Publish a classification or interpretation to the ClinGen Data Exchange.

    Retrieves the record (by 'type' and 'uuid' request params) from the local
    Elasticsearch index, builds a message from the matching template,
    transforms interpretations via the SEPIO service, and delivers the message
    to Kafka. Returns a status dict; on any failure the dict carries
    {'status': 'Fail'} and a human-readable message.
    """
    elasticsearch_server = 'http://localhost:9200/clincoded'
    return_object = {'status': 'Fail',
                     'message': 'Unable to deliver message'}
    # Check that required parameters have been provided
    if not('type' in request.params and 'uuid' in request.params):
        return_object['message'] = 'Required parameters missing in request'
        return return_object
    # Attempt to retrieve data (from Elasticsearch)
    try:
        searchRes = requests.get('{}/{}/{}'.format(elasticsearch_server, request.params['type'], request.params['uuid']), timeout=10)
        if searchRes.status_code != requests.codes.ok:
            return_object['message'] = 'Data search failed'
            return return_object
    except Exception as e:
        return_object['message'] = 'Data search could not be completed'
        return return_object
    # Store JSON-encoded content of search result(s)
    try:
        resultJSON = searchRes.json()
    except Exception as e:
        return_object['message'] = 'Retrieved data not in expected format'
        return return_object
    # Check that search found data
    if 'found' not in resultJSON or not(resultJSON['found']):
        return_object['message'] = 'Requested data could not be found'
        return return_object
    # Check that data has expected elements
    try:
        data_type_to_publish = resultJSON['_source']['embedded']['resourceType']
        if data_type_to_publish == 'classification':
            evidence_to_publish = resultJSON['_source']['embedded']['resourceParent']['gdm']
            publishing_affiliation = resultJSON['_source']['embedded']['resource']['affiliation']
            evidence_counts_to_publish = resultJSON['_source']['embedded']['resource']['classificationPoints']
        elif data_type_to_publish == 'interpretation':
            evidence_to_publish = resultJSON['_source']['embedded']['resourceParent']['interpretation']
        else:
            raise Exception
    except Exception as e:
        return_object['message'] = 'Retrieved data missing expected elements'
        return return_object
    # Check that message should be sent? (approved status? permission to publish?)
    # Construct message
    try:
        if data_type_to_publish == 'interpretation':
            message_template = deepcopy(clincoded.messaging.templates.vci_to_dx.message_template)
            data_to_remove = clincoded.messaging.templates.vci_to_dx.data_to_remove
            add_data_to_msg_template(resultJSON['_source']['embedded'], None, None, message_template)
        else:
            message_template = deepcopy(clincoded.messaging.templates.gci_to_dx.message_template)
            # Copy points so pruning does not mutate the retrieved record
            classification_points = deepcopy(evidence_counts_to_publish)
            add_data_to_msg_template(resultJSON['_source']['embedded'], gather_evidence(evidence_to_publish, publishing_affiliation),
                gather_evidence_counts(classification_points, True), message_template)
        message = json.dumps(message_template, separators=(',', ':'))
    except Exception as e:
        return_object['message'] = 'Failed to build complete message'
        return return_object
    # Transform message (if necessary, via independent service)
    try:
        if data_type_to_publish == 'interpretation':
            remove_data_from_msg_template(data_to_remove, message_template['interpretation'])
            message_template['interpretation'] = transform_interpretation(message_template['interpretation'], request.host)
            message = json.dumps(message_template, separators=(',', ':'))
    except Exception as e:
        if e.args:
            return_object['message'] = e.args
        else:
            return_object['message'] = 'Failed to build complete message'
        return return_object
    # Configure message delivery parameters
    kafka_cert_pw = ''
    if 'KAFKA_CERT_PW' in os.environ:
        kafka_cert_pw = os.environ['KAFKA_CERT_PW']
    # Local development defaults; overridden below for non-local hosts
    kafka_conf = {'bootstrap.servers': 'localhost:9093',
                  'log_level': 0,
                  'security.protocol': 'ssl',
                  'ssl.key.location': 'etc/certs/client.key',
                  'ssl.key.password': kafka_cert_pw,
                  'ssl.certificate.location': 'etc/certs/client.crt',
                  'ssl.ca.location': 'etc/certs/server.crt'}
    kafka_topic = 'test'
    kafka_timeout = 10
    if request.host != 'localhost:6543':
        kafka_conf = {'bootstrap.servers': 'exchange.clinicalgenome.org:9093',
                      'log_level': 0,
                      'security.protocol': 'ssl',
                      'ssl.key.location': 'etc/certs/dataexchange/client.key',
                      'ssl.key.password': kafka_cert_pw,
                      'ssl.certificate.location': 'etc/certs/dataexchange/client.crt',
                      'ssl.ca.location': 'etc/certs/dataexchange/server.crt'}
        if data_type_to_publish == 'interpretation':
            kafka_topic = 'variant_interpretation'
        else:
            kafka_topic = 'gene_validity'
        # Non-production hosts publish to the '_dev' variant of the topic
        if request.host != 'curation.clinicalgenome.org':
            kafka_topic += '_dev'
    # Send message
    p = Producer(**kafka_conf)
    # Callback invoked by flush(); rebinds return_object with the outcome
    def delivery_callback(err, msg):
        nonlocal return_object
        if err:
            return_object['message'] = err
        else:
            return_object = {'status': 'Success',
                             'message': message,
                             'partition': msg.partition(),
                             'offset': msg.offset()}
    try:
        p.produce(kafka_topic, message, callback=delivery_callback)
        p.flush(kafka_timeout)
        return return_object
    except Exception as e:
        return_object['message'] = 'Message delivery failed'
        return return_object
# Publish GDM - send GDM snapshot data (message) to Data Exchange when GDM is published
# Publish GDM - send GDM snapshot data (message) to Data Exchange when GDM is published
@view_config(route_name='publish-gdm', request_method='POST')
def publish_gdm(request):
    """Send a published GDM snapshot (the POSTed JSON body) to the ClinGen
    Data Exchange via Kafka, keyed by snapshot uuid + publish date.

    Returns a status dict; delivery problems are reported with
    {'status': 'Error'} and logged to stderr.
    """
    return_object = {'status': 'Error',
                     'message': 'Unable to deliver full GDM'}
    # Store JSON-encoded content of data
    try:
        resultJSON = request.json
    except Exception as e:
        sys.stderr.write('**** publish-gdm: Error sending data to Data Exchange - data not in expected format ****\n')
        return_object['message'] = 'Retrieved data not in expected format'
        return return_object
    # Construct message
    try:
        message = json.dumps(resultJSON, separators=(',', ':'))
    except Exception as e:
        sys.stderr.write('**** publish-gdm: Error sending data to Data Exchange - failed to build complete message ****\n')
        if e.args:
            return_object['message'] = e.args
        else:
            return_object['message'] = 'Failed to build complete message'
        return return_object
    return_object = {'status': 'Error',
                     'message': message,
                     'error': 'Unable to deliver full GDM'}
    # Set snapshot uuid and publish date as message key
    key = resultJSON['uuid'] + '-' + resultJSON['resource']['publishDate']
    # Configure message delivery parameters
    kafka_cert_pw = ''
    if 'KAFKA_CERT_PW' in os.environ:
        kafka_cert_pw = os.environ['KAFKA_CERT_PW']
    # Local development defaults; overridden below for non-local hosts.
    # BUG FIX: the original dict literal ended with a stray trailing comma,
    # which made kafka_conf a one-element tuple and broke Producer(**kafka_conf)
    # when running on localhost.
    kafka_conf = {'bootstrap.servers': 'localhost:9093',
                  'log_level': 0,
                  'compression.type': 'gzip',
                  'security.protocol': 'ssl',
                  'ssl.key.location': 'etc/certs/client.key',
                  'ssl.key.password': kafka_cert_pw,
                  'ssl.certificate.location': 'etc/certs/client.crt',
                  'ssl.ca.location': 'etc/certs/server.crt'}
    kafka_topic = 'test'
    kafka_timeout = 10
    if request.host != 'localhost:6543':
        kafka_conf = {'bootstrap.servers': 'exchange.clinicalgenome.org:9093',
                      'log_level': 0,
                      'compression.type': 'gzip',
                      'security.protocol': 'ssl',
                      'ssl.key.location': 'etc/certs/dataexchange/client.key',
                      'ssl.key.password': kafka_cert_pw,
                      'ssl.certificate.location': 'etc/certs/dataexchange/client.crt',
                      'ssl.ca.location': 'etc/certs/dataexchange/server.crt'}
        # kafka topic (non-production hosts publish to the '_dev' variant)
        kafka_topic = 'gene_validity_raw'
        if request.host != 'curation.clinicalgenome.org':
            kafka_topic += '_dev'
    # Send message
    p = Producer(**kafka_conf)
    # Callback invoked by flush(); rebinds return_object with the outcome
    def delivery_callback(err, msg):
        nonlocal return_object
        if err:
            return_object = {'status': 'Error',
                             'message': message,
                             'error': err}
        else:
            return_object = {'status': 'Success',
                             'message': message,
                             'partition': msg.partition(),
                             'offset': msg.offset()}
    try:
        p.produce(kafka_topic, message, key, callback=delivery_callback)
        p.flush(kafka_timeout)
        if return_object['status'] == 'Error':
            sys.stderr.write('**** publish-gdm: Error sending data to Data Exchange - kafka sever error\n Data - %s \n ****\n' % message)
        return return_object
    except Exception as e:
        return_object['message'] = 'Message delivery failed'
        sys.stderr.write('**** publish-gdm: Error sending data to Data Exchange - delivery failed\n Data - %s \n ****\n' % message)
        return return_object
# Generate data for a ClinVar submission file
@view_config(route_name='generate-clinvar-data', request_method='GET')
def generate_clinvar_data(request):
    """Build and return the data set for a ClinVar submission.

    Looks up a document in the local Elasticsearch index (identified by the
    ``type`` and ``uuid`` request parameters), verifies it is an
    interpretation record, runs it through the VCI-to-DX message template
    transforms and returns either a success payload carrying the generated
    data set or a failure payload with a diagnostic message.
    """
    es_base_url = 'http://localhost:9200/clincoded'
    return_object = {'status': 'Fail',
                     'message': 'Unable to generate data'}
    # Both parameters are mandatory; bail out early when either is absent.
    if 'type' not in request.params or 'uuid' not in request.params:
        return_object['message'] = 'Required parameters missing in request'
        return return_object
    # Look the document up in Elasticsearch.
    try:
        response = requests.get('{}/{}/{}'.format(
            es_base_url, request.params['type'], request.params['uuid']), timeout=10)
    except Exception:
        return_object['message'] = 'Data search could not be completed'
        return return_object
    if response.status_code != requests.codes.ok:
        return_object['message'] = 'Data search failed'
        return return_object
    # Decode the JSON body of the search result.
    try:
        result = response.json()
    except Exception:
        return_object['message'] = 'Retrieved data not in expected format'
        return return_object
    # Elasticsearch flags a missing document via the "found" member.
    if 'found' not in result or not result['found']:
        return_object['message'] = 'Requested data could not be found'
        return return_object
    # Only interpretation records can be turned into a ClinVar submission.
    try:
        if result['_source']['embedded']['resourceType'] != 'interpretation':
            raise Exception
    except Exception:
        return_object['message'] = 'Retrieved data missing expected elements'
        return return_object
    # Check that data can be submitted to ClinVar? (approved status? permission to generate?)
    # Fill a fresh copy of the message template with the interpretation data.
    try:
        template = deepcopy(clincoded.messaging.templates.vci_to_dx.message_template)
        removable = clincoded.messaging.templates.vci_to_dx.data_to_remove
        add_data_to_msg_template(result['_source']['embedded'], None, None, template)
    except Exception:
        return_object['message'] = 'Failed to build complete data set'
        return return_object
    # Transform the collected data into the ClinVar submission format.
    try:
        remove_data_from_msg_template(removable, template['interpretation'])
        template['interpretation'] = transform_interpretation(template['interpretation'], request.host)
        return_object = {'status': 'Success',
                         'message': request_clinvar_data(template['interpretation'])}
    except Exception as e:
        # Surface the exception arguments when present, otherwise a generic message.
        return_object['message'] = e.args if e.args else 'Failed to build complete data set'
    return return_object
# track data - send GCI events tracking data(message) to Data Exchange
# Events include GDM is created, classification is provisionally approved, approved, published, and unpublished
@view_config(route_name='track-data', request_method='POST')
def track_data(request):
    """Publish a GCI event-tracking message to the ClinGen Data Exchange.

    The POST body must be a JSON document describing the event; it must
    contain at least 'report_id' and 'date' members (used to build the Kafka
    message key). Returns a dict whose 'status' member is 'Success' or
    'Error', together with the message and delivery details (partition,
    offset) or an error description.
    """
    elasticsearch_server = 'http://localhost:9200/clincoded'  # NOTE(review): unused in this view - confirm it can be removed
    return_object = {'status': 'Error',
                     'message': 'Unable to deliver track data'}
    # Store JSON-encoded content of data
    try:
        resultJSON = request.json
    except Exception as e:
        sys.stderr.write('**** track-data: Error sending data to Data Exchange - data not in expected format ****\n')
        return_object['message'] = 'Retrieved data not in expected format'
        return return_object
    # Construct message (compact JSON, no whitespace between separators)
    try:
        message = json.dumps(resultJSON, separators=(',', ':'))
    except Exception as e:
        sys.stderr.write('**** track-data: Error sending data to Data Exchange - failed to build complete message ****\n')
        if e.args:
            return_object['message'] = e.args
        else:
            return_object['message'] = 'Failed to build complete message'
        return return_object
    # Default result; overwritten by the Kafka delivery callback below.
    return_object = {'status': 'Error',
                     'message': message,
                     'error': 'Unable to deliver track data'}
    # Set GDM uuid and action date as message key
    key = resultJSON['report_id'] + '-' + resultJSON['date']
    # Configure message delivery parameters.
    # The client certificate password comes from the environment, if set.
    kafka_cert_pw = ''
    if 'KAFKA_CERT_PW' in os.environ:
        kafka_cert_pw = os.environ['KAFKA_CERT_PW']
    # Local development defaults (overridden below for non-local hosts).
    kafka_conf = {'bootstrap.servers': 'localhost:9093',
                  'log_level': 0,
                  'security.protocol': 'ssl',
                  'ssl.key.location': 'etc/certs/client.key',
                  'ssl.key.password': kafka_cert_pw,
                  'ssl.certificate.location': 'etc/certs/client.crt',
                  'ssl.ca.location': 'etc/certs/server.crt'}
    kafka_topic = 'test'
    kafka_timeout = 10
    # Any deployment other than local dev talks to the real Data Exchange broker.
    if request.host != 'localhost:6543':
        kafka_conf = {'bootstrap.servers': 'exchange.clinicalgenome.org:9093',
                      'log_level': 0,
                      'security.protocol': 'ssl',
                      'ssl.key.location': 'etc/certs/dataexchange/client.key',
                      'ssl.key.password': kafka_cert_pw,
                      'ssl.certificate.location': 'etc/certs/dataexchange/client.crt',
                      'ssl.ca.location': 'etc/certs/dataexchange/server.crt'}
        # kafka topic: production uses the bare topic, everything else the _dev variant
        kafka_topic = 'gene_validity_events'
        if request.host != 'curation.clinicalgenome.org':
            kafka_topic += '_dev'
    # Send message
    p = Producer(**kafka_conf)
    # Delivery callback invoked synchronously from p.flush(); rebinds
    # return_object with the final delivery outcome.
    def delivery_callback(err, msg):
        nonlocal return_object
        if err:
            return_object = {'status': 'Error',
                             'message': message,
                             'error': err}
        else:
            return_object = {'status': 'Success',
                             'message': message,
                             'partition': msg.partition(),
                             'offset': msg.offset()}
    try:
        p.produce(kafka_topic, message, key, callback=delivery_callback)
        # flush() waits up to kafka_timeout seconds for the delivery report.
        p.flush(kafka_timeout)
        if return_object['status'] == 'Error':
            sys.stderr.write('**** track-data: Error sending data to Data Exchange - kafka sever error\n Data - %s \n ****\n' % message)
        return return_object
    except Exception as e:
        return_object['message'] = 'Message delivery failed'
        sys.stderr.write('**** track-data: Error sending data to Data Exchange - delivery failed\n Data - %s \n ****\n' % message)
        return return_object
| |
import socket
import random
from pprint import pformat
from errno import EAGAIN, EWOULDBLOCK
from Queue import Queue
from Queue import Empty as QueueEmpty
class UnknownBrokerResponseError(Exception):
    """Raised when the broker sends a response we cannot interpret."""
class BrokerErrorResponse(Exception):
    """Raised when the broker explicitly reports an ERROR frame."""
class IntermediateMessageQueue(object):
    """Buffer for message frames read off the socket.

    Holds MESSAGE frames received from the server so that a message is
    never mistaken for a command response after a receipt request has
    been issued.
    """

    def __init__(self):
        self._queue = Queue()

    def put(self, frame):
        """Queue *frame*, silently dropping frames without a destination.

        :param frame: A :class:`Frame` instance.
        """
        if "destination" in frame.headers:
            self._queue.put(frame)

    def get(self, frame, nb=False):
        """Return the next buffered frame, or read one from the socket.

        Falls back to ``frame.parse_frame`` when the buffer is empty.

        :param frame: A :class:`Frame` instance.
        :keyword nb: Non-blocking.
        """
        try:
            return self._queue.get_nowait()
        except QueueEmpty:
            return frame.parse_frame(nb=nb)
class Frame(object):
    """Build and manage a STOMP Frame.

    A frame is the protocol unit exchanged with the broker: a command
    line, ``key:value`` headers, a blank line and a NUL-terminated body.
    This class doubles as a frame factory/serializer and as the low-level
    socket reader used by the client.

    :keyword sock: An open socket to the STOMP server.
    """
    def __init__(self, sock=None):
        # Frame content (filled in by build_frame / parse_frame).
        self.command = None
        self.headers = {}
        self.body = None
        # CONNECT reply headers; holds the broker session id.
        self.session = None
        # Local IP address, sent to the broker in the 'x-client' header.
        self.my_name = socket.gethostbyname(socket.gethostname())
        self.sock = sock
        # Buffers MESSAGE frames received while waiting for a command reply.
        self.iqueue = IntermediateMessageQueue()
        # Buffers command replies received while waiting for a message.
        self.rqueue = Queue()
    def connect(self, sock, username=None, password=None, clientid=None):
        """Connect to the STOMP server and get the session id.

        :param sock: Socket object from stompy.stomp.Stomp.
        :keyword username: Username for connection.
        :keyword password: Password for connection.
        :keyword clientid: Client identification for persistent connections.
        """
        self.sock = sock
        headers = {}
        if username and password:
            headers.update({'login': username,
                            'passcode': password})
        if clientid:
            headers.update({'client-id' : clientid})
        frame = self.build_frame({"command": "CONNECT", "headers": headers})
        self.send_frame(frame.as_string())
        # Get session from the next reply from the server.
        next_frame = self.get_reply()
        self.session = next_frame.headers
    def build_frame(self, args, want_receipt=False):
        """Build a frame based on a :class:`dict` of arguments.

        :param args: A :class:`dict` with optional keys 'command',
            'headers' and 'body'.
        :keyword want_receipt: Optional argument to get a receipt from
            the server that the frame was received; adds a unique
            'receipt' header derived from the session id.

        Example
        >>> frame = frameobj.build_frame({"command": 'CONNECT',
        ...                               "headers": {}},
        ...                              want_receipt=True)
        """
        self.command = args.get('command')
        self.headers = args.get('headers')
        self.body = args.get('body')
        if want_receipt:
            receipt_stamp = str(random.randint(0, 10000000))
            self.headers["receipt"] = "%s-%s" % (
                self.session.get("session"), receipt_stamp)
        return self
    def as_string(self):
        """Raw string representation of this frame.

        Suitable for passing over a socket to the STOMP server.

        Example
        >>> stomp.send(frameobj.as_string())
        """
        command = self.command
        headers = self.headers
        body = self.body
        bytes_message = False
        # A 'bytes_message' marker header requests a binary message:
        # it is stripped here and replaced by an explicit content-length.
        if 'bytes_message' in headers:
            bytes_message = True
            del headers['bytes_message']
            headers['content-length'] = len(body)
        headers['x-client'] = self.my_name
        # Convert and append any existing headers to a string as the
        # protocol describes.
        headerparts = ("%s:%s\n" % (key, value)
                       for key, value in headers.iteritems())
        # Frame is Command + Header + EOF marker.
        frame = "%s\n%s\n%s\x00" % (command, "".join(headerparts), body)
        return frame
    def get_message(self, nb=False):
        """Get next message frame.

        Non-message frames read along the way are parked on the reply
        queue for get_reply() to pick up.

        :keyword nb: Non-blocking: If this is set and there is no
            messages currently waiting, this functions returns ``None``
            instead of waiting for more data.
        """
        while True:
            frame = self.iqueue.get(self, nb=nb)
            if not frame and nb:
                return None
            if frame.command == "MESSAGE":
                return frame
            else:
                self.rqueue.put(frame)
    def get_reply(self, nb=False):
        """Get command reply frame.

        Message frames read along the way are parked on the intermediate
        message queue for get_message() to pick up.

        :keyword nb: Non-blocking: If this is set and there is no
            messages currently waiting, this functions returns ``None``
            instead of waiting for more data.
        """
        while True:
            try:
                return self.rqueue.get_nowait()
            except QueueEmpty:
                frame = self.parse_frame(nb=nb)
                if not frame and nb:
                    return None
                if frame.command == "MESSAGE":
                    self.iqueue.put(frame)
                else:
                    self.rqueue.put(frame)
    def parse_frame(self, nb=False):
        """Parse data from socket into a new :class:`Frame` instance.

        :keyword nb: Non-blocking: If this is set and there is no
            messages currently waiting, this functions returns ``None``
            instead of waiting for more data.

        :raises UnknownBrokerResponseError: when no headers can be found.
        :raises BrokerErrorResponse: when the broker sent an ERROR frame.

        Example
        >>> frameobj.parse_frame()
        """
        line = self._getline(nb=nb)
        if not line:
            return
        command = self.parse_command(line)
        # Strip the command and its trailing newline; the rest is
        # "headers\n\nbody".
        line = line[len(command)+1:]
        headers_str, _, body = line.partition("\n\n")
        if not headers_str:
            raise UnknownBrokerResponseError(
                "Received: (%s)" % line)
        headers = self.parse_headers(headers_str)
        # content-length implies a binary (bytes) message body.
        if 'content-length' in headers:
            headers['bytes_message'] = True
        if command == 'ERROR':
            raise BrokerErrorResponse(
                "Broker Returned Error: %s" % body)
        frame = Frame(self.sock)
        return frame.build_frame({'command': command,
                                  'headers': headers,
                                  'body': body})
    def parse_command(self, command_str):
        """Parse command received from the server (the first line of a frame).

        :param command_str: String to parse command from.
        """
        command = command_str.split('\n', 1)[0]
        return command
    def parse_headers(self, headers_str):
        """Parse headers received from the server and convert
        to a :class:`dict`.

        :param headers_str: String to parse headers from.
        """
        # george:constanza\nelaine:benes
        # -> {"george": "constanza", "elaine": "benes"}
        return dict(line.split(":", 1) for line in headers_str.split("\n"))
    def send_frame(self, frame):
        """Send frame to server, get receipt if needed.

        :param frame: Raw frame string (see as_string()) to pass across
            the socket.
        """
        self.sock.sendall(frame)
        if 'receipt' in self.headers:
            return self.get_reply()
    def _getline(self, nb=False):
        """Get a single NUL-terminated frame string from the socket.

        Reads one byte at a time until the frame terminator '\\x00'.

        :keyword nb: Non-blocking: If this is set, and there are no
            messages to receive, this function returns ``None``.
        """
        self.sock.setblocking(not nb)
        try:
            buffer = ''
            partial = ''
            while not buffer.endswith('\x00'):
                try:
                    partial = self.sock.recv(1)
                    if not partial or partial == '':
                        raise UnknownBrokerResponseError('empty reply')
                except socket.error, exc:
                    # EAGAIN/EWOULDBLOCK: no data right now in
                    # non-blocking mode; keep polling unless nothing at
                    # all has been received yet.
                    if exc[0] == EAGAIN or exc[0] == EWOULDBLOCK:
                        if not buffer or buffer == '\n':
                            raise UnknownBrokerResponseError('empty reply')
                        continue
                buffer += partial
        finally:
            # NOTE(review): this restores blocking to `nb`, but the call
            # above set it to `not nb` -- the socket mode is inverted on
            # exit. Looks like a bug; confirm intended behaviour.
            self.sock.setblocking(nb)
        # ** Nasty Alert **
        # There may be a left over newline
        # RabbitMQ doesn't have a newline after \x00
        # ActiveMQ does. This is a hack around that.
        # http://stomp.codehaus.org/Protocol mentions
        # nothing about a newline following the NULL (^@)
        if buffer[:1] == '\n':
            return buffer[1:-1]
        return buffer[:-1]
    def __repr__(self):
        return "<Frame %s>" % pformat(self.headers)
| |
#!/usr/bin/python
'''
The MIT License (MIT)
Copyright (c) 2013 winlin
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
"""
the api-server is a default demo server for srs to call
when srs get some event, for example, when client connect
to srs, srs can invoke the http api of the api-server
"""
import sys
# reload sys module to enable the getdefaultencoding method.
# NOTE: Python 2 only -- reload() and sys.setdefaultencoding() no longer
# exist in Python 3.
reload(sys)
# set the default encoding to utf-8
# using exec to set the encoding, to avoid error in IDE.
exec("sys.setdefaultencoding('utf-8')")
assert sys.getdefaultencoding().lower() == "utf-8"
import json, datetime, cherrypy
# simple log functions.
def trace(msg):
    """Print msg to stdout prefixed with a timestamp (Python 2 print statement)."""
    date = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    print "[%s][trace] %s"%(date, msg)
# enable crossdomain access for js-client
def enable_crossdomain():
    """Attach CORS headers to the current cherrypy response so browser
    JS clients on other origins can call this API."""
    headers = cherrypy.response.headers
    headers["Access-Control-Allow-Origin"] = "*"
    headers["Access-Control-Allow-Methods"] = "GET, POST, HEAD, PUT, DELETE"
    # generate allow headers for crossdomain.
    allowed = ["Cache-Control", "X-Proxy-Authorization", "X-Requested-With", "Content-Type"]
    headers["Access-Control-Allow-Headers"] = ",".join(allowed)
# error codes definition
class Error:
    """Numeric result codes returned (as strings) in HTTP responses to SRS."""
    # operation completed successfully
    success = 0
    # the request body could not be parsed as JSON
    system_parse_json = 100
    # the "action" field of the request is not recognized
    request_invalid_action = 200
'''
handle the clients requests: connect/disconnect vhost/app.
'''
class RESTClients(object):
    """cherrypy REST resource for SRS client connect/close callbacks."""
    exposed = True
    def GET(self):
        """Return the (currently empty) collection of known clients as JSON."""
        enable_crossdomain()
        clients = {}
        return json.dumps(clients)
    '''
    for SRS hook: on_connect/on_close
    on_connect:
        when client connect to vhost/app, call the hook,
        the request in the POST data string is a object encode by json:
              {
                  "action": "on_connect",
                  "client_id": 1985,
                  "ip": "192.168.1.10", "vhost": "video.test.com", "app": "live",
                  "pageUrl": "http://www.test.com/live.html"
              }
    on_close:
        when client close/disconnect to vhost/app/stream, call the hook,
        the request in the POST data string is a object encode by json:
              {
                  "action": "on_close",
                  "client_id": 1985,
                  "ip": "192.168.1.10", "vhost": "video.test.com", "app": "live"
              }
    if valid, the hook must return HTTP code 200(Stauts OK) and response
    an int value specifies the error code(0 corresponding to success):
          0
    '''
    def POST(self):
        """Dispatch an SRS on_connect/on_close hook; returns an Error code as str."""
        enable_crossdomain()
        # return the error code in str
        code = Error.success
        req = cherrypy.request.body.read()
        trace("post to clients, req=%s"%(req))
        try:
            json_req = json.loads(req)
        except Exception, ex:
            code = Error.system_parse_json
            trace("parse the request to json failed, req=%s, ex=%s, code=%s"%(req, ex, code))
            return str(code)
        action = json_req["action"]
        if action == "on_connect":
            code = self.__on_connect(json_req)
        elif action == "on_close":
            code = self.__on_close(json_req)
        else:
            trace("invalid request action: %s"%(json_req["action"]))
            code = Error.request_invalid_action
        return str(code)
    def OPTIONS(self):
        """CORS preflight handler: only sets the crossdomain headers."""
        enable_crossdomain()
    def __on_connect(self, req):
        """Log the on_connect event; always succeeds for now."""
        code = Error.success
        trace("srs %s: client id=%s, ip=%s, vhost=%s, app=%s, pageUrl=%s"%(
            req["action"], req["client_id"], req["ip"], req["vhost"], req["app"], req["pageUrl"]
        ))
        # TODO: process the on_connect event
        return code
    def __on_close(self, req):
        """Log the on_close event; always succeeds for now."""
        code = Error.success
        trace("srs %s: client id=%s, ip=%s, vhost=%s, app=%s"%(
            req["action"], req["client_id"], req["ip"], req["vhost"], req["app"]
        ))
        # TODO: process the on_close event
        return code
'''
handle the streams requests: publish/unpublish stream.
'''
class RESTStreams(object):
    """cherrypy REST resource for SRS stream publish/unpublish callbacks."""
    exposed = True
    def GET(self):
        """Return the (currently empty) collection of known streams as JSON."""
        enable_crossdomain()
        streams = {}
        return json.dumps(streams)
    '''
    for SRS hook: on_publish/on_unpublish
    on_publish:
        when client(encoder) publish to vhost/app/stream, call the hook,
        the request in the POST data string is a object encode by json:
              {
                  "action": "on_publish",
                  "client_id": 1985,
                  "ip": "192.168.1.10", "vhost": "video.test.com", "app": "live",
                  "stream": "livestream"
              }
    on_unpublish:
        when client(encoder) stop publish to vhost/app/stream, call the hook,
        the request in the POST data string is a object encode by json:
              {
                  "action": "on_unpublish",
                  "client_id": 1985,
                  "ip": "192.168.1.10", "vhost": "video.test.com", "app": "live",
                  "stream": "livestream"
              }
    if valid, the hook must return HTTP code 200(Stauts OK) and response
    an int value specifies the error code(0 corresponding to success):
          0
    '''
    def POST(self):
        """Dispatch an SRS on_publish/on_unpublish hook; returns an Error code as str."""
        enable_crossdomain()
        # return the error code in str
        code = Error.success
        req = cherrypy.request.body.read()
        trace("post to streams, req=%s"%(req))
        try:
            json_req = json.loads(req)
        except Exception, ex:
            code = Error.system_parse_json
            trace("parse the request to json failed, req=%s, ex=%s, code=%s"%(req, ex, code))
            return str(code)
        action = json_req["action"]
        if action == "on_publish":
            code = self.__on_publish(json_req)
        elif action == "on_unpublish":
            code = self.__on_unpublish(json_req)
        else:
            trace("invalid request action: %s"%(json_req["action"]))
            code = Error.request_invalid_action
        return str(code)
    def OPTIONS(self):
        """CORS preflight handler: only sets the crossdomain headers."""
        enable_crossdomain()
    def __on_publish(self, req):
        """Log the on_publish event; always succeeds for now."""
        code = Error.success
        trace("srs %s: client id=%s, ip=%s, vhost=%s, app=%s, stream=%s"%(
            req["action"], req["client_id"], req["ip"], req["vhost"], req["app"], req["stream"]
        ))
        # TODO: process the on_publish event
        return code
    def __on_unpublish(self, req):
        """Log the on_unpublish event; always succeeds for now."""
        code = Error.success
        trace("srs %s: client id=%s, ip=%s, vhost=%s, app=%s, stream=%s"%(
            req["action"], req["client_id"], req["ip"], req["vhost"], req["app"], req["stream"]
        ))
        # TODO: process the on_unpublish event
        return code
'''
handle the sessions requests: client play/stop stream
'''
class RESTSessions(object):
    """cherrypy REST resource for SRS client play/stop callbacks."""
    exposed = True
    def GET(self):
        """Return the (currently empty) collection of known sessions as JSON."""
        enable_crossdomain()
        sessions = {}
        return json.dumps(sessions)
    '''
    for SRS hook: on_play/on_stop
    on_play:
        when client(encoder) publish to vhost/app/stream, call the hook,
        the request in the POST data string is a object encode by json:
              {
                  "action": "on_play",
                  "client_id": 1985,
                  "ip": "192.168.1.10", "vhost": "video.test.com", "app": "live",
                  "stream": "livestream"
              }
    on_stop:
        when client(encoder) stop publish to vhost/app/stream, call the hook,
        the request in the POST data string is a object encode by json:
              {
                  "action": "on_stop",
                  "client_id": 1985,
                  "ip": "192.168.1.10", "vhost": "video.test.com", "app": "live",
                  "stream": "livestream"
              }
    if valid, the hook must return HTTP code 200(Stauts OK) and response
    an int value specifies the error code(0 corresponding to success):
          0
    '''
    def POST(self):
        """Dispatch an SRS on_play/on_stop hook; returns an Error code as str."""
        enable_crossdomain()
        # return the error code in str
        code = Error.success
        req = cherrypy.request.body.read()
        trace("post to sessions, req=%s"%(req))
        try:
            json_req = json.loads(req)
        except Exception, ex:
            code = Error.system_parse_json
            trace("parse the request to json failed, req=%s, ex=%s, code=%s"%(req, ex, code))
            return str(code)
        action = json_req["action"]
        if action == "on_play":
            code = self.__on_play(json_req)
        elif action == "on_stop":
            code = self.__on_stop(json_req)
        else:
            trace("invalid request action: %s"%(json_req["action"]))
            code = Error.request_invalid_action
        return str(code)
    def OPTIONS(self):
        """CORS preflight handler: only sets the crossdomain headers."""
        enable_crossdomain()
    def __on_play(self, req):
        """Log the on_play event; always succeeds for now."""
        code = Error.success
        trace("srs %s: client id=%s, ip=%s, vhost=%s, app=%s, stream=%s"%(
            req["action"], req["client_id"], req["ip"], req["vhost"], req["app"], req["stream"]
        ))
        # TODO: process the on_play event
        return code
    def __on_stop(self, req):
        """Log the on_stop event; always succeeds for now."""
        code = Error.success
        trace("srs %s: client id=%s, ip=%s, vhost=%s, app=%s, stream=%s"%(
            req["action"], req["client_id"], req["ip"], req["vhost"], req["app"], req["stream"]
        ))
        # TODO: process the on_stop event
        return code
# HTTP RESTful path.
class Root(object):
    """Root of the cherrypy object tree: serves '/' and exposes /api."""
    def __init__(self):
        self.api = Api()
# HTTP RESTful path.
class Api(object):
    """Path node for '/api'; exposes /api/v1."""
    def __init__(self):
        self.v1 = V1()
# HTTP RESTful path. to access as:
# http://127.0.0.1:8085/api/v1/clients
class V1(object):
    """Path node for '/api/v1'; wires up the clients/streams/sessions resources."""
    def __init__(self):
        self.clients = RESTClients()
        self.streams = RESTStreams()
        self.sessions = RESTSessions()
'''
main code start.
'''
# do not support using this module as a library.
if __name__ != "__main__":
    raise Exception("embed not support")
# check the user options
if len(sys.argv) <= 1:
    print "SRS api callback server, Copyright (c) 2013 winlin"
    print "Usage: python %s <port>"%(sys.argv[0])
    print "    port: the port to listen at."
    print "For example:"
    print "    python %s 8085"%(sys.argv[0])
    print ""
    print "See also: https://github.com/winlinvip/simple-rtmp-server"
    sys.exit(1)
# parse port from user options.
port = int(sys.argv[1])
trace("api server listen at port: %s"%(port))
# cherrypy config.
conf = {
    'global': {
        # give in-flight requests one second to finish on shutdown
        'server.shutdown_timeout': 1,
        # listen on all interfaces at the user-supplied port
        'server.socket_host': '0.0.0.0',
        'server.socket_port': port,
        # encode responses as utf-8
        'tools.encode.on': True,
        'tools.encode.encoding': "utf-8"
    },
    '/': {
        # for cherrypy RESTful api support
        'request.dispatch': cherrypy.dispatch.MethodDispatcher()
    }
}
# start cherrypy web engine
trace("start cherrypy server")
cherrypy.quickstart(Root(), '/', conf)
| |
# coding: utf-8
import json
import logging
from json.decoder import JSONDecodeError
from typing import Optional, Generator, List, Union, Any
from django.utils.module_loading import import_string
from modernrpc.conf import settings
from modernrpc.core import Protocol, RPCRequestContext
from modernrpc.exceptions import (
RPCParseError,
RPC_INTERNAL_ERROR,
RPCException,
RPCInvalidRequest,
RPC_INVALID_REQUEST,
)
from modernrpc.handlers.base import RPCHandler, SuccessResult, ErrorResult
logger = logging.getLogger(__name__)
# NOTE(review): these keys are not referenced in this part of the module --
# presumably used elsewhere to stash JSON-RPC request metadata; confirm.
REQUEST_ID = "_jsonrpc_request_id"
VERSION = "_jsonrpc_request_version"
class JsonRpcDataMixin:
    """Carries the JSON-RPC specific metadata attached to a request/result"""

    request_id = None  # type: Optional[int]
    version = "2.0"  # type: str
    # A standard request normally carries a non-null id; request_id can still
    # be None on a regular request when parsing failed, in which case the
    # response must be sent with "id" set to None (null in JSON).
    is_notification = False

    def set_jsonrpc_data(self, request_id, version, is_notification=False):
        """Record the request id, protocol version and notification flag."""
        self.request_id, self.version, self.is_notification = (
            request_id,
            version,
            is_notification,
        )
class JsonSuccessResult(JsonRpcDataMixin, SuccessResult):
    """A JSON-RPC success result"""

    def format(self):
        """Return the response fragment holding the call result."""
        return {"result": self.data}
class JsonErrorResult(JsonRpcDataMixin, ErrorResult):
    """A JSON-RPC error result. Allows setting additional data, as specified in standard"""

    def __init__(self, code: int, message: str, data: Any = None):
        """
        :param code: JSON-RPC error code
        :param message: short human-readable error description
        :param data: optional additional information about the error
        """
        super().__init__(code, message)
        self.data = data

    def format(self):
        """Build the "error" member of the response payload.

        The optional "data" member is included whenever it was provided:
        JSON-RPC 2.0 allows any primitive or structured value there, so a
        falsy value (0, "", [], False) must not be dropped. Only None means
        "no additional data".
        """
        _part = {
            "error": {
                "code": self.code,
                "message": self.message,
            }
        }
        # Fixed: was `if self.data:`, which silently omitted falsy data values.
        if self.data is not None:
            _part["error"]["data"] = self.data
        return _part
# Alias to simplify typehints in JSONRPCHandler methods: a processed call
# yields either a success or an error result.
JsonResult = Union[JsonSuccessResult, JsonErrorResult]
class JSONRPCHandler(RPCHandler):
    """Default JSON-RPC handler implementation"""
    protocol = Protocol.JSON_RPC
    def __init__(self, entry_point):
        super().__init__(entry_point)
        # JSON encoder/decoder classes are configurable via settings.
        self.decoder = import_string(settings.MODERNRPC_JSON_DECODER)
        self.encoder = import_string(settings.MODERNRPC_JSON_ENCODER)
    @staticmethod
    def valid_content_types() -> List[str]:
        """Content-Type values accepted for JSON-RPC requests."""
        return [
            "application/json",
            "application/json-rpc",
            "application/jsonrequest",
        ]
    def process_request(self, request_body: str, context: RPCRequestContext) -> str:
        """
        Parse request and process it, according to its kind. Standard request as well as batch request is supported.

        Delegates to `process_single_request()` to perform most of the checks and procedure execution. Depending on the
        result of `parse_request()`, standard or batch request will be handled here.

        :param request_body: raw request payload (JSON text)
        :param context: per-request context passed down to procedure execution
        :return: serialized JSON-RPC response, or "" for notification-only input
        """
        try:
            parsed_request = self.parse_request(request_body)
        except RPCException as exc:
            logger.error(exc, exc_info=settings.MODERNRPC_LOG_EXCEPTIONS)
            # Parse errors have no usable request id; dumps_result emits "id": null.
            return self.dumps_result(JsonErrorResult(exc.code, exc.message))
        else:
            # Parsed request is a list, we should handle it as batch request
            if isinstance(parsed_request, list):
                # NOTE(review): an empty batch array ("[]") currently yields an
                # empty response; JSON-RPC 2.0 prescribes an Invalid Request
                # error for this case -- confirm intended behaviour.
                # Process each request, getting the resulting JsonResult instance (success or error)
                results = (
                    self.process_single_request(_req, context)
                    for _req in parsed_request
                )  # type: Generator[JsonResult, None, None]
                # Transform each result into its str result representation and remove notifications result
                str_results = (
                    self.dumps_result(_res)
                    for _res in results
                    if not _res.is_notification
                )  # type: Generator[str, None, None]
                # Rebuild final JSON content manually
                concatenated_results = ", ".join(str_results)
                # Return JSON-serialized response list, or empty string for notifications-only request
                return "[%s]" % concatenated_results if concatenated_results else ""
            # By default, handle a standard single request
            return self.dumps_result(
                self.process_single_request(parsed_request, context)
            )
    def parse_request(self, request_body: str) -> Union[dict, List[dict]]:
        """
        Parse request body and return deserialized payload, or raise an RPCParseError

        Returned payload may be a dict (for standard request) or a list of dicts (for batch request).
        """
        try:
            payload = json.loads(request_body, cls=self.decoder)
        # NOTE(review): the tuple is redundant -- JSONDecodeError is a subclass
        # of Exception, so this is equivalent to catching bare Exception.
        except (JSONDecodeError, Exception) as exc:
            raise RPCParseError(
                "Error while parsing JSON-RPC request: {}".format(exc)
            ) from exc
        return payload
    def process_single_request(
        self, request_data: dict, context: RPCRequestContext
    ) -> JsonResult:
        """Check and call the RPC method, based on given request dict.

        Validates the JSON-RPC envelope (method, jsonrpc version, id),
        executes the procedure and wraps the outcome in a JsonResult
        carrying the request's id/version/notification metadata.
        """
        if not isinstance(request_data, dict):
            error_msg = (
                'Invalid JSON-RPC payload, expected "object", found "{}"'.format(
                    type(request_data).__name__
                )
            )
            return JsonErrorResult(RPC_INVALID_REQUEST, error_msg)
        # Retrieve method name, and get corresponding RPCMethod instance.
        # Raises RPCInvalidMethod as early as possible when not found
        method_name = request_data.get("method")
        # "params" may be positional (list/tuple) or named (dict); anything
        # else falls back to no arguments.
        params = request_data.get("params")
        args = params if isinstance(params, (list, tuple)) else []
        kwargs = params if isinstance(params, dict) else {}
        # Per JSON-RPC 2.0, a request without an "id" member is a notification.
        is_notification = "id" not in request_data
        request_id = request_data.get("id")
        jsonrpc_version = request_data.get("jsonrpc")
        try:
            # Perform standard error checks
            if not method_name:
                raise RPCInvalidRequest('Missing parameter "method"')
            if not jsonrpc_version:
                raise RPCInvalidRequest('Missing parameter "jsonrpc"')
            if jsonrpc_version != "2.0":
                raise RPCInvalidRequest(
                    'Parameter "jsonrpc" has an unsupported value "{}". It must be set to "2.0"'.format(
                        jsonrpc_version
                    )
                )
            if not is_notification and request_id is None:
                raise RPCInvalidRequest(
                    'Parameter "id" has an unsupported "null" value. It must be set to a positive integer value, '
                    'or must be completely removed from request payload for special "notification" requests'
                )
            _method = self.get_method_wrapper(method_name)
            result_data = _method.execute(context, args, kwargs)
            result = JsonSuccessResult(result_data)  # type: JsonResult
        except RPCException as exc:
            # Known RPC-level failure: carry its code/message/data in the response.
            logger.warning(exc, exc_info=settings.MODERNRPC_LOG_EXCEPTIONS)
            result = JsonErrorResult(exc.code, exc.message, exc.data)
        except Exception as exc:
            # Unexpected failure: report as an internal error.
            logger.error(exc, exc_info=settings.MODERNRPC_LOG_EXCEPTIONS)
            result = JsonErrorResult(RPC_INTERNAL_ERROR, str(exc))
        # Attach envelope metadata so dumps_result() can build the response.
        result.set_jsonrpc_data(
            request_id=request_id,
            version=jsonrpc_version,
            is_notification=is_notification,
        )
        return result
    def dumps_result(self, result: JsonResult) -> str:  # type: ignore[override]
        """
        Dumps result instance into a proper JSON-RPC response

        Notifications are supported here, based on result's `is_notification` member: return an empty string
        """
        if result.is_notification:
            return ""
        # Common envelope shared by success and error responses.
        result_base = {
            "id": result.request_id,
            "jsonrpc": result.version,
        }
        try:
            return json.dumps({**result_base, **result.format()}, cls=self.encoder)
        except Exception as exc:
            # Error on result serialization: serialize an error instead
            error_msg = "Unable to serialize result: {}".format(exc)
            logger.error(error_msg, exc_info=settings.MODERNRPC_LOG_EXCEPTIONS)
            error_result = JsonErrorResult(RPC_INTERNAL_ERROR, error_msg)
            return json.dumps(
                {**result_base, **error_result.format()}, cls=self.encoder
            )
| |
# standard imports
import sys
import logging
import importlib
from pathlib import Path
# toolbox imports
from .busy import BusyObservable, busy
# logging
# Module-level logger; DEBUG is forced here so class/instance registration
# events emitted by the metaclass below are visible.
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
class Metaclass(type):
    """A metaclass for the :py:class:`Resource` class. It assigns a unique
    identifier to each instance of this class and collects all objects
    of the instance class(es), allowing iteration and index access.
    """
    # The first class created with this metaclass becomes the "base class"
    # and owns the shared `_instances` registry for the whole hierarchy.
    _base_class = None
    def __new__(cls, clsname, superclasses, attributedict, **kwargs):
        """Create a new instance class of this :py:class:`Metaclass`.
        """
        logger.debug("Creating new instance class of meta class"
                     f"{cls.__name__}: {clsname} "
                     f"(superclasses: {superclasses})")
        new_class = type.__new__(cls, clsname, superclasses, attributedict,
                                 **kwargs)
        if new_class._base_class is None:
            # First class of the hierarchy: wrap __init__ so every instance
            # receives and stores an `id`, install an id-based __repr__, and
            # attach the shared instance registry.
            original_init = new_class.__init__
            def init_wrapper(self, id: str, **kwargs) -> None:
                # Store the id, then delegate to the original constructor.
                self._id = id
                original_init(self, **kwargs)
            def repr(self) -> str:
                return self._id
            new_class._instances = {}
            new_class.__init__ = init_wrapper
            new_class.__repr__ = repr
            new_class._base_class = new_class
            logger.debug(f"Added instance dictionary to class {clsname}")
        return new_class
    def __call__(cls, id: str=None, *args, **kwargs):
        """Create a new object of the instance class. The new object
        will get an id and will be inserted into the dictionary
        of instances of the base class.

        Attributes
        ----------
        id: str
            The unique identifier of the instance. If no id is provided,
            a generic id will be generated.

        Raises
        ------
        RuntimeError:
            If the id is already in use.
        """
        if id is None:
            # Generic id: base class name plus current registry size.
            id = cls._base_class.__name__ + str(len(cls._instances))
        if id in cls._base_class._instances:
            # NOTE(review): 'Ambigouos' typo kept -- it is a runtime string.
            raise RuntimeError(f"Ambigouos use of {cls._base_class.__name__} "
                               f" identifier '{id}'.")
        instance = super(Metaclass, cls).__call__(id=id, *args, **kwargs)
        cls._base_class._instances[id] = instance
        return instance
    def __getitem__(cls, id):
        """Look up a registered instance by id; the instance must be of
        (a subclass of) the class through which it is accessed."""
        instance = cls._base_class._instances[id]
        if not isinstance(instance, cls):
            # NOTE(review): 'accessd' typo kept -- it is a runtime string.
            raise TypeError(f"{cls._base_class.__name__} '{id}' has "
                            "inappropriate type "
                            f"'{instance.__class__.__name__}' while it was "
                            f"accessd as '{cls.__name__}'.")
        return instance
    def __iter__(cls):
        # Iterate only the registered instances matching the accessed class.
        for instance in cls._base_class._instances.values():
            if isinstance(instance, cls):
                yield instance
    def __len__(cls) -> int:
        # Count registered instances of the accessed class.
        # NOTE(review): the local name shadows the builtin len().
        len = 0
        for instance in cls._base_class._instances.values():
            if isinstance(instance, cls):
                len += 1
        return len
class Resource(BusyObservable, method='resource_changed',
               changes={'status_changed'}, metaclass=Metaclass):
    """A :py:class:`Resource` is everything that may be required by a tool.
    This includes specific hardware, software, or data.

    The class supports checking for availability, offers methods to
    install or update a resource and provides information on the
    resource.
    """
    # Optional human-readable label; falls back to the unique id
    # (``self._id``, assigned by the metaclass) when not set.
    _label: str = None
    # Optional human-readable description of the resource.
    _description: str = None
    def __init__(self, label: str = None, description: str = None,
                 **kwargs) -> None:
        """Create a new resource.

        Arguments
        ---------
        label: str
            A label for the :py:class:`Resource`. It should be globally
            unique so that it can be used as an identifier for this
            :py:class:`Resource`.
        description: str
            A description of the resource. This text may be presented
            to the user.
        """
        super().__init__(**kwargs)
        self.label = label
        self.description = description
    @property
    def label(self) -> str:
        # Fall back to the unique id when no explicit label was set.
        return self._id if self._label is None else self._label
    @label.setter
    def label(self, label: str) -> None:
        if label is not None:
            self._label = label
        elif self._label is not None:
            # NOTE(review): deletes the *instance* attribute so lookup
            # falls back to the class default (None); raises
            # AttributeError if only a class-level value is set --
            # confirm this is intended.
            del self._label
    @property
    def description(self) -> str:
        # Provide a generic placeholder text when no description was set.
        return (("No description has been provided "
                 f"for Resource '{self.label}'.")
                if self._description is None else self._description)
    @description.setter
    def description(self, description: str) -> None:
        if description is not None:
            self._description = description
        elif self._description is not None:
            # NOTE(review): same instance-attribute deletion pattern as
            # the label setter -- confirm intended.
            del self._description
    @property
    def available(self) -> bool:
        """Check the availability of this :py:class:`Resource`. If True, the
        resource can be prepared or used directly, otherwise it
        requires installation.
        """
        return True
    def install(self) -> None:
        """Install the :py:class:`Resource`. After successful installation,
        the :py:class:`Resource` should be :py:meth:`available` (but not
        necessarily :py:meth:`prepared`).
        """
        raise NotImplementedError(f"Installation of resource '{self._id}' "
                                  "is not implemented (yet), sorry!")
    def update(self) -> None:
        """Updates an installed :py:class:`Resource`.
        """
        raise NotImplementedError(f"Update of resource '{self._id}' "
                                  "is not implemented (yet), sorry!")
    @property
    def prepared(self) -> bool:
        """Check if the :py:class:`Resource` has been prepared. If True, the
        resource can be used directly, otherwise some preparation may be
        necessary, which may delay further operation.
        """
        return False
    def prepare(self, **kwargs):
        """Prepare this :py:class:`Resource`. This may require some time.
        After preparation, the :py:class:`Resource` should be usable
        without any delay.
        """
        pass
class DummyResource(Resource):
    """A trivial :py:class:`Resource` that adds no behavior of its own."""
class ModuleResource(Resource):
    """A :py:class:`Resource` representing a Python module.
    """
    # Fully qualified module name (defaults to the resource id).
    _module: str = None
    # Mapping from conda channel name to conda package name.
    _conda: dict = None
    # NOTE(review): declared but never assigned or used in this class.
    _pip: str = None
    # Namespace prefix for the module (e.g. 'np' for numpy).
    _prefix: str = None
    def __init__(self, *args, module: str = None, prefix: str = None,
                 conda: str = None, conda_channel: str='', **kwargs) -> None:
        """Initialize a new :py:class:`ModuleResource`.

        Arguments
        ---------
        module: str
            The fully qualified module name. If none is provided,
            the name of this resource will be used.
        prefix: str
            The namespace prefix to be used for the module.
        conda: str
            The conda name of this module.
        conda_channel: str
            The conda channel from which the module should
            be installed. If none is provided, the default
            channel will be used.
        """
        super().__init__(*args, **kwargs)
        self._module = self._id if module is None else module
        self._prefix = prefix
        if conda is not None:
            self.add_conda_source(conda, channel=conda_channel)
    def add_conda_source(self, name: str, channel: str='') -> None:
        """Add a conda source allowing to install the module.

        Attributes
        ----------
        name: str
            The name of the conda package.
        channel: str
            The conda channel from which the package can be installed.
            The empty string denotes the default channel.
        """
        if self._conda is None:
            self._conda = {}
        self._conda[channel] = name
    @property
    def prepared(self) -> bool:
        """Check, if this :py:class:`ModuleResource` was prepared,
        that is, if the module was imported.

        Returns
        ------
        True if the module was already imported, False otherwise.
        """
        return self._module in sys.modules
    @busy("Import module")
    def prepare(self):
        """Prepare this :py:class:`ModuleResource`, that is,
        import the module.
        """
        importlib.import_module(self._module)
        # Notify observers that the status has changed.
        self.change(status_changed=True)
    @property
    def available(self) -> bool:
        """Check if this :py:class:`ModuleResource` is installed.
        """
        # It may happen that a module is loaded (in sys.modules), but
        # does not provide a __spec__ attribute, or that this
        # attribute is None. In these cases, importlib.util.find_spec
        # raises a ValueError. Hence we check beforehand if the module
        # is loaded, which is sufficient for us (we do not need the
        # module spec) and only refer to importlib in case it is not
        # loaded.
        if self.module in sys.modules:
            return sys.modules[self.module] is not None
        return importlib.util.find_spec(self.module) is not None
    @property
    def module(self) -> str:
        # The fully qualified name of the module this resource stands for.
        return self._module
    @property
    def version(self) -> str:
        """Check the module version.

        Raises
        ------
        RuntimeError:
            If the module has not been imported yet.
        """
        if not self.prepared:
            raise RuntimeError(f"Module '{self._module}' was not imported. "
                               "No version information available")
        module = sys.modules[self.module]
        if hasattr(module, '__version__'):
            version = str(module.__version__)
        elif self._module == "PyQt5":
            # PyQt5 does not follow the __version__ convention; report
            # the Qt version string instead.
            version = module.QtCore.QT_VERSION_STR
        else:
            version = "loaded, no version"
        return version
    @busy("Install module")
    def install(self, method: str='auto', channel: str=''):
        """Install this :py:class:`ModuleResource`

        Arguments
        ---------
        method: str
            The installation method to use. Supported are
            'pip' for installation with pip and 'conda' for
            conda installation. 'auto' tries to automatically
            determine the best installation method.
        """
        # FIXME[todo]: as installation may take some time, it should
        # be run in a background thread ...
        # are we using conda?
        # FIXME[todo]: provide a general function for this check
        # FIXME[hack]: this does only work in conda environments,
        # but not if the conda base environment is used
        if 'CONDA_PREFIX' in os.environ and self._conda:
            import subprocess
            command = ['conda', 'install']
            if channel in self._conda:
                # NOTE(review): for the default channel ('') this still
                # appends "-c ''" to the command -- confirm intended.
                command += '-c', channel
            # NOTE(review): raises KeyError if *channel* has not been
            # registered via add_conda_source().
            command.append(self._conda[channel])
            process = subprocess.Popen(command,
                                       stdout=subprocess.PIPE,
                                       stderr=subprocess.PIPE)
            stdout, stderr = process.communicate()
            # NOTE(review): stdout/stderr are captured but never
            # inspected; installation failures go unnoticed here.
            # FIXME[todo]: there is a python 'conda' module
            # which may be used ...
        else:
            # use pip
            # FIXME[todo]: pip-based installation is not implemented.
            pass
        self.change(status_changed=True)
class Installable(Resource, BusyObservable):
    """A :py:class:`Resource` that can be installed and uninstalled.

    Subclasses implement the actual work in :py:meth:`_installed`,
    :py:meth:`_install` and :py:meth:`_uninstall`.
    """
    # NOTE(review): Resource already derives from BusyObservable, so the
    # explicit second base class is redundant (but harmless).
    @property
    def installed(self) -> bool:
        # Public read-only view of the installation state.
        return self._installed()
    def _installed(self) -> bool:
        """A method to check if the py:class:`Installable` is installed.
        This should be implemented by subclasses and do the actual test.
        """
        # NOTE(review): the base implementation implicitly returns None
        # (falsy), i.e. "not installed".
    @busy("installing")
    def install(self) -> None:
        """Install the :py:class:`Installable`.
        """
        self._install()
    def _install(self) -> None:
        """Do the actual installation. This method should be
        implemented by subclasses.
        """
    @busy("uninstalling")
    def uninstall(self) -> None:
        """Uninstall the :py:class:`Installable`.
        """
        self._uninstall()
    def _uninstall(self) -> None:
        """Do the actual deinstallation. This method should be
        implemented by subclasses.
        """
class Package(Installable):
    """An installable Python package.

    A package may have different names in different contexts: the module
    name used for import, the pip name and the conda name.  Each one
    defaults to *name* when not given explicitly.
    """
    _name: str
    _module_name: str
    _pip_name: str
    _conda_name: str

    def __init__(self, name: str, module: str = None,
                 pip: str = None,
                 conda: str = None, **kwargs) -> None:
        """Create a new package resource.

        Arguments
        ---------
        name: str
            The canonical name of the package.
        module: str
            The name used to import the package (defaults to *name*).
        pip: str
            The pip package name (defaults to *name*).
        conda: str
            The conda package name (defaults to *name*).
        """
        super().__init__(**kwargs)
        self._name = name
        self._module_name = module or name
        self._pip_name = pip or name
        self._conda_name = conda or name

    def _installed(self) -> bool:
        """Check if the package is installed using `importlib`.
        """
        # importlib.util.find_loader() is deprecated (and removed in
        # recent Python versions); find_spec() is the supported
        # replacement and is also what ModuleResource.available uses.
        return importlib.util.find_spec(self._module_name) is not None

    def _install(self) -> None:
        """Do the actual installation. This method should be
        implemented by subclasses.
        """

    def pip_install(self) -> None:
        """Install package via pip.
        """

    def conda_install(self) -> None:
        """Install package with conda.
        """
class GitRepository(Installable):
    """An :py:class:`Installable` backed by a git repository.
    """
    # FIXME[todo]: not implemented yet.
class Downloadable(Installable):
    """An :py:class:`Installable` obtained by downloading a file.
    """
    # Local path of the downloaded file.
    _file: Path = None
    # URL from which the file can be downloaded.
    _url: str = None
    # Checksum used to verify the downloaded file.
    _checksum: str = None
class ResourceUser:
    """Mixin for classes that make use of :py:class:`Resource` objects.

    Resources can be registered at class level (shared by the class and
    all its instances, e.g. a third-party software package) or at
    instance level (e.g. a model file for one specific instance).
    """

    # Class-level resource registry.  BUG FIX: without this attribute,
    # add_class_resource() and resources() raised AttributeError, since
    # no `_resources` existed on the class itself.
    # NOTE: subclasses share this list unless they define their own
    # `_resources` class attribute.
    _resources: list = []

    @classmethod
    def add_class_resource(cls, resource: "Resource") -> None:
        """Add a class resource, that is a resource to be used
        by the class itself or all instances of the class.
        A typical example would be a third-party software package.
        """
        cls._resources.append(resource)

    def add_instance_resource(self, resource: "Resource") -> None:
        """Add an instance resource, that is a resource to be used by one
        specific instance of the class. A typical example would be a
        model file.
        """
        self._resources.append(resource)

    def __init__(self, **kwargs) -> None:
        super().__init__(**kwargs)
        # Instance-level registry; shadows the class-level list.
        self._resources = []

    def resources(self) -> "Iterable[Resource]":
        """Iterate over the class resources followed by the instance
        resources.
        """
        for resource in type(self)._resources:
            yield resource
        for resource in self._resources:
            yield resource

    def resources_available(self) -> bool:
        """Check whether all registered resources are available."""
        for resource in self.resources():
            # BUG FIX: Resource.available is a property; calling it as a
            # function raised "TypeError: 'bool' object is not callable".
            if not resource.available:
                return False
        return True

    def install_resources(self) -> None:
        """Install all registered resources that are installable."""
        for resource in self.resources():
            if isinstance(resource, Installable):
                resource.install()
##############################################################################
# Predefined resources.  Instantiating a Resource registers it in the
# global registry maintained by the metaclass.

# Generic test resources:
Resource(id='test')
Resource(id='test2', label='abc')
Resource(id='test3', description='xyz')
DummyResource(description='abc')

# Third-party Python modules (stray trailing semicolon removed):
ModuleResource(id='numpy', prefix='np', label="NumPy",
               description="NumPy is the fundamental package for "
               "scientific computing with Python.")
ModuleResource(id='tensorflow', prefix='tf', label="TensorFlow")
ModuleResource(id='keras', label="Keras")
ModuleResource(id='appsdir')
ModuleResource(id='matplotlib')
ModuleResource(id='opencv', module='cv2', label="OpenCV")
ModuleResource(id='caffe')
ModuleResource(id='qt', module='PyQt5', label="Qt")
ModuleResource(id='pycuda', conda='pycuda', conda_channel='lukepfister')
ModuleResource(id='lucid',
               description="A collection of infrastructure and tools "
               "for research in neural network interpretability.")
ModuleResource(id='imutils',
               description='A series of convenience functions to make '
               'basic image processing functions such as translation, '
               'rotation, resizing, skeletonization, displaying '
               'Matplotlib images, sorting contours, detecting edges, '
               'and much more easier with OpenCV and both '
               'Python 2.7 and Python 3.')
ModuleResource(id='dlib',
               description='Dlib is a modern C++ toolkit containing '
               'machine learning algorithms and tools '
               'for creating complex software to solve real world problems.')
ModuleResource(id='ikkuna',
               description='A tool for monitoring neural network training.')
ModuleResource(id='sklearn',
               description='Machine Learning in Python.')
##############################################################################
| |
from django.conf import settings
from django.core.exceptions import ValidationError
from django.db import models
from django.urls import reverse
from . import managers
from spectator.core.fields import NaturalSortField
from spectator.core.models import (
BaseRole,
SluggedModelMixin,
ThumbnailModelMixin,
thumbnail_upload_path,
TimeStampedModelMixin,
)
def publication_upload_path(instance, filename):
    """
    Return the upload path for a Publication thumbnail.

    This function is now only kept so that older migrations still work.
    No longer needed since moving the Publication.thumbnail field to the
    ThumbnailModelMixin.
    2020-04-07
    """
    # Simply delegate to the shared helper.
    return thumbnail_upload_path(instance, filename)
class PublicationSeries(TimeStampedModelMixin, SluggedModelMixin, models.Model):
    """
    A way to group `Publication`s into series.

    Get its Publications:
        series.publication_set.all()
    """

    title = models.CharField(
        null=False,
        blank=False,
        max_length=255,
        help_text="e.g. 'The London Review of Books'.",
    )
    # Sort key automatically derived from `title`.
    title_sort = NaturalSortField(
        "title",
        max_length=255,
        default="",
        help_text="e.g. 'london review of books, the'.",
    )
    url = models.URLField(
        null=False,
        blank=True,
        max_length=255,
        verbose_name="URL",
        help_text="e.g. 'https://www.lrb.co.uk/'.",
    )

    class Meta:
        ordering = ("title_sort",)
        # "series" is both singular and plural.
        verbose_name = "Publication series"
        verbose_name_plural = "Publication series"

    def __str__(self):
        return self.title

    def get_absolute_url(self):
        return reverse(
            "spectator:reading:publicationseries_detail", kwargs={"slug": self.slug}
        )
class PublicationRole(BaseRole):
    """
    Linking a creator to a Publication, optionally via their role (e.g.
    'Author', 'Editor', etc.)
    """

    creator = models.ForeignKey(
        "spectator_core.Creator",
        blank=False,
        on_delete=models.CASCADE,
        related_name="publication_roles",
    )
    publication = models.ForeignKey(
        "spectator_reading.Publication", on_delete=models.CASCADE, related_name="roles"
    )

    class Meta:
        ordering = ("role_order", "role_name")
        verbose_name = "Publication role"
class Publication(ThumbnailModelMixin, TimeStampedModelMixin, SluggedModelMixin):
    """
    A single publication: a book or one issue of a periodical.

    Get a Publication's creators:
        publication = Publication.objects.get(pk=1)
        # Just the creators:
        for creator in publication.creators.all():
            print(creator.name)
        # Include their roles:
        for role in publication.roles.all():
            print(role.publication, role.creator, role.role_name)
    Get its readings:
        for reading in publication.reading_set.all():
            print(reading.start_date, reading.end_date)
    """

    class Kind(models.TextChoices):
        BOOK = "book", "Book"
        PERIODICAL = "periodical", "Periodical"

    # NOTE: field definitions are unchanged so no new migration is needed.
    title = models.CharField(
        null=False,
        blank=False,
        max_length=255,
        help_text="e.g. 'Aurora' or 'Vol. 39 No. 4, 16 February 2017'.",
    )
    title_sort = NaturalSortField(
        "title",
        max_length=255,
        default="",
        help_text="e.g. 'clockwork orange, a' or 'world cities, the'.",
    )
    series = models.ForeignKey(
        "spectator_reading.PublicationSeries",
        blank=True,
        null=True,
        on_delete=models.SET_NULL,
    )
    kind = models.CharField(max_length=20, choices=Kind.choices, default=Kind.BOOK)
    official_url = models.URLField(
        null=False,
        blank=True,
        max_length=255,
        verbose_name="Official URL",
        help_text="Official URL for this book/issue.",
    )
    isbn_uk = models.CharField(
        null=False,
        blank=True,
        max_length=20,
        verbose_name="UK ISBN",
        help_text="e.g. '0356500489'.",
    )
    isbn_us = models.CharField(
        null=False,
        blank=True,
        max_length=20,
        verbose_name="US ISBN",
        help_text="e.g. '0316098094'.",
    )
    notes_url = models.URLField(
        null=False,
        blank=True,
        max_length=255,
        verbose_name="Notes URL",
        help_text="URL of your notes/review.",
    )
    creators = models.ManyToManyField(
        "spectator_core.Creator", through="PublicationRole", related_name="publications"
    )

    # Managers
    objects = models.Manager()
    # Publications that are currently being read:
    in_progress_objects = managers.InProgressPublicationsManager()
    # Publications that haven't been started (have no Readings):
    unread_objects = managers.UnreadPublicationsManager()

    class Meta:
        ordering = ("title_sort",)

    def __str__(self):
        return self.title

    def get_absolute_url(self):
        return reverse(
            "spectator:reading:publication_detail", kwargs={"slug": self.slug}
        )

    def get_current_reading(self):
        """Return the in-progress Reading (one with no end date), if any."""
        try:
            return self.reading_set.filter(end_date__isnull=True)[0]
        except IndexError:
            pass

    def _amazon_url(self, isbn, url_format, country_key):
        """Build an Amazon product URL from `isbn`, or '' if `isbn` is empty.

        Appends the affiliate tag from settings.SPECTATOR_AMAZON[country_key]
        when that setting is configured.
        """
        url = ""
        if isbn:
            url = url_format.format(isbn)
            if (
                hasattr(settings, "SPECTATOR_AMAZON")
                and country_key in settings.SPECTATOR_AMAZON
            ):
                url = "{}?tag={}".format(url, settings.SPECTATOR_AMAZON[country_key])
        return url

    @property
    def amazon_uk_url(self):
        "URL for the UK ISBN on Amazon.co.uk, or '' if there is no UK ISBN."
        return self._amazon_url(
            self.isbn_uk, "https://www.amazon.co.uk/gp/product/{}/", "uk"
        )

    @property
    def amazon_us_url(self):
        "URL for the US ISBN on Amazon.com, or '' if there is no US ISBN."
        return self._amazon_url(
            self.isbn_us, "https://www.amazon.com/dp/{}/", "us"
        )

    @property
    def amazon_urls(self):
        "List of dicts describing each available Amazon URL."
        urls = []
        if self.isbn_uk:
            urls.append(
                {"url": self.amazon_uk_url, "name": "Amazon.co.uk", "country": "UK"}
            )
        if self.isbn_us:
            urls.append(
                {"url": self.amazon_us_url, "name": "Amazon.com", "country": "USA"}
            )
        return urls

    @property
    def has_urls(self):
        "Handy for templates."
        return bool(
            self.isbn_uk or self.isbn_us or self.official_url or self.notes_url
        )
class Reading(TimeStampedModelMixin, models.Model):
    """
    A period when a Publication was read.
    """

    class DateGranularity(models.IntegerChoices):
        # How precise a (possibly partial) date is.
        # Via https://www.flickr.com/services/api/misc.dates.html
        # SECOND = 0, "Y-m-d H:i:s"
        DAY = 3, "Y-m-d"
        MONTH = 4, "Y-m"
        YEAR = 6, "Y"
        # CIRCA = 8, "Circa..."

    publication = models.ForeignKey(
        "spectator_reading.Publication",
        null=False,
        blank=False,
        on_delete=models.CASCADE,
    )
    start_date = models.DateField(null=True, blank=True)
    start_granularity = models.PositiveSmallIntegerField(
        null=False,
        blank=False,
        default=DateGranularity.DAY,
        choices=DateGranularity.choices,
    )
    end_date = models.DateField(null=True, blank=True)
    end_granularity = models.PositiveSmallIntegerField(
        null=False,
        blank=False,
        default=DateGranularity.DAY,
        choices=DateGranularity.choices,
    )
    is_finished = models.BooleanField(
        default=False, help_text="Did you finish the publication?"
    )

    # Managers named for the end_date ordering they provide (see
    # spectator_reading.managers).
    objects = managers.EndDateAscendingReadingsManager()
    objects_desc = managers.EndDateDescendingReadingsManager()

    def __str__(self):
        return "{} ({} to {})".format(self.publication, self.start_date, self.end_date)

    def clean(self):
        # A Reading may be open-ended (no end_date); only validate the
        # ordering when both dates are present.
        if self.start_date and self.end_date and self.start_date > self.end_date:
            raise ValidationError(
                "A Reading's end date can't be before its start date."
            )
| |
#!/usr/bin/env python
import unittest
from test import support
from test.test_urllib2 import sanepathname2url
import os
import socket
import sys
import urllib.error
import urllib.request
def _retry_thrice(func, exc, *args, **kwargs):
    """Call *func*, retrying up to three times on exception *exc*.

    Any other exception propagates immediately.  If all three attempts
    fail with *exc*, the last such exception is re-raised.
    """
    for _ in range(3):
        try:
            return func(*args, **kwargs)
        except exc as e:
            last_exc = e
    # Removed the redundant bare `except: raise` clause: exceptions that
    # are not instances of *exc* propagate by themselves.
    raise last_exc
def _wrap_with_retry_thrice(func, exc):
    """Return a proxy for *func* that retries up to three times on *exc*."""
    def retrying_proxy(*call_args, **call_kwargs):
        return _retry_thrice(func, exc, *call_args, **call_kwargs)
    return retrying_proxy
# Connecting to remote hosts is flaky. Make it more robust by retrying
# the connection several times.  All tests below go through this wrapper
# instead of calling urllib.request.urlopen directly.
_urlopen_with_retry = _wrap_with_retry_thrice(urllib.request.urlopen,
                                              urllib.error.URLError)
class AuthTests(unittest.TestCase):
    """Tests urllib2 authentication features."""

    ## Disabled at the moment since there is no page under python.org which
    ## could be used to HTTP authentication.
    ## (The commented-out code below is kept as a template for when such a
    ## page becomes available.)
    #
    # def test_basic_auth(self):
    #     import http.client
    #
    #     test_url = "http://www.python.org/test/test_urllib2/basic_auth"
    #     test_hostport = "www.python.org"
    #     test_realm = 'Test Realm'
    #     test_user = 'test.test_urllib2net'
    #     test_password = 'blah'
    #
    #     # failure
    #     try:
    #         _urlopen_with_retry(test_url)
    #     except urllib2.HTTPError, exc:
    #         self.assertEqual(exc.code, 401)
    #     else:
    #         self.fail("urlopen() should have failed with 401")
    #
    #     # success
    #     auth_handler = urllib2.HTTPBasicAuthHandler()
    #     auth_handler.add_password(test_realm, test_hostport,
    #                               test_user, test_password)
    #     opener = urllib2.build_opener(auth_handler)
    #     f = opener.open('http://localhost/')
    #     response = _urlopen_with_retry("http://www.python.org/")
    #
    #     # The 'userinfo' URL component is deprecated by RFC 3986 for security
    #     # reasons, let's not implement it!  (it's already implemented for proxy
    #     # specification strings (that is, URLs or authorities specifying a
    #     # proxy), so we must keep that)
    #     self.assertRaises(http.client.InvalidURL,
    #                       urllib2.urlopen, "http://evil:thing@example.com")
class CloseSocketTest(unittest.TestCase):

    def test_close(self):
        # Calling .close() on urllib's response objects should close the
        # underlying socket.
        # (Removed the unused `import socket, http.client, gc`; replaced
        # the assert_ alias, which is deprecated and removed in recent
        # Python versions, with assertFalse/assertTrue.)
        response = _urlopen_with_retry("http://www.python.org/")
        sock = response.fp
        self.assertFalse(sock.closed)
        response.close()
        self.assertTrue(sock.closed)
class OtherNetworkTests(unittest.TestCase):
    """Exercise urllib.request handlers against ftp: and file: URLs."""

    def setUp(self):
        if 0:  # for debugging
            import logging
            logger = logging.getLogger("test_urllib2net")
            logger.addHandler(logging.StreamHandler())

    # XXX The rest of these tests aren't very good -- they don't check much.
    # They do sometimes catch some major disasters, though.

    def test_ftp(self):
        urls = [
            'ftp://ftp.kernel.org/pub/linux/kernel/README',
            'ftp://ftp.kernel.org/pub/linux/kernel/non-existent-file',
            #'ftp://ftp.kernel.org/pub/leenox/kernel/test',
            'ftp://gatekeeper.research.compaq.com/pub/DEC/SRC'
            '/research-reports/00README-Legal-Rules-Regs',
            ]
        self._test_urls(urls, self._extra_handlers())

    def test_file(self):
        TESTFN = support.TESTFN
        f = open(TESTFN, 'w')
        try:
            f.write('hi there\n')
            f.close()
            urls = [
                'file:' + sanepathname2url(os.path.abspath(TESTFN)),
                ('file:///nonsensename/etc/passwd', None,
                 urllib.error.URLError),
                ]
            self._test_urls(urls, self._extra_handlers(), retry=True)
        finally:
            os.remove(TESTFN)

    # XXX Following test depends on machine configurations that are internal
    # to CNRI. Need to set up a public server with the right authentication
    # configuration for test purposes.
    ## def test_cnri(self):
    ##     if socket.gethostname() == 'bitdiddle':
    ##         localhost = 'bitdiddle.cnri.reston.va.us'
    ##     elif socket.gethostname() == 'bitdiddle.concentric.net':
    ##         localhost = 'localhost'
    ##     else:
    ##         localhost = None
    ##     if localhost is not None:
    ##         urls = [
    ##             'file://%s/etc/passwd' % localhost,
    ##             'http://%s/simple/' % localhost,
    ##             'http://%s/digest/' % localhost,
    ##             'http://%s/not/found.h' % localhost,
    ##             ]
    ##         bauth = HTTPBasicAuthHandler()
    ##         bauth.add_password('basic_test_realm', localhost, 'jhylton',
    ##                            'password')
    ##         dauth = HTTPDigestAuthHandler()
    ##         dauth.add_password('digest_test_realm', localhost, 'jhylton',
    ##                            'password')
    ##         self._test_urls(urls, self._extra_handlers()+[bauth, dauth])

    def _test_urls(self, urls, handlers, retry=True):
        """Open each url with an opener built from *handlers* and verify
        either a successful read or the expected error type.
        """
        # (Removed the unused `import socket`.)
        import time
        import logging
        debug = logging.getLogger("test_urllib2").debug

        urlopen = urllib.request.build_opener(*handlers).open
        if retry:
            urlopen = _wrap_with_retry_thrice(urlopen, urllib.error.URLError)

        for url in urls:
            if isinstance(url, tuple):
                url, req, expected_err = url
            else:
                req = expected_err = None
            debug(url)
            try:
                f = urlopen(url, req)
            except EnvironmentError as err:
                debug(err)
                if expected_err:
                    msg = ("Didn't get expected error(s) %s for %s %s, got %s: %s" %
                           (expected_err, url, req, type(err), err))
                    # assertIsInstance replaces the assert_ alias, which
                    # is deprecated and removed in recent Python versions.
                    self.assertIsInstance(err, expected_err, msg)
            else:
                with support.time_out, \
                     support.socket_peer_reset, \
                     support.ioerror_peer_reset:
                    buf = f.read()
                f.close()
                debug("read %d bytes" % len(buf))
            debug("******** next url coming up...")
            time.sleep(0.1)

    def _extra_handlers(self):
        handlers = []

        cfh = urllib.request.CacheFTPHandler()
        cfh.setTimeout(1)
        handlers.append(cfh)

        return handlers
class TimeoutTest(unittest.TestCase):
    """Check how default/explicit socket timeouts propagate to the
    sockets underlying HTTP and FTP responses.

    (assertIsNone replaces assertTrue(... is None) throughout for
    clearer failure messages.)
    """

    def test_http_basic(self):
        self.assertIsNone(socket.getdefaulttimeout())
        u = _urlopen_with_retry("http://www.python.org")
        self.assertIsNone(u.fp.raw._sock.gettimeout())

    def test_http_default_timeout(self):
        self.assertIsNone(socket.getdefaulttimeout())
        socket.setdefaulttimeout(60)
        try:
            u = _urlopen_with_retry("http://www.python.org")
        finally:
            # Always restore the global default.
            socket.setdefaulttimeout(None)
        self.assertEqual(u.fp.raw._sock.gettimeout(), 60)

    def test_http_no_timeout(self):
        self.assertIsNone(socket.getdefaulttimeout())
        socket.setdefaulttimeout(60)
        try:
            # An explicit timeout=None must override the global default.
            u = _urlopen_with_retry("http://www.python.org", timeout=None)
        finally:
            socket.setdefaulttimeout(None)
        self.assertIsNone(u.fp.raw._sock.gettimeout())

    def test_http_timeout(self):
        u = _urlopen_with_retry("http://www.python.org", timeout=120)
        self.assertEqual(u.fp.raw._sock.gettimeout(), 120)

    FTP_HOST = "ftp://ftp.mirror.nl/pub/mirror/gnu/"

    def test_ftp_basic(self):
        self.assertIsNone(socket.getdefaulttimeout())
        u = _urlopen_with_retry(self.FTP_HOST)
        self.assertIsNone(u.fp.fp.raw._sock.gettimeout())

    def test_ftp_default_timeout(self):
        self.assertIsNone(socket.getdefaulttimeout())
        socket.setdefaulttimeout(60)
        try:
            u = _urlopen_with_retry(self.FTP_HOST)
        finally:
            socket.setdefaulttimeout(None)
        self.assertEqual(u.fp.fp.raw._sock.gettimeout(), 60)

    def test_ftp_no_timeout(self):
        self.assertIsNone(socket.getdefaulttimeout())
        socket.setdefaulttimeout(60)
        try:
            u = _urlopen_with_retry(self.FTP_HOST, timeout=None)
        finally:
            socket.setdefaulttimeout(None)
        self.assertIsNone(u.fp.fp.raw._sock.gettimeout())

    def test_ftp_timeout(self):
        u = _urlopen_with_retry(self.FTP_HOST, timeout=60)
        self.assertEqual(u.fp.fp.raw._sock.gettimeout(), 60)
def test_main():
    """Run the whole network test suite (needs the 'network' resource)."""
    support.requires("network")
    suite = (AuthTests,
             OtherNetworkTests,
             CloseSocketTest,
             TimeoutTest,
             )
    support.run_unittest(*suite)


if __name__ == "__main__":
    test_main()
| |
## Automatically adapted for numpy.oldnumeric May 17, 2011 by -c
import numpy.oldnumeric as Numeric
def dot(array1, array2):
    """Return the dot product of *array1* and *array2*.

    Prefers a ``dot`` method on either operand (the product is
    symmetric for vectors), falling back to ``Numeric.dot`` when
    neither provides one.
    """
    try:
        return array1.dot(array2)
    except AttributeError:
        pass
    try:
        return array2.dot(array1)
    except AttributeError:
        return Numeric.dot(array1, array2)
def sum(array):
    """Sum the elements of *array*.

    Sparse objects (anything providing ``assignments()`` and
    ``default()``) are summed without expanding them; everything else
    is delegated to ``Numeric.sum``.
    """
    try:
        assigned = array.assignments()
        # Unassigned entries all carry the default value.
        total = array.default() * (len(array) - len(assigned))
        for _index, value in assigned:
            total += value
        return total
    except AttributeError:
        return Numeric.sum(array)
class SparseArray:
def __init__(self, dimensions, default=0, assignments=None):
self._default = default
if assignments:
self._assignments = assignments.copy()
else:
self._assignments = {}
try:
self._n = len(dimensions)
self._assignments = {}
for i in range(self._n):
value = dimensions[i]
if value != self._default:
self._assignments[i] = value
except TypeError:
self._n = dimensions
def assignments(self):
return self._assignments.items()
def default(self):
return self._default
def as_dense_array(self):
return Numeric.array(self)
def dot(self, other):
assert len(self) == len(other)
v = 0
try:
indices = dict(self._assignments)
indices.update(other._assignments)
v += (len(self) - len(indices)) * (self._default * other._default)
for index in indices.keys():
v += self[index] * other[index]
except AttributeError:
for i in range(len(self)):
v += self[i] * other[i]
return v
def matrix_multiply(self, matrix):
shape = matrix.shape
assert shape[0] == len(self)
v = SparseArray(self._n, self._default)
for i in range(len(self)):
v[i] = self.dot(matrix[:,i])
return v
def matrix_multiply_transpose(self, matrix):
shape = matrix.shape
assert shape[1] == len(self)
v = SparseArray(self._n, self._default)
for i in range(len(self)):
v[i] = self.dot(matrix[i])
return v
def as_dense_array(self):
return Numeric.array(self)
def add_to(self, dense_array):
if self._default:
for index in range(len(self)):
dense_array[index] += self[index]
else:
for index, value in self._assignments.items():
dense_array[index] += value
def subtract_from(self, dense_array):
if self._default:
for index in range(len(self)):
dense_array[index] -= self[index]
else:
for index, value in self._assignments.items():
dense_array[index] -= value
def __len__(self):
return self._n
def __getitem__(self, i):
if i >= 0 and i < self._n:
return self._assignments.get(i, self._default)
else:
raise IndexError('index out of range')
def __setitem__(self, i, v):
if i >= 0 and i < self._n:
if v != self._default:
self._assignments[i] = v
elif self._assignments.has_key(i):
del self._assignments[i]
else:
raise IndexError('index out of range')
def __add__(self, other):
try:
assert len(self) == len(other)
try:
# do an element-wise addition using sparse vectors
v = SparseArray(self._n, self._default + other._default)
indices = dict(self._assignments)
indices.update(other._assignments)
for index in indices.keys():
v[index] = self[index] + other[index]
except AttributeError:
# do an element-wise addition with a sequence
# [FIXME] - v prolly shouldn't be sparse, but need to [:] or
# copy to interface with sequences and Numeric arrays
v = SparseArray(self._n, self._default)
for index in range(len(self)):
v[index] = self[index] + other[index]
except TypeError:
# addition with a scalar
v = SparseArray(self._n, self._default + other)
for index, value in self._assignments.items():
v[index] = self[index] + other
return v
def __div__(self, other):
try:
assert len(self) == len(other)
try:
# do an element-wise division using sparse vectors
v = SparseArray(self._n, self._default / other._default)
indices = dict(self._assignments)
indices.update(other._assignments)
for index in indices.keys():
v[index] = self[index] / other[index]
except AttributeError:
# do an element-wise division with a sequence
# [FIXME] - v prolly shouldn't be sparse, but need to [:] or
# copy to interface with sequences and Numeric arrays
v = SparseArray(self._n, self._default)
for index in range(len(self)):
v[index] = self[index] / other[index]
except TypeError:
# divide by a scalar
v = SparseArray(self._n, self._default / other)
for index, value in self._assignments.items():
v[index] = self[index] / other
return v
def __mul__(self, other):
try:
assert len(self) == len(other)
try:
# do an element-wise multiplication using sparse vectors
v = SparseArray(self._n, self._default * other._default)
indices = dict(self._assignments)
indices.update(other._assignments)
for index in indices.keys():
v[index] = self[index] * other[index]
except AttributeError:
# do an element-wise multiplication with a sequence
# [FIXME] - v prolly shouldn't be sparse, but need to [:] or
# copy to interface with sequences and Numeric arrays
v = SparseArray(self._n, self._default)
for index in range(len(self)):
v[index] = self[index] * other[index]
except TypeError:
# multiply by a scalar
v = SparseArray(self._n, self._default * other)
for index, value in self._assignments.items():
v[index] = self[index] * other
return v
def __sub__(self, other):
try:
assert len(self) == len(other)
try:
# do an element-wise subtraction using sparse vectors
v = SparseArray(self._n, self._default - other._default)
indices = dict(self._assignments)
indices.update(other._assignments)
for index in indices.keys():
v[index] = self[index] - other[index]
except AttributeError:
# do an element-wise subtraction with a sequence
# [FIXME] - v prolly shouldn't be sparse, but need to [:] or
# copy to interface with sequences and Numeric arrays
v = SparseArray(self._n, self._default)
for index in range(len(self)):
v[index] = self[index] - other[index]
except TypeError:
# subtract a scalar
v = SparseArray(self._n, self._default - other)
for index, value in self._assignments.items():
v[index] = self[index] - other
return v
def __pow__(self, other):
try:
assert len(self) == len(other)
try:
# do an element-wise power using sparse vectors
v = SparseArray(self._n, self._default ** other._default)
indices = dict(self._assignments)
indices.update(other._assignments)
for index in indices.keys():
v[index] = self[index] - other[index]
except AttributeError:
# do an element-wise power with a sequence
# [FIXME] - v prolly shouldn't be sparse, but need to [:] or
# copy to interface with sequences and Numeric arrays
v = SparseArray(self._n, self._default)
for index in range(len(self)):
v[index] = self[index] ** other[index]
except TypeError:
# to power of a scalar
v = SparseArray(self._n, self._default ** other)
for index, value in self._assignments.items():
v[index] = self[index] ** other
return v
def __iadd__(self, other):
try:
assert len(self) == len(other)
try:
# do an element-wise addition using sparse vectors
self._default += other._default
indices = dict(self._assignments)
indices.update(other._assignments)
for index in indices.keys():
self[index] += other[index]
except AttributeError:
# do an element-wise addition with a sequence
# [FIXME] - v prolly shouldn't be sparse, but need to [:] or
# copy to interface with sequences and Numeric arrays
for index in range(len(self)):
self[index] += other[index]
except TypeError:
# addition with a scalar
self._default += other
for index in self._assignments.keys():
self[index] += other
return self
def __isub__(self, other):
try:
assert len(self) == len(other)
try:
# do an element-wise subtraction using sparse vectors
self._default -= other._default
indices = dict(self._assignments)
indices.update(other._assignments)
for index in indices.keys():
self[index] -= other[index]
except AttributeError:
# do an element-wise subtraction with a sequence
# [FIXME] - v prolly shouldn't be sparse, but need to [:] or
# copy to interface with sequences and Numeric arrays
for index in range(len(self)):
self[index] -= other[index]
except TypeError:
# subtraction with a scalar
self._default -= other
for index in self._assignments.keys():
self[index] -= other
return self
def __imul__(self, other):
try:
assert len(self) == len(other)
try:
# do an element-wise multiplication using sparse vectors
self._default *= other._default
indices = dict(self._assignments)
indices.update(other._assignments)
for index in indices.keys():
self[index] *= other[index]
except AttributeError:
# do an element-wise multiplication with a sequence
# [FIXME] - v prolly shouldn't be sparse, but need to [:] or
# copy to interface with sequences and Numeric arrays
for index in range(len(self)):
self[index] *= other[index]
except TypeError:
# multiply by a scalar
self._default *= other
for index in self._assignments.keys():
self[index] *= other
return self
    def __idiv__(self, other):
        """In-place element-wise division by another SparseArray, a sequence,
        or a scalar (same exception-driven dispatch as __iadd__).

        Python 2 only: __idiv__ implements ``/=`` under classic division.
        """
        try:
            assert len(self) == len(other)
            try:
                # do an element-wise division using sparse vectors
                # NOTE(review): default is divided before the assigned
                # entries are walked; entries assigned only in `other` read
                # the already-divided default — verify intended semantics.
                self._default /= other._default
                indices = dict(self._assignments)
                indices.update(other._assignments)
                for index in indices.keys():
                    self[index] /= other[index]
            except AttributeError:
                # do an element-wise division with a sequence
                # [FIXME] - v prolly shouldn't be sparse, but need to [:] or
                # copy to interface with sequences and Numeric arrays
                for index in range(len(self)):
                    self[index] /= other[index]
        except TypeError:
            # division by a scalar
            self._default /= other
            for index in self._assignments.keys():
                self[index] /= other
        return self
def __getslice__(self, start, end):
start = min(max(start, 0), self._n)
end = min(max(end, 0), self._n)
if end > start:
v = SparseArray(end - start, self._default)
for index in self._assignments.keys():
if start <= index < end:
v[index - start] = self._assignments[index]
return v
else:
return []
def __setslice__(self, start, end, values):
start = min(max(start, 0), self._n)
end = min(max(end, 0), self._n)
assert len(values) == end - start, 'Cannot resize vector'
for i in range(len(values)):
self[start + i] = values[i]
def __str__(self):
return '[%s]' % ', '.join(map(str, self))
    def __repr__(self):
        # Constructor-style representation: SparseArray(n, default, assignments).
        return 'SparseArray(%d, %s, %s)' % (self._n, self._default,
                                            self._assignments)
__radd__ = __add__
__rmul__ = __mul__
__rdiv__ = __div__
# order of operations important for these
#__rsub__ = __sub__
#__rpow__ = __pow__
def test():
    # Ad-hoc smoke test exercising SparseArray construction, item assignment,
    # in-place arithmetic, dot products and matrix multiplication.
    # Python 2 only: uses print statements and backtick repr syntax.
    a = SparseArray(5, 1)
    a[0] = 10
    a[3] = 2
    print str(a), `a`
    a *= 2
    print str(a), `a`
    a += 2
    print str(a), `a`
    a /= range(1, 6)
    print str(a), `a`
    b = SparseArray(5, 3, {2:15})
    a -= b
    print str(a), `a`
    print a.dot(b), b.dot(a)
    print a.as_dense_array()
    # NOTE(review): in the numpy lineage, array.resize(...) resizes in place
    # and returns None, which would make M None here — confirm against the
    # Numeric API version this script targets.
    M = Numeric.array(range(25)).resize((5,5))
    print a.matrix_multiply(M)
    print Numeric.matrixmultiply(a.as_dense_array(), M)
    print a.matrix_multiply_transpose(M)
    print Numeric.matrixmultiply(M, a.as_dense_array())
    # NOTE(review): passing a list as the first argument exercises whatever
    # sequence-handling the constructor has — not visible from here.
    c = SparseArray([1,2,3,4,5])
    print c, `c`
if __name__ == '__main__':
    test()
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for V1 metrics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.contrib.distribute.python import combinations
from tensorflow.contrib.distribute.python import tpu_strategy
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import test
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import metrics
from tensorflow.python.ops import variables
def _labeled_dataset_fn():
  """Endless dataset of {labels: x % 5, predictions: x % 3}, batched by 4."""
  # First four batches of x: labels, predictions -> (labels == predictions)
  # 0: 0, 0 -> True; 1: 1, 1 -> True; 2: 2, 2 -> True; 3: 3, 0 -> False
  # 4: 4, 1 -> False; 5: 0, 2 -> False; 6: 1, 0 -> False; 7: 2, 1 -> False
  # 8: 3, 2 -> False; 9: 4, 0 -> False; 10: 0, 1 -> False; 11: 1, 2 -> False
  # 12: 2, 0 -> False; 13: 3, 1 -> False; 14: 4, 2 -> False; 15: 0, 0 -> True
  dataset = dataset_ops.Dataset.range(1000)
  dataset = dataset.map(lambda x: {"labels": x % 5, "predictions": x % 3})
  return dataset.batch(4, drop_remainder=True)
def _boolean_dataset_fn():
  """Repeating boolean label/prediction pairs, batched by 3."""
  # First four batches of labels, predictions: {TP, FP, TN, FN}
  # with a threshold of 0.5:
  # T, T -> TP; F, T -> FP; T, F -> FN
  # F, F -> TN; T, T -> TP; F, T -> FP
  # T, F -> FN; F, F -> TN; T, T -> TP
  # F, T -> FP; T, F -> FN; F, F -> TN
  features = {
      "labels": [True, False, True, False],
      "predictions": [True, True, False, False],
  }
  dataset = dataset_ops.Dataset.from_tensor_slices(features)
  return dataset.repeat().batch(3, drop_remainder=True)
def _threshold_dataset_fn():
  """Repeating boolean labels with float predictions, batched by 3."""
  # First four batches of labels, predictions: {TP, FP, TN, FN}
  # with a threshold of 0.5:
  # True, 1.0 -> TP; False, .75 -> FP; True, .25 -> FN
  # False, 0.0 -> TN; True, 1.0 -> TP; False, .75 -> FP
  # True, .25 -> FN; False, 0.0 -> TN; True, 1.0 -> TP
  # False, .75 -> FP; True, .25 -> FN; False, 0.0 -> TN
  features = {
      "labels": [True, False, True, False],
      "predictions": [1.0, 0.75, 0.25, 0.],
  }
  dataset = dataset_ops.Dataset.from_tensor_slices(features)
  return dataset.repeat().batch(3, drop_remainder=True)
def _regression_dataset_fn():
  """Endless per-element stream of float label/prediction pairs."""
  features = {
      "labels": [1., .5, 1., 0.],
      "predictions": [1., .75, .25, 0.],
  }
  return dataset_ops.Dataset.from_tensor_slices(features).repeat()
# TODO(priyag): Add TPU Strategy to this once metrics aggregate correctly using
# ReplicaLocalVariables on TPUs. Submit http://cl/208914352.
def all_combinations():
  """Graph-mode combinations over the non-TPU distribution strategies."""
  strategies = [
      combinations.default_strategy,
      combinations.one_device_strategy,
      combinations.mirrored_strategy_with_gpu_and_cpu,
      combinations.mirrored_strategy_with_two_gpus,
  ]
  return combinations.combine(distribution=strategies, mode=["graph"])
def tpu_combinations():
  """Graph-mode combinations over the TPU distribution strategies."""
  tpu_strategies = [
      combinations.tpu_strategy_one_step,
      combinations.tpu_strategy,
  ]
  return combinations.combine(distribution=tpu_strategies, mode=["graph"])
# TODO(josh11b): Test metrics.recall_at_top_k, metrics.average_precision_at_k,
# metrics.precision_at_k
class MetricsV1Test(test.TestCase, parameterized.TestCase):
  """Checks that tf.metrics.* aggregate correctly under the distribution
  strategies produced by all_combinations()/tpu_combinations()."""
  def _test_metric(self, distribution, dataset_fn, metric_fn, expected_fn):
    """Runs `metric_fn` over `dataset_fn`'s batches under `distribution`.

    After each update the metric value must match
    `expected_fn(batches_consumed)` to within 0.001.  Stops once four input
    batches have been consumed in total.
    """
    with ops.Graph().as_default(), distribution.scope():
      iterator = distribution.distribute_dataset(
          dataset_fn).make_initializable_iterator()
      if isinstance(distribution, tpu_strategy.TPUStrategy):
        # TPU path: run steps_per_run steps per session call and surface the
        # metric's value through a non-tensor output of the run context.
        def step_fn(ctx, inputs):
          value, update = distribution.call_for_each_replica(
              metric_fn, inputs)
          ctx.set_non_tensor_output(name="value", output=value)
          return distribution.group(update)
        ctx = distribution.run_steps_on_dataset(
            step_fn, iterator, iterations=distribution.steps_per_run)
        update = ctx.run_op
        value = ctx.non_tensor_outputs["value"]
        # In each run, we run multiple steps, and each steps consumes as many
        # batches as number of replicas.
        batches_per_update = (
            distribution.num_replicas * distribution.steps_per_run)
      else:
        value, update = distribution.call_for_each_replica(
            metric_fn, iterator.get_next())
        update = distribution.group(update)
        # TODO(josh11b): Once we switch to using a global batch size for input,
        # replace "distribution.num_replicas" with "1".
        batches_per_update = distribution.num_replicas
      self.evaluate(iterator.initializer)
      self.evaluate(distribution.initialize())
      self.evaluate(variables.local_variables_initializer())
      batches_consumed = 0
      for i in range(4):
        self.evaluate(update)
        batches_consumed += batches_per_update
        self.assertAllClose(expected_fn(batches_consumed),
                            self.evaluate(value),
                            0.001,
                            msg="After update #" + str(i+1))
        if batches_consumed >= 4: # Consume 4 input batches in total.
          break
      self.evaluate(distribution.finalize())
  @combinations.generate(all_combinations() + tpu_combinations())
  def testMean(self, distribution):
    def _dataset_fn():
      return dataset_ops.Dataset.range(1000).map(math_ops.to_float).batch(
          4, drop_remainder=True)
    def _expected_fn(num_batches):
      # Mean(0..3) = 1.5, Mean(0..7) = 3.5, Mean(0..11) = 5.5, etc.
      return num_batches * 2 - 0.5
    self._test_metric(distribution, _dataset_fn, metrics.mean, _expected_fn)
  @combinations.generate(all_combinations() + tpu_combinations())
  def testAccuracy(self, distribution):
    def _metric_fn(x):
      labels = x["labels"]
      predictions = x["predictions"]
      return metrics.accuracy(labels, predictions)
    def _expected_fn(num_batches):
      return [3./4, 3./8, 3./12, 4./16][num_batches - 1]
    self._test_metric(
        distribution, _labeled_dataset_fn, _metric_fn, _expected_fn)
  # TODO(priyag, jhseu): Enable TPU for this test once scatter_add is added
  # for TPUMirroredVariable.
  @combinations.generate(all_combinations())
  def testMeanPerClassAccuracy(self, distribution):
    def _metric_fn(x):
      labels = x["labels"]
      predictions = x["predictions"]
      return metrics.mean_per_class_accuracy(
          labels, predictions, num_classes=5)
    def _expected_fn(num_batches):
      mean = lambda x: sum(x) / len(x)
      return [mean([1., 1., 1., 0., 0.]),
              mean([0.5, 0.5, 0.5, 0., 0.]),
              mean([1./3, 1./3, 0.5, 0., 0.]),
              mean([0.5, 1./3, 1./3, 0., 0.])][num_batches - 1]
    self._test_metric(
        distribution, _labeled_dataset_fn, _metric_fn, _expected_fn)
  # NOTE(priyag): This metric doesn't work on TPUs yet.
  @combinations.generate(all_combinations())
  def testMeanIOU(self, distribution):
    def _metric_fn(x):
      labels = x["labels"]
      predictions = x["predictions"]
      return metrics.mean_iou(
          labels, predictions, num_classes=5)
    def _expected_fn(num_batches):
      mean = lambda x: sum(x) / len(x)
      return [mean([1./2, 1./1, 1./1, 0.]), # no class 4 in first batch
              mean([1./4, 1./4, 1./3, 0., 0.]),
              mean([1./6, 1./6, 1./5, 0., 0.]),
              mean([2./8, 1./7, 1./7, 0., 0.])][num_batches - 1]
    self._test_metric(
        distribution, _labeled_dataset_fn, _metric_fn, _expected_fn)
  @combinations.generate(all_combinations() + tpu_combinations())
  def testMeanTensor(self, distribution):
    def _dataset_fn():
      dataset = dataset_ops.Dataset.range(1000).map(math_ops.to_float)
      # Want to produce a fixed, known shape, so drop remainder when batching.
      dataset = dataset.batch(4, drop_remainder=True)
      return dataset
    def _expected_fn(num_batches):
      # Mean(0, 4, ..., 4 * num_batches - 4) == 2 * num_batches - 2
      # Mean(1, 5, ..., 4 * num_batches - 3) == 2 * num_batches - 1
      # Mean(2, 6, ..., 4 * num_batches - 2) == 2 * num_batches
      # Mean(3, 7, ..., 4 * num_batches - 1) == 2 * num_batches + 1
      first = 2. * num_batches - 2.
      return [first, first + 1., first + 2., first + 3.]
    self._test_metric(
        distribution, _dataset_fn, metrics.mean_tensor, _expected_fn)
  @combinations.generate(all_combinations() + tpu_combinations())
  def testAUCROC(self, distribution):
    def _metric_fn(x):
      labels = x["labels"]
      predictions = x["predictions"]
      return metrics.auc(labels, predictions, num_thresholds=8, curve="ROC",
                         summation_method="careful_interpolation")
    def _expected_fn(num_batches):
      return [0.5, 7./9, 0.8, 0.75][num_batches - 1]
    self._test_metric(
        distribution, _threshold_dataset_fn, _metric_fn, _expected_fn)
  @combinations.generate(all_combinations() + tpu_combinations())
  def testAUCPR(self, distribution):
    def _metric_fn(x):
      labels = x["labels"]
      predictions = x["predictions"]
      return metrics.auc(labels, predictions, num_thresholds=8, curve="PR",
                         summation_method="careful_interpolation")
    def _expected_fn(num_batches):
      return [0.797267, 0.851238, 0.865411, 0.797267][num_batches - 1]
    self._test_metric(
        distribution, _threshold_dataset_fn, _metric_fn, _expected_fn)
  @combinations.generate(all_combinations() + tpu_combinations())
  def testFalseNegatives(self, distribution):
    def _metric_fn(x):
      labels = x["labels"]
      predictions = x["predictions"]
      return metrics.false_negatives(labels, predictions)
    def _expected_fn(num_batches):
      return [1., 1., 2., 3.][num_batches - 1]
    self._test_metric(
        distribution, _boolean_dataset_fn, _metric_fn, _expected_fn)
  @combinations.generate(all_combinations() + tpu_combinations())
  def testFalseNegativesAtThresholds(self, distribution):
    def _metric_fn(x):
      labels = x["labels"]
      predictions = x["predictions"]
      return metrics.false_negatives_at_thresholds(labels, predictions, [.5])
    def _expected_fn(num_batches):
      return [[1.], [1.], [2.], [3.]][num_batches - 1]
    self._test_metric(
        distribution, _threshold_dataset_fn, _metric_fn, _expected_fn)
  @combinations.generate(all_combinations() + tpu_combinations())
  def testTrueNegatives(self, distribution):
    def _metric_fn(x):
      labels = x["labels"]
      predictions = x["predictions"]
      return metrics.true_negatives(labels, predictions)
    def _expected_fn(num_batches):
      return [0., 1., 2., 3.][num_batches - 1]
    self._test_metric(
        distribution, _boolean_dataset_fn, _metric_fn, _expected_fn)
  @combinations.generate(all_combinations() + tpu_combinations())
  def testTrueNegativesAtThresholds(self, distribution):
    def _metric_fn(x):
      labels = x["labels"]
      predictions = x["predictions"]
      return metrics.true_negatives_at_thresholds(labels, predictions, [.5])
    def _expected_fn(num_batches):
      return [[0.], [1.], [2.], [3.]][num_batches - 1]
    self._test_metric(
        distribution, _threshold_dataset_fn, _metric_fn, _expected_fn)
  @combinations.generate(all_combinations() + tpu_combinations())
  def testFalsePositives(self, distribution):
    def _metric_fn(x):
      labels = x["labels"]
      predictions = x["predictions"]
      return metrics.false_positives(labels, predictions)
    def _expected_fn(num_batches):
      return [1., 2., 2., 3.][num_batches - 1]
    self._test_metric(
        distribution, _boolean_dataset_fn, _metric_fn, _expected_fn)
  @combinations.generate(all_combinations() + tpu_combinations())
  def testFalsePositivesAtThresholds(self, distribution):
    def _metric_fn(x):
      labels = x["labels"]
      predictions = x["predictions"]
      return metrics.false_positives_at_thresholds(labels, predictions, [.5])
    def _expected_fn(num_batches):
      return [[1.], [2.], [2.], [3.]][num_batches - 1]
    self._test_metric(
        distribution, _threshold_dataset_fn, _metric_fn, _expected_fn)
  @combinations.generate(all_combinations() + tpu_combinations())
  def testTruePositives(self, distribution):
    def _metric_fn(x):
      labels = x["labels"]
      predictions = x["predictions"]
      return metrics.true_positives(labels, predictions)
    def _expected_fn(num_batches):
      return [1., 2., 3., 3.][num_batches - 1]
    self._test_metric(
        distribution, _boolean_dataset_fn, _metric_fn, _expected_fn)
  @combinations.generate(all_combinations() + tpu_combinations())
  def testTruePositivesAtThresholds(self, distribution):
    def _metric_fn(x):
      labels = x["labels"]
      predictions = x["predictions"]
      return metrics.true_positives_at_thresholds(labels, predictions, [.5])
    def _expected_fn(num_batches):
      return [[1.], [2.], [3.], [3.]][num_batches - 1]
    self._test_metric(
        distribution, _threshold_dataset_fn, _metric_fn, _expected_fn)
  @combinations.generate(all_combinations() + tpu_combinations())
  def testPrecision(self, distribution):
    def _metric_fn(x):
      labels = x["labels"]
      predictions = x["predictions"]
      return metrics.precision(labels, predictions)
    def _expected_fn(num_batches):
      return [0.5, 0.5, 0.6, 0.5][num_batches - 1]
    self._test_metric(
        distribution, _boolean_dataset_fn, _metric_fn, _expected_fn)
  @combinations.generate(all_combinations() + tpu_combinations())
  def testPrecisionAtThreshold(self, distribution):
    def _metric_fn(x):
      labels = x["labels"]
      predictions = x["predictions"]
      return metrics.precision_at_thresholds(labels, predictions, [0.5])
    def _expected_fn(num_batches):
      return [[0.5], [0.5], [0.6], [0.5]][num_batches - 1]
    self._test_metric(
        distribution, _threshold_dataset_fn, _metric_fn, _expected_fn)
  @combinations.generate(all_combinations() + tpu_combinations())
  def testRecall(self, distribution):
    def _metric_fn(x):
      labels = x["labels"]
      predictions = x["predictions"]
      return metrics.recall(labels, predictions)
    def _expected_fn(num_batches):
      return [0.5, 2./3, 0.6, 0.5][num_batches - 1]
    self._test_metric(
        distribution, _boolean_dataset_fn, _metric_fn, _expected_fn)
  @combinations.generate(all_combinations() + tpu_combinations())
  def testRecallAtThreshold(self, distribution):
    def _metric_fn(x):
      labels = x["labels"]
      predictions = x["predictions"]
      return metrics.recall_at_thresholds(labels, predictions, [0.5])
    def _expected_fn(num_batches):
      return [[0.5], [2./3], [0.6], [0.5]][num_batches - 1]
    self._test_metric(
        distribution, _threshold_dataset_fn, _metric_fn, _expected_fn)
  @combinations.generate(all_combinations() + tpu_combinations())
  def testMeanSquaredError(self, distribution):
    def _metric_fn(x):
      labels = x["labels"]
      predictions = x["predictions"]
      return metrics.mean_squared_error(labels, predictions)
    def _expected_fn(num_batches):
      return [0., 1./32, 0.208333, 0.15625][num_batches - 1]
    self._test_metric(
        distribution, _regression_dataset_fn, _metric_fn, _expected_fn)
  @combinations.generate(all_combinations() + tpu_combinations())
  def testRootMeanSquaredError(self, distribution):
    def _metric_fn(x):
      labels = x["labels"]
      predictions = x["predictions"]
      return metrics.root_mean_squared_error(labels, predictions)
    def _expected_fn(num_batches):
      return [0., 0.176777, 0.456435, 0.395285][num_batches - 1]
    self._test_metric(
        distribution, _regression_dataset_fn, _metric_fn, _expected_fn)
  @combinations.generate(all_combinations())
  def testSensitivityAtSpecificity(self, distribution):
    def _metric_fn(x):
      labels = x["labels"]
      predictions = x["predictions"]
      return metrics.sensitivity_at_specificity(labels, predictions, 0.8)
    def _expected_fn(num_batches):
      return [0.5, 2./3, 0.6, 0.5][num_batches - 1]
    self._test_metric(
        distribution, _threshold_dataset_fn, _metric_fn, _expected_fn)
  @combinations.generate(all_combinations())
  def testSpecificityAtSensitivity(self, distribution):
    def _metric_fn(x):
      labels = x["labels"]
      predictions = x["predictions"]
      return metrics.specificity_at_sensitivity(labels, predictions, 0.95)
    def _expected_fn(num_batches):
      return [0., 1./3, 0.5, 0.5][num_batches - 1]
    self._test_metric(
        distribution, _threshold_dataset_fn, _metric_fn, _expected_fn)
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  test.main()
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Python ops defined in image_grad.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import image_ops
from tensorflow.python.platform import test
@test_util.disable_all_xla("b/124289666") # align_corners=False unimplemented
class ResizeNearestNeighborOpTest(test.TestCase):
  """Shape and gradient checks for image_ops.resize_nearest_neighbor."""
  # dtypes exercised by every test in this class.
  TYPES = [np.float32, np.float64]
  def testShapeIsCorrectAfterOp(self):
    # Static shape inference must agree with the evaluated shape.
    in_shape = [1, 2, 2, 1]
    out_shape = [1, 4, 6, 1]
    for nptype in self.TYPES:
      x = np.arange(0, 4).reshape(in_shape).astype(nptype)
      with self.cached_session(use_gpu=True) as sess:
        input_tensor = constant_op.constant(x, shape=in_shape)
        resize_out = image_ops.resize_nearest_neighbor(input_tensor,
                                                       out_shape[1:3])
        self.assertEqual(out_shape, list(resize_out.get_shape()))
        resize_out = self.evaluate(resize_out)
        self.assertEqual(out_shape, list(resize_out.shape))
  @test_util.run_deprecated_v1
  def testGradFromResizeToLargerInBothDims(self):
    # Numeric-vs-theoretical gradient check when upsampling.
    in_shape = [1, 2, 3, 1]
    out_shape = [1, 4, 6, 1]
    for nptype in self.TYPES:
      x = np.arange(0, 6).reshape(in_shape).astype(nptype)
      with self.cached_session(use_gpu=True):
        input_tensor = constant_op.constant(x, shape=in_shape)
        resize_out = image_ops.resize_nearest_neighbor(input_tensor,
                                                       out_shape[1:3])
        err = gradient_checker.compute_gradient_error(
            input_tensor, in_shape, resize_out, out_shape, x_init_value=x)
      self.assertLess(err, 1e-3)
  @test_util.run_deprecated_v1
  def testGradFromResizeToSmallerInBothDims(self):
    # Numeric-vs-theoretical gradient check when downsampling.
    in_shape = [1, 4, 6, 1]
    out_shape = [1, 2, 3, 1]
    for nptype in self.TYPES:
      x = np.arange(0, 24).reshape(in_shape).astype(nptype)
      with self.cached_session(use_gpu=True):
        input_tensor = constant_op.constant(x, shape=in_shape)
        resize_out = image_ops.resize_nearest_neighbor(input_tensor,
                                                       out_shape[1:3])
        err = gradient_checker.compute_gradient_error(
            input_tensor, in_shape, resize_out, out_shape, x_init_value=x)
      self.assertLess(err, 1e-3)
  @test_util.run_deprecated_v1
  def testCompareGpuVsCpu(self):
    # CPU and GPU gradient kernels must agree for both align_corners modes.
    in_shape = [1, 4, 6, 3]
    out_shape = [1, 8, 16, 3]
    for nptype in self.TYPES:
      x = np.arange(0, np.prod(in_shape)).reshape(in_shape).astype(nptype)
      for align_corners in [True, False]:
        with self.cached_session(use_gpu=False):
          input_tensor = constant_op.constant(x, shape=in_shape)
          resize_out = image_ops.resize_nearest_neighbor(
              input_tensor, out_shape[1:3], align_corners=align_corners)
          grad_cpu = gradient_checker.compute_gradient(
              input_tensor, in_shape, resize_out, out_shape, x_init_value=x)
        with self.cached_session(use_gpu=True):
          input_tensor = constant_op.constant(x, shape=in_shape)
          resize_out = image_ops.resize_nearest_neighbor(
              input_tensor, out_shape[1:3], align_corners=align_corners)
          grad_gpu = gradient_checker.compute_gradient(
              input_tensor, in_shape, resize_out, out_shape, x_init_value=x)
        self.assertAllClose(grad_cpu, grad_gpu, rtol=1e-5, atol=1e-5)
class ResizeBilinearOpTest(test.TestCase):
  """Shape, gradient and dtype checks for image_ops.resize_bilinear."""
  def testShapeIsCorrectAfterOp(self):
    # Static shape inference must agree with the evaluated shape.
    in_shape = [1, 2, 2, 1]
    out_shape = [1, 4, 6, 1]
    x = np.arange(0, 4).reshape(in_shape).astype(np.float32)
    with self.cached_session() as sess:
      input_tensor = constant_op.constant(x, shape=in_shape)
      resize_out = image_ops.resize_bilinear(input_tensor, out_shape[1:3])
      self.assertEqual(out_shape, list(resize_out.get_shape()))
      resize_out = self.evaluate(resize_out)
      self.assertEqual(out_shape, list(resize_out.shape))
  @test_util.run_deprecated_v1
  def testGradFromResizeToLargerInBothDims(self):
    # Numeric-vs-theoretical gradient check when upsampling.
    in_shape = [1, 2, 3, 1]
    out_shape = [1, 4, 6, 1]
    x = np.arange(0, 6).reshape(in_shape).astype(np.float32)
    with self.cached_session():
      input_tensor = constant_op.constant(x, shape=in_shape)
      resize_out = image_ops.resize_bilinear(input_tensor, out_shape[1:3])
      err = gradient_checker.compute_gradient_error(
          input_tensor, in_shape, resize_out, out_shape, x_init_value=x)
    self.assertLess(err, 1e-3)
  @test_util.run_deprecated_v1
  def testGradFromResizeToSmallerInBothDims(self):
    # Numeric-vs-theoretical gradient check when downsampling.
    in_shape = [1, 4, 6, 1]
    out_shape = [1, 2, 3, 1]
    x = np.arange(0, 24).reshape(in_shape).astype(np.float32)
    with self.cached_session():
      input_tensor = constant_op.constant(x, shape=in_shape)
      resize_out = image_ops.resize_bilinear(input_tensor, out_shape[1:3])
      err = gradient_checker.compute_gradient_error(
          input_tensor, in_shape, resize_out, out_shape, x_init_value=x)
    self.assertLess(err, 1e-3)
  @test_util.run_deprecated_v1
  @test_util.disable_xla("b/124290659") # align_corners=False unimplemented
  def testCompareGpuVsCpu(self):
    # CPU and GPU gradients must agree for both align_corners modes.
    in_shape = [2, 4, 6, 3]
    out_shape = [2, 8, 16, 3]
    size = np.prod(in_shape)
    x = 1.0 / size * np.arange(0, size).reshape(in_shape).astype(np.float32)
    for align_corners in [True, False]:
      grad = {}
      for use_gpu in [False, True]:
        with self.cached_session(use_gpu=use_gpu):
          input_tensor = constant_op.constant(x, shape=in_shape)
          resized_tensor = image_ops.resize_bilinear(
              input_tensor, out_shape[1:3], align_corners=align_corners)
          grad[use_gpu] = gradient_checker.compute_gradient(
              input_tensor, in_shape, resized_tensor, out_shape, x_init_value=x)
      self.assertAllClose(grad[False], grad[True], rtol=1e-4, atol=1e-4)
  @test_util.run_deprecated_v1
  def testTypes(self):
    # Gradient must exist and have the input's shape for all float dtypes,
    # including float16.
    in_shape = [1, 4, 6, 1]
    out_shape = [1, 2, 3, 1]
    x = np.arange(0, 24).reshape(in_shape)
    with self.cached_session() as sess:
      for dtype in [np.float16, np.float32, np.float64]:
        input_tensor = constant_op.constant(x.astype(dtype), shape=in_shape)
        resize_out = image_ops.resize_bilinear(input_tensor, out_shape[1:3])
        grad = sess.run(gradients_impl.gradients(resize_out, input_tensor))[0]
        self.assertAllEqual(in_shape, grad.shape)
        # Not using gradient_checker.compute_gradient as I didn't work out
        # the changes required to compensate for the lower precision of
        # float16 when computing the numeric jacobian.
        # Instead, we just test the theoretical jacobian.
        self.assertAllEqual([[[[1.], [0.], [1.], [0.], [1.], [0.]], [[0.], [
            0.
        ], [0.], [0.], [0.], [0.]], [[1.], [0.], [1.], [0.], [1.], [0.]],
                             [[0.], [0.], [0.], [0.], [0.], [0.]]]], grad)
class ResizeBicubicOpTest(test.TestCase):
  """Shape and gradient checks for image_ops.resize_bicubic."""
  def testShapeIsCorrectAfterOp(self):
    # Static shape inference must agree with the evaluated shape.
    in_shape = [1, 2, 2, 1]
    out_shape = [1, 4, 6, 1]
    x = np.arange(0, 4).reshape(in_shape).astype(np.float32)
    for align_corners in [True, False]:
      with self.cached_session() as sess:
        input_tensor = constant_op.constant(x, shape=in_shape)
        resize_out = image_ops.resize_bicubic(input_tensor, out_shape[1:3],
                                              align_corners=align_corners)
        self.assertEqual(out_shape, list(resize_out.get_shape()))
        resize_out = self.evaluate(resize_out)
        self.assertEqual(out_shape, list(resize_out.shape))
  @test_util.run_deprecated_v1
  def testGradFromResizeToLargerInBothDims(self):
    # Numeric-vs-theoretical gradient check when upsampling.
    in_shape = [1, 2, 3, 1]
    out_shape = [1, 4, 6, 1]
    x = np.arange(0, 6).reshape(in_shape).astype(np.float32)
    for align_corners in [True, False]:
      with self.cached_session():
        input_tensor = constant_op.constant(x, shape=in_shape)
        resize_out = image_ops.resize_bicubic(input_tensor, out_shape[1:3],
                                              align_corners=align_corners)
        err = gradient_checker.compute_gradient_error(
            input_tensor, in_shape, resize_out, out_shape, x_init_value=x)
      self.assertLess(err, 1e-3)
  @test_util.run_deprecated_v1
  def testGradFromResizeToSmallerInBothDims(self):
    # Numeric-vs-theoretical gradient check when downsampling.
    in_shape = [1, 4, 6, 1]
    out_shape = [1, 2, 3, 1]
    x = np.arange(0, 24).reshape(in_shape).astype(np.float32)
    for align_corners in [True, False]:
      with self.cached_session():
        input_tensor = constant_op.constant(x, shape=in_shape)
        resize_out = image_ops.resize_bicubic(input_tensor, out_shape[1:3],
                                              align_corners=align_corners)
        err = gradient_checker.compute_gradient_error(
            input_tensor, in_shape, resize_out, out_shape, x_init_value=x)
      self.assertLess(err, 1e-3)
  @test_util.run_deprecated_v1
  def testGradOnUnsupportedType(self):
    # uint8 input has no registered gradient, so gradients() returns [None].
    in_shape = [1, 4, 6, 1]
    out_shape = [1, 2, 3, 1]
    x = np.arange(0, 24).reshape(in_shape).astype(np.uint8)
    with self.cached_session():
      input_tensor = constant_op.constant(x, shape=in_shape)
      resize_out = image_ops.resize_bicubic(input_tensor, out_shape[1:3])
      grad = gradients_impl.gradients(input_tensor, [resize_out])
      self.assertEqual([None], grad)
class CropAndResizeOpTest(test.TestCase):
  """Shape and gradient checks for image_ops.crop_and_resize."""
  def testShapeIsCorrectAfterOp(self):
    # Static shape inference must agree with the evaluated shape.
    batch = 2
    image_height = 3
    image_width = 4
    crop_height = 4
    crop_width = 5
    depth = 2
    num_boxes = 2
    image_shape = [batch, image_height, image_width, depth]
    crop_size = [crop_height, crop_width]
    crops_shape = [num_boxes, crop_height, crop_width, depth]
    image = np.arange(0, batch * image_height * image_width *
                      depth).reshape(image_shape).astype(np.float32)
    boxes = np.array([[0, 0, 1, 1], [.1, .2, .7, .8]], dtype=np.float32)
    box_ind = np.array([0, 1], dtype=np.int32)
    with self.session(use_gpu=True) as sess:
      crops = image_ops.crop_and_resize(
          constant_op.constant(
              image, shape=image_shape),
          constant_op.constant(
              boxes, shape=[num_boxes, 4]),
          constant_op.constant(
              box_ind, shape=[num_boxes]),
          constant_op.constant(
              crop_size, shape=[2]))
      self.assertEqual(crops_shape, list(crops.get_shape()))
      crops = self.evaluate(crops)
      self.assertEqual(crops_shape, list(crops.shape))
  def _randomUniformAvoidAnchors(self, low, high, anchors, radius, num_samples):
    """Generate samples that are far enough from a set of anchor points.

    We generate uniform samples in [low, high], then reject those that are less
    than radius away from any point in anchors. We stop after we have accepted
    num_samples samples.

    Args:
      low: The lower end of the interval.
      high: The upper end of the interval.
      anchors: A list of length num_crops with anchor points to avoid.
      radius: Distance threshold for the samples from the anchors.
      num_samples: How many samples to produce.

    Returns:
      samples: A list of length num_samples with the accepted samples.
    """
    self.assertTrue(low < high)
    self.assertTrue(radius >= 0)
    num_anchors = len(anchors)
    # Make sure that at least half of the interval is not forbidden.
    self.assertTrue(2 * radius * num_anchors < 0.5 * (high - low))
    anchors = np.reshape(anchors, num_anchors)
    samples = []
    # Rejection sampling; terminates because the forbidden region covers
    # less than half the interval (checked above).
    while len(samples) < num_samples:
      sample = np.random.uniform(low, high)
      if np.all(np.fabs(sample - anchors) > radius):
        samples.append(sample)
    return samples
  @test_util.run_deprecated_v1
  def testGradRandomBoxes(self):
    """Test that the gradient is correct for randomly generated boxes.

    The mapping is piecewise differentiable with respect to the box coordinates.
    The points where the function is not differentiable are those which are
    mapped to image pixels, i.e., the normalized y coordinates in
    np.linspace(0, 1, image_height) and normalized x coordinates in
    np.linspace(0, 1, image_width). Make sure that the box coordinates are
    sufficiently far away from those rectangular grid centers that are points of
    discontinuity, so that the finite difference Jacobian is close to the
    computed one.
    """
    np.random.seed(1)  # Make it reproducible.
    delta = 1e-3
    radius = 2 * delta
    low, high = -0.5, 1.5  # Also covers the case of extrapolation.
    image_height = 4
    for image_width in range(1, 3):
      for crop_height in range(1, 3):
        for crop_width in range(2, 4):
          for depth in range(1, 3):
            for num_boxes in range(1, 3):
              batch = num_boxes
              image_shape = [batch, image_height, image_width, depth]
              crop_size = [crop_height, crop_width]
              crops_shape = [num_boxes, crop_height, crop_width, depth]
              boxes_shape = [num_boxes, 4]
              image = np.arange(0, batch * image_height * image_width *
                                depth).reshape(image_shape).astype(np.float32)
              boxes = []
              for _ in range(num_boxes):
                # pylint: disable=unbalanced-tuple-unpacking
                y1, y2 = self._randomUniformAvoidAnchors(
                    low, high, np.linspace(0, 1, image_height), radius, 2)
                x1, x2 = self._randomUniformAvoidAnchors(
                    low, high, np.linspace(0, 1, image_width), radius, 2)
                # pylint: enable=unbalanced-tuple-unpacking
                boxes.append([y1, x1, y2, x2])
              boxes = np.array(boxes, dtype=np.float32)
              box_ind = np.arange(batch, dtype=np.int32)
              with self.cached_session(use_gpu=True):
                image_tensor = constant_op.constant(image, shape=image_shape)
                boxes_tensor = constant_op.constant(boxes, shape=[num_boxes, 4])
                box_ind_tensor = constant_op.constant(
                    box_ind, shape=[num_boxes])
                crops = image_ops.crop_and_resize(
                    image_tensor,
                    boxes_tensor,
                    box_ind_tensor,
                    constant_op.constant(
                        crop_size, shape=[2]))
                # Gradient is checked w.r.t. both the image and the boxes.
                err = gradient_checker.compute_gradient_error(
                    [image_tensor, boxes_tensor], [image_shape, boxes_shape],
                    crops,
                    crops_shape,
                    delta=delta,
                    x_init_value=[image, boxes])
              self.assertLess(err, 2e-3)
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  test.main()
| |
import copy
import re
from enum import Enum
from random import randint, choice
from typing import Union, Optional, List, Any, Dict, Tuple
from netaddr import IPAddress, IPNetwork
from pyASA import address
from pyASA.address import BaseAddress, Address, AnyAddress
from pyASA.aliases import Aliases
from pyASA.baseconfigobject import BaseConfigObject
from pyASA.rulelogging import RuleLogging, LogLevel
class ServiceComparator(Enum):
    """
    Enumeration of the comparison operators that TCP/UDP rules apply to
    port numbers (equal, not-equal, less-than, greater-than).
    """
    EQUAL = ""
    NOT_EQUAL = "!="
    LESSER = "<"
    GREATER = ">"
    def to_cli(self) -> str:
        """
        Convert this comparator to its ASA CLI keyword.
        Returns:
            comparator keyword as used on the CLI ("eq", "neq", "lt" or "gt")
        """
        cli_keyword = {
            ServiceComparator.EQUAL: "eq",
            ServiceComparator.NOT_EQUAL: "neq",
            ServiceComparator.LESSER: "lt",
            ServiceComparator.GREATER: "gt",
        }
        return cli_keyword[self]
    @classmethod
    def from_cli(cls, line: str) -> "ServiceComparator":
        """
        Look up the comparator matching an ASA CLI keyword.
        Returns:
            ServiceComparator member matching the given CLI keyword
        """
        member_by_keyword = {"eq": cls.EQUAL, "neq": cls.NOT_EQUAL,
                             "lt": cls.LESSER, "gt": cls.GREATER}
        return member_by_keyword[line]
class RuleGeneric(BaseConfigObject):
    """
    Class representing ASA firewall rules that use neither TCP/UDP nor ICMP/ICMP6 protocol
    """

    def __init__(self, permit: bool = False, protocol: Union[int, str] = "ip",
                 src: Union[str, IPAddress, IPNetwork, BaseAddress] = "any",
                 dst: Union[str, IPAddress, IPNetwork, BaseAddress] = "any", remark: Union[None, str, list] = None,
                 active: bool = True, logging: Optional[RuleLogging] = None, position: int = 0,
                 is_access_rule: bool = False,
                 objectid: int = 0):
        # Backing fields are initialized to safe defaults first; all validation
        # and normalization then happens in the property setters below.
        self._permit = False
        self._protocol = 0
        self._src = AnyAddress()
        self._dst = AnyAddress()
        self._remark = []
        self._active = True
        self._logging = RuleLogging()
        self._position = 0
        self._is_access_rule = False
        self._objectid = 0
        self.permit = permit
        self.protocol = protocol
        self.src = src
        self.dst = dst
        self.remark = remark
        self.active = active
        self.logging = logging
        self.position = position
        self.is_access_rule = is_access_rule
        self.objectid = objectid

    @property
    def permit(self) -> bool:
        """
        Return/set if rule is a permit or deny rule.

        Returns:
            True if rule permits what it describes, False if it denies
        """
        return self._permit

    @permit.setter
    def permit(self, permit: bool):
        if not isinstance(permit, bool):
            raise TypeError(f"{type(permit)} is not a valid argument type")
        self._permit = bool(permit)

    @property
    def protocol(self) -> int:
        """
        Return/set IP protocol value. Accepts integer as well as ASA protocol aliases as strings.
        Checks that protocol is neither TCP/UDP nor ICMP/ICMP6, as specific classes for those rules exist.

        Returns:
            IP protocol number
        """
        return self._protocol

    @protocol.setter
    def protocol(self, protocol: Union[int, str]):
        # Numeric strings are normalized to int first so both "41" and 41 work.
        if isinstance(protocol, str) and protocol.isdigit():
            protocol = int(protocol)
        if isinstance(protocol, str):
            if protocol in ["icmp", "icmp6"]:
                raise ValueError("Use a RuleICMP object for icmp/icmp6 rules")
            elif protocol in ["tcp", "udp"]:
                raise ValueError("Use a RuleTCPUDP object for tcp/udp rules")
            elif protocol in Aliases.by_alias["protocol"]:
                self._protocol = Aliases.by_alias["protocol"][protocol]
            else:
                raise ValueError(f"'{protocol}' is not a valid protocol alias")
        elif isinstance(protocol, int):
            if 0 <= protocol <= 255:
                # 1/58 (ICMP/ICMP6) and 6/17 (TCP/UDP) have dedicated rule classes.
                if protocol not in [1, 6, 17, 58]:
                    self._protocol = protocol
                elif protocol in [1, 58]:
                    raise ValueError("Use a RuleICMP object for icmp/icmp6 rules")
                elif protocol in [6, 17]:
                    raise ValueError("Use a RuleTCPUDP object for tcp/udp rules")
            else:
                raise ValueError("protocol must be in range 0..255")
        else:
            raise TypeError("protocol must be an integer in range 0..255 or a valid protocol alias string")

    @property
    def protocol_alias(self) -> str:
        """
        Return IP protocol alias, if available, else IP protocol number as string.

        Returns:
            IP protocol alias, if available, else IP Protocol number as string
        """
        if self._protocol in Aliases.by_number["protocol"]:
            return Aliases.by_number["protocol"][self._protocol]
        else:
            return str(self._protocol)

    @property
    def src(self) -> BaseAddress:
        """
        Return/set source address for rule.

        Returns:
            Either AnyAddress or Address object
        """
        return self._src

    @property
    def dst(self) -> BaseAddress:
        """
        Return/set destination address for rule.

        Returns:
            Either AnyAddress or Address object
        """
        return self._dst

    @src.setter
    def src(self, addr: Union[str, IPAddress, IPNetwork, BaseAddress]):
        self._src = address.parse_address(addr)

    @dst.setter
    def dst(self, addr: Union[str, IPAddress, IPNetwork, BaseAddress]):
        self._dst = address.parse_address(addr)

    @property
    def remark(self) -> List[str]:
        """
        Return/set remarks (comments) for this rule.

        In Cisco ASA CLI logic, all remark lines before this rule in the ACL are considered remarks belonging
        to this rule. Accordingly, multiple remark strings are being turned into multiple remark lines when this
        rule is being sent to the ASA, inserted before this rule.

        Returns:
            A list containing none, one or multiple strings containing the remarks
        """
        return self._remark

    @remark.setter
    def remark(self, remark: Union[None, str, list]):
        if remark is None:
            self._remark = []
        elif isinstance(remark, str):
            self._remark = [str(remark)]
        elif isinstance(remark, list):
            if not all(isinstance(line, str) for line in remark):
                raise TypeError("list contains non-string values")
            else:
                # Copy to decouple the rule from the caller's list object.
                self._remark = copy.copy(remark)
        else:
            raise TypeError(f"{type(remark)} is not a valid argument type")

    @property
    def active(self) -> bool:
        """
        Return/set if rule is actually being used. Inactive rules are being skipped when matching traffic to an ACL.

        Returns:
            True if rule is active, False if not
        """
        return self._active

    @active.setter
    def active(self, active: bool):
        if not isinstance(active, bool):
            raise TypeError(f"{type(active)} is not a valid argument type")
        self._active = bool(active)

    @property
    def logging(self) -> RuleLogging:
        """
        Return/set logging settings for this rule by use of a RuleLogging object.

        Returns:
            RuleLogging object containing logging level and interval
        """
        return self._logging

    @logging.setter
    def logging(self, log: RuleLogging):
        if log is None:
            # None means "default logging settings".
            self._logging = RuleLogging()
        elif isinstance(log, RuleLogging):
            self._logging = log
        else:
            raise TypeError(f"{type(log)} is not a valid argument type")

    @property
    def position(self) -> int:
        """
        Return/set position of rule in ACL.

        Position is being determined when rule is retrieved from ASA, or can be used to determine position in new ACL
        it is being appended to.
        In contrast to the ASA CLI line number, the position value used by the API only counts 'real' rules,
        no remark lines.

        Example:
            line 1 remark allow all tcp traffic
            line 2 extended permit tcp any any
            line 3 remark deny all non tcp traffic
            line 4 extended deny ip any any

            The "deny any" rule is in position 2, when retrieved by the API, not in position 4

        Returns:
            0 if no position is configured, positive integer if a position has been set
        """
        return self._position

    @position.setter
    def position(self, pos: int):
        if isinstance(pos, int):
            if pos >= 0:
                self._position = int(pos)
            else:
                raise ValueError("position must be a positive integer")
        else:
            raise ValueError(f"{type(pos)} is not a valid argument type")

    @property
    def is_access_rule(self) -> bool:
        """
        Return/set if rule is an access rule.

        This value is only useful when rule is being retrieved from the ASA API, as it is a read-only value.
        Changing it has no effect and is being ignored when rule is pushed to ASA.
        Changing the value is only used for test and debugging purposes.

        Returns:
            True if rule is an access rule, False if not
        """
        return self._is_access_rule

    @is_access_rule.setter
    def is_access_rule(self, is_access_rule: bool):
        if isinstance(is_access_rule, bool):
            self._is_access_rule = bool(is_access_rule)
        else:
            raise ValueError(f"{type(is_access_rule)} is not a valid argument type")

    @property
    def objectid(self) -> int:
        """
        Return/set objectid of rule.

        The objectid is the integer (base 10) equivalent to the hex value shown on the CLI by the
        "show access-list" command and is a hash calculated by the ASA based on a rule's properties.
        It is mainly used to identify a rule on the CLI or in the log, and changes when the rule properties
        change. This does not happen automatically, as the hash algorithm used by the ASA is not public and
        the only way to retrieve the value is by getting the rule from the ASA.

        Returns:
            0 if no objectid is set, else positive integer
        """
        return self._objectid

    @objectid.setter
    def objectid(self, objectid: int):
        if isinstance(objectid, int):
            if objectid >= 0:
                self._objectid = int(objectid)
            else:
                raise ValueError("objectid must be a positive integer")
        else:
            raise ValueError(f"{type(objectid)} is not a valid argument type")

    @property
    def objectid_hexhash(self) -> str:
        """
        Return a string representing the objectid of the rule as hex (called hash on the ASA CLI).

        Returns:
            string hex representation of objectid
        """
        return hex(self._objectid)

    @staticmethod
    def _parse_protocol_json(proto: str) -> int:
        """
        Convert protocol string to int, either using a matching protocol alias or via int-to-str conversion.

        Args:
            proto: protocol string as received from API

        Returns:
            protocol value
        """
        if proto.isdigit():
            return int(proto)
        elif proto in Aliases.by_alias["protocol"]:
            return Aliases.by_alias["protocol"][proto]
        else:
            raise ValueError(f"{proto} is not a valid protocol alias")

    @classmethod
    def from_dict(cls, data: dict) -> "RuleGeneric":
        """
        Uses a dictionary representation of a rule (most likely converted from JSON data) to create a rule object.

        Args:
            data: dict to create rule object from, structured like the JSON responses from the API

        Returns:
            rule object equivalent to the provided data
        """
        permit = data["permit"]
        src = data["sourceAddress"]["value"]
        dst = data["destinationAddress"]["value"]
        protocol = cls._parse_protocol_json(data["sourceService"]["value"])
        remark = data["remarks"]
        active = data["active"]
        logging = RuleLogging.from_dict(data["ruleLogging"]) if "ruleLogging" in data else None
        position = int(data.get("position", 0))
        is_access_rule = bool(data.get("isAccessRule", False))
        objectid = int(data.get("objectId", 0))
        return cls(permit, protocol, src, dst, remark, active, logging, position, is_access_rule, objectid)

    def to_cli(self, acl: Optional[str] = None) -> str:
        """
        Return a CLI-style representation of the rule.

        Args:
            acl: ACL name as string to prepend to the rule in form of "access-list NAME"

        Returns:
            string containing CLI-style representation
        """
        result = f"{'' if acl is None else f'access-list {acl}'} extended {'permit' if self.permit else 'deny'} {self.protocol_alias} {self.src.to_cli()} {self.dst.to_cli()} {self.logging.to_cli()} {'inactive' if not self.active else ''}"
        # Empty optional parts leave runs of spaces behind; collapse them to
        # single spaces (the previous .replace(" ", " ") was a no-op).
        return " ".join(result.split())

    def to_dict(self) -> Dict[str, Any]:
        """
        Return rule data as dict representation in API JSON style.

        Returns:
            dict of rule values that can be easily converted to JSON for use with API
        """
        result = {}
        result["permit"] = self._permit
        result["sourceAddress"] = self._src.to_dict()
        result["destinationAddress"] = self._dst.to_dict()
        result["sourceService"] = {"kind": "NetworkProtocol", "value": self.protocol_alias}
        result["destinationService"] = {"kind": "NetworkProtocol", "value": self.protocol_alias}
        result["active"] = self._active
        result["remarks"] = self._remark
        result["ruleLogging"] = self._logging.to_dict()
        # position/objectId of 0 mean "unset" and are omitted from the API payload.
        if self._position > 0:
            result["position"] = self._position
        result["isAccessRule"] = self._is_access_rule
        if self._objectid > 0:
            result["objectId"] = self._objectid
        return result

    def clone(self):
        """
        Return an identical copy of this rule.

        Uses copy.deepcopy, but resets objectid to 0.

        Returns:
            rule object identical to this rule
        """
        rule = copy.deepcopy(self)
        rule.objectid = 0
        return rule

    def __eq__(self, other: object) -> bool:
        if isinstance(other, RuleGeneric):
            return self.to_dict() == other.to_dict()
        else:
            return False

    def __contains__(self, item: object) -> bool:
        """
        Verify if this rule shadows another rule object. Overloads python "in" operator.

        Shadowing means that the definition of this rule object is broad enough to completely cover all packets
        that the other rule would match.
        Compares source and destination address, protocol, permit and active states.

        Examples:
            (RuleGeneric("tcp", "any", "any") in RuleGeneric("ip", "any", "any")) == True

            rule_a = RuleGeneric()
            rule_a.active = False
            rule_b = RuleGeneric()
            (rule_b in rule_a) == True
            rule_b.active = True
            (rule_b in rule_a) == False

        Args:
            item: object to check if it is being shadowed by this rule

        Returns:
            True if this rule shadows the other rule, False if not
        """
        if not isinstance(item, RuleGeneric):
            return False
        if item.permit != self.permit:
            return False
        # if rule protocol is 0 (=IP), it covers all other IP protocols
        if self.protocol > 0 and self.protocol != item.protocol:
            return False
        if not isinstance(self.src, AnyAddress) and item.src not in self.src:
            return False
        if not isinstance(self.dst, AnyAddress) and item.dst not in self.dst:
            return False
        if item.active != self.active:
            return False
        return True

    @classmethod
    def random_rule(cls) -> "RuleGeneric":
        """
        Return a non-[TCP/UDP/ICMP/ICMP6] rule, with all values besides remark and is_access_rule randomly chosen.
        Mainly used for testing

        Returns:
            random rule object
        """
        max_v4 = 4294967295  # 2**32 - 1
        max_v6 = 340282366920938463463374607431768211455  # 2**128 - 1
        permit = choice([True, False])
        active = choice([True, False])
        # Exclude ICMP (1), TCP (6), UDP (17) and ICMP6 (58), which need their own rule classes.
        protocol = choice([i for i in range(0, 256) if i not in [1, 6, 17, 58]])
        if choice([4, 6]) == 6:
            if choice([True, False]):
                src = IPAddress(randint(0, max_v6), version=6)
            else:
                src = IPNetwork(
                    f"{IPAddress(randint(0, max_v6), version=6)}/{randint(1, 127)}").cidr
            if choice([True, False]):
                dst = IPAddress(randint(0, max_v6), version=6)
            else:
                dst = IPNetwork(
                    f"{IPAddress(randint(0, max_v6), version=6)}/{randint(1, 127)}").cidr
        else:
            if choice([True, False]):
                src = IPAddress(randint(0, max_v4), version=4)
            else:
                src = IPNetwork(f"{IPAddress(randint(0, max_v4))}/{randint(0, 31)}", version=4).cidr
            if choice([True, False]):
                dst = IPAddress(randint(0, max_v4), version=4)
            else:
                dst = IPNetwork(f"{IPAddress(randint(0, max_v4))}/{randint(0, 31)}", version=4).cidr
        log = RuleLogging(choice([level for level in LogLevel]), randint(1, 300))
        position = randint(0, 65535)
        objectid = randint(0, 4294967295)
        rule = cls(permit=permit, protocol=protocol, src=src, dst=dst, logging=log, active=active, position=position,
                   objectid=objectid)
        return rule
class RuleICMP(RuleGeneric):
    """
    Class representing ASA firewall rules that use either ICMP or ICMP6 protocol
    """

    def __init__(self, permit: bool = False, protocol: Union[int, str] = "icmp",
                 src: Union[str, IPAddress, IPNetwork, BaseAddress] = "any",
                 dst: Union[str, IPAddress, IPNetwork, BaseAddress] = "any", icmp_type: Union[str, int] = "any",
                 icmp_code: Union[int, str] = "any", remark: Union[None, str, list] = None, active: bool = True,
                 logging: Optional[RuleLogging] = None, position: int = 0, is_access_rule: bool = False,
                 objectid: int = 0):
        RuleGeneric.__init__(self, permit, protocol, src, dst, remark, active, logging, position, is_access_rule,
                             objectid)
        # -1 means "any" for both ICMP type and code.
        self._icmp_type = -1
        self._icmp_code = -1
        self.icmp_type = icmp_type
        self.icmp_code = icmp_code

    @property
    def protocol(self) -> int:
        """
        Return/set IP protocol value. Accepts integer as well as ASA protocol aliases as strings.
        Checks that protocol is ICMP/ICMP6, as this class only supports these protocols.

        Returns:
            IP protocol number
        """
        return self._protocol

    @protocol.setter
    def protocol(self, protocol: Union[int, str]):
        if isinstance(protocol, str) and protocol.isdigit():
            protocol = int(protocol)
        if isinstance(protocol, int):
            # 1 is ICMP, 58 is ICMP6.
            if protocol in [1, 58]:
                self._protocol = protocol
            elif protocol in [6, 17]:
                raise ValueError("Use a RuleTCPUDP object for TCP/UDP rules")
            else:
                raise ValueError("Use a RuleGeneric object for non ICMP/ICMP6 rules")
        elif isinstance(protocol, str):
            if protocol in ["icmp", "icmp6"]:
                self._protocol = Aliases.by_alias["protocol"][protocol]
            elif protocol in ["tcp", "udp"]:
                raise ValueError("Use a RuleTCPUDP object for TCP/UDP rules")
            else:
                raise ValueError("Use a RuleGeneric object for non ICMP/ICMP6 rules")
        else:
            raise ValueError(
                "protocol must be either 1 for icmp, 58 for icmp6 or \"icmp\" or \"icmp6\" protcol alias string")

    @property
    def icmp_type(self) -> int:
        """
        Return/set ICMP type used in this rule. Defaults to -1 for any.

        Returns:
            ICMP type number
        """
        return self._icmp_type

    @icmp_type.setter
    def icmp_type(self, icmp_type: Union[int, str]):
        if isinstance(icmp_type, str) and (icmp_type.isdigit() or icmp_type == "-1"):
            icmp_type = int(icmp_type)
        if isinstance(icmp_type, str):
            # Alias lookup is protocol specific (icmp vs icmp6 alias tables).
            if icmp_type in Aliases.by_alias[self.protocol_alias]:
                self._icmp_type = Aliases.by_alias[self.protocol_alias][icmp_type]
            else:
                raise ValueError(f"{icmp_type} is not a valid {self.protocol_alias} service alias")
        elif isinstance(icmp_type, int):
            if -1 <= icmp_type <= 255:
                self._icmp_type = int(icmp_type)
            else:
                raise ValueError("icmp_type must be in range 0..255 or -1 for any")
        else:
            raise ValueError(f"{type(icmp_type)} is not a valid argument type")

    @property
    def icmp_type_alias(self) -> str:
        """
        Return ICMP type alias if available, else ICMP type number as string.

        Returns:
            ICMP type alias or number as string
        """
        return Aliases.by_number[self.protocol_alias].get(self._icmp_type, str(self._icmp_type))

    @property
    def icmp_code(self):
        """
        Return/set ICMP code used in this rule. Defaults to -1 for any.
        Only has effect if icmp_type is not set to -1

        Returns:
            ICMP code number
        """
        return self._icmp_code

    @icmp_code.setter
    def icmp_code(self, icmp_code: Union[str, int]):
        if isinstance(icmp_code, str) and (icmp_code.isdigit() or icmp_code == "-1"):
            icmp_code = int(icmp_code)
        if isinstance(icmp_code, str):
            if icmp_code == "any":
                self._icmp_code = -1
            else:
                raise ValueError("icmp_code only allows \"any\" as string argument")
        elif isinstance(icmp_code, int):
            if -1 <= icmp_code <= 255:
                self._icmp_code = int(icmp_code)
            else:
                raise ValueError("icmp_code must be in range 0..255 or -1 for any")
        else:
            raise ValueError(f"{type(icmp_code)} is not a valid argument type")

    @classmethod
    def _parse_icmp_json(cls, data: dict) -> Tuple:
        """
        Utility function to parse ICMP type and code from a JSON dict.

        Args:
            data: dict with JSON data to parse

        Returns:
            protocol, ICMP type and ICMP code as tuple (type may be an alias string)
        """
        # Matches e.g. "icmp/echo", "icmp6/128/0" -> (protocol, type, optional code).
        regex = re.compile(r"^(icmp6?)/([a-z-]+|[0-9]+)/?(\d{0,3})$")
        if data["kind"] == "NetworkProtocol" and data["value"] in ["icmp", "icmp6"]:
            # Plain protocol entry: any type, any code.
            protocol = data["value"]
            icmp_type = -1
            icmp_code = -1
        elif data["kind"] in ["ICMPService", "ICMP6Service"]:
            finder = regex.match(data["value"])
            if finder is not None:
                protocol = finder.group(1)
                icmp_type = finder.group(2)
                if icmp_type.isdigit():
                    icmp_type = int(icmp_type)
                icmp_code = -1
                if finder.group(3).isdigit():
                    icmp_code = int(finder.group(3))
            else:
                raise ValueError(f"{data} is no valid ICMP/ICMP6 Service JSON data")
        else:
            raise ValueError(f"{data} is no valid ICMP/ICMP6 Service JSON data")
        return protocol, icmp_type, icmp_code

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "RuleICMP":
        """
        Uses a dictionary representation of a rule (most likely converted from JSON data) to create a rule object.

        Args:
            data: dict to create rule object from, structured like the JSON responses from the API

        Returns:
            rule object equivalent to the provided data
        """
        permit = data["permit"]
        src = data["sourceAddress"]["value"]
        dst = data["destinationAddress"]["value"]
        protocol, icmp_type, icmp_code = RuleICMP._parse_icmp_json(data["destinationService"])
        remark = data["remarks"]
        active = data["active"]
        logging = RuleLogging.from_dict(data["ruleLogging"]) if "ruleLogging" in data else None
        # Same defaulting style as RuleGeneric.from_dict for optional keys.
        position = int(data.get("position", 0))
        is_access_rule = bool(data.get("isAccessRule", False))
        objectid = int(data.get("objectId", 0))
        return cls(permit, protocol, src, dst, icmp_type, icmp_code, remark, active, logging, position, is_access_rule,
                   objectid)

    def to_cli(self, acl: Optional[str] = None) -> str:
        """
        Return a CLI-style representation of the rule.

        Args:
            acl: ACL name as string to prepend to the rule in form of "access-list NAME"

        Returns:
            string containing CLI-style representation
        """
        if self.icmp_type == -1:
            # "any" type: CLI omits the type/code part entirely.
            icmp = ""
        else:
            if self.icmp_code == -1:
                icmp = f"{self.icmp_type_alias}"
            else:
                icmp = f"{self.icmp_type_alias} {self.icmp_code}"
        result = f"{'' if acl is None else f'access-list {acl}'} extended {'permit' if self.permit else 'deny'} {self.protocol_alias} {self.src.to_cli()} {self.dst.to_cli()} {icmp} {self.logging.to_cli()} {'inactive' if not self.active else ''}"
        # Empty optional parts leave runs of spaces behind; collapse them to
        # single spaces (the previous .replace(" ", " ") was a no-op).
        return " ".join(result.split())

    def to_dict(self) -> Dict[str, Any]:
        """
        Return rule data as dict representation in API JSON style.

        Returns:
            dict of rule values that can be easily converted to JSON for use with API
        """
        result = RuleGeneric.to_dict(self)
        result["sourceService"] = {"kind": "NetworkProtocol", "value": self.protocol_alias}
        if self._icmp_type == -1:
            result["destinationService"] = {"kind": "NetworkProtocol", "value": self.protocol_alias}
        else:
            if self._icmp_code == -1:
                result["destinationService"] = {
                    "kind": f"{self.protocol_alias.upper()}Service",
                    "value": f"{self.protocol_alias}/{self.icmp_type_alias}"
                }
            else:
                result["destinationService"] = {
                    "kind": f"{self.protocol_alias.upper()}Service",
                    "value": f"{self.protocol_alias}/{self.icmp_type_alias}/{self.icmp_code}"
                }
        return result

    def __contains__(self, item: object) -> bool:
        """
        Verify if this rule shadows another rule object. Overloads python "in" operator.

        For details on shadowing, see RuleGeneric.__contains__ documentation.

        Args:
            item: object to check if it is being shadowed by this rule

        Returns:
            True if this rule shadows the other rule, False if not
        """
        if not isinstance(item, RuleICMP):
            return False
        if not RuleGeneric.__contains__(self, item):
            return False
        # A concrete type only covers the identical type; -1 ("any") covers all.
        if self.icmp_type != -1:
            if item.icmp_type != self.icmp_type:
                return False
            if self.icmp_code != -1:
                if item.icmp_code != self.icmp_code:
                    return False
        return True

    @classmethod
    def random_rule(cls) -> "RuleICMP":
        """
        Return a random ICMP or ICMP6 rule, with all values besides remark and is_access_rule randomly chosen.
        Mainly used for testing

        Returns:
            random rule object
        """
        max_v4 = 4294967295  # 2**32 - 1
        max_v6 = 340282366920938463463374607431768211455  # 2**128 - 1
        permit = choice([True, False])
        active = choice([True, False])
        protocol = choice(["icmp", "icmp6"])
        if protocol == "icmp6":
            if choice([True, False]):
                src = IPAddress(randint(0, max_v6), version=6)
            else:
                src = IPNetwork(
                    f"{IPAddress(randint(0, max_v6), version=6)}/{randint(1, 127)}").cidr
            if choice([True, False]):
                dst = IPAddress(randint(0, max_v6), version=6)
            else:
                dst = IPNetwork(
                    f"{IPAddress(randint(0, max_v6), version=6)}/{randint(1, 127)}").cidr
        else:
            if choice([True, False]):
                src = IPAddress(randint(0, max_v4), version=4)
            else:
                src = IPNetwork(f"{IPAddress(randint(0, max_v4))}/{randint(0, 31)}", version=4).cidr
            if choice([True, False]):
                dst = IPAddress(randint(0, max_v4), version=4)
            else:
                dst = IPNetwork(f"{IPAddress(randint(0, max_v4))}/{randint(0, 31)}", version=4).cidr
        icmp_type = randint(-1, 255)
        icmp_code = randint(-1, 255)
        log = RuleLogging(choice([level for level in LogLevel]), randint(1, 300))
        position = randint(0, 65535)
        objectid = randint(0, 4294967295)
        rule = cls(permit=permit, protocol=protocol, src=src, dst=dst, icmp_type=icmp_type, icmp_code=icmp_code,
                   logging=log, active=active, position=position, objectid=objectid)
        return rule
class RuleTCPUDP(RuleGeneric):
    """
    Class representing ASA firewall rules that use either TCP or UDP protocol
    """

    def __init__(self, permit: bool = False, protocol: Union[int, str] = "tcp",
                 src: Union[str, IPAddress, IPNetwork, BaseAddress] = "any",
                 dst: Union[str, IPAddress, IPNetwork, BaseAddress] = "any",
                 src_port: Union[int, str] = "any", dst_port: Union[int, str] = "any",
                 src_comp: Union[ServiceComparator, str] = ServiceComparator.EQUAL,
                 dst_comp: Union[ServiceComparator, str] = ServiceComparator.EQUAL,
                 remark: Union[None, str, list] = None, active: bool = True, logging: Optional[RuleLogging] = None,
                 position: int = 0, is_access_rule: bool = False, objectid: int = 0):
        RuleGeneric.__init__(self, permit, protocol, src, dst, remark, active, logging, position, is_access_rule,
                             objectid)
        # -1 means "any" for both ports; comparators default to EQUAL.
        self._src_port = -1
        self._dst_port = -1
        self._src_comp = ServiceComparator.EQUAL
        self._dst_comp = ServiceComparator.EQUAL
        self.src_port = src_port
        self.dst_port = dst_port
        self.src_comp = src_comp
        self.dst_comp = dst_comp

    @property
    def protocol(self) -> int:
        """
        Return/set IP protocol value. Accepts integer as well as ASA protocol aliases as strings.
        Checks that protocol is TCP/UDP, as this class only supports these protocols.

        Returns:
            IP protocol number
        """
        return self._protocol

    @protocol.setter
    def protocol(self, protocol: Union[int, str]):
        if isinstance(protocol, str) and protocol.isdigit():
            protocol = int(protocol)
        if isinstance(protocol, int):
            # 6 is TCP, 17 is UDP.
            if protocol in [6, 17]:
                self._protocol = protocol
            elif protocol in [1, 58]:
                raise ValueError("Use a RuleICMP object for ICMP/ICMP6 rules")
            else:
                raise ValueError("Use a RuleGeneric object for non TCP/UDP rules")
        elif isinstance(protocol, str):
            if protocol in ["tcp", "udp"]:
                self._protocol = Aliases.by_alias["protocol"][protocol]
            elif protocol in ["icmp", "icmp6"]:
                raise ValueError("Use a RuleICMP object for ICMP/ICMP6 rules")
            else:
                raise ValueError("Use a RuleGeneric object for non TCP/UDP rules")
        else:
            raise ValueError("protocol must be either 6 for tcp, 17 for udp or \"tcp\" or \"udp\" protcol alias string")

    @property
    def src_port(self) -> int:
        """
        Return/set TCP/UDP source port for this rule. Defaults to -1 which means "any".
        Checks that port is valid (in range 1..65535, or -1 for any).
        """
        return self._src_port

    @property
    def dst_port(self) -> int:
        """
        Return/set TCP/UDP destination port for this rule. Defaults to -1 which means "any".
        Checks that port is valid (in range 1..65535, or -1 for any).
        """
        return self._dst_port

    @src_port.setter
    def src_port(self, port: Union[int, str]):
        self._src_port = self._parse_port(port)

    @dst_port.setter
    def dst_port(self, port: Union[int, str]):
        self._dst_port = self._parse_port(port)

    def _parse_port(self, port: Union[int, str]) -> int:
        """
        Parse port functionality. Used in src_port/dst_port setters.

        Args:
            port: port data to be parsed, accepts port numbers or port aliases

        Returns:
            port value converted to integer
        """
        if isinstance(port, str) and (port.isdigit() or port == "-1"):
            port = int(port)
        if isinstance(port, str):
            # Alias lookup is protocol specific (tcp vs udp alias tables).
            if port in Aliases.by_alias[self.protocol_alias]:
                return Aliases.by_alias[self.protocol_alias][port]
            else:
                raise ValueError(f"{port} is not a valid {self.protocol_alias} service alias")
        elif isinstance(port, int):
            if 1 <= port <= 65535 or port == -1:
                return int(port)
            else:
                raise ValueError("port must be in range 1..65535 or -1 for any")
        else:
            raise TypeError(f"{type(port)} is not a valid argument type")

    @property
    def src_port_alias(self) -> str:
        """
        Return source port alias, if available, else port number as string

        Returns:
            port alias or number
        """
        return Aliases.by_number[self.protocol_alias].get(self._src_port, str(self._src_port))

    @property
    def dst_port_alias(self) -> str:
        """
        Return destination port alias, if available, else port number as string

        Returns:
            port alias or number
        """
        return Aliases.by_number[self.protocol_alias].get(self._dst_port, str(self._dst_port))

    @property
    def src_comp(self) -> ServiceComparator:
        """
        Return/set source comparator for comparison of the source port.

        Returns:
            Source port comparator
        """
        return self._src_comp

    @property
    def dst_comp(self) -> ServiceComparator:
        """
        Return/set destination comparator for comparison of the destination port.

        Returns:
            Destination port comparator
        """
        return self._dst_comp

    @src_comp.setter
    def src_comp(self, comp: Union[ServiceComparator, str]):
        self._src_comp = self._set_comparator(comp)

    @dst_comp.setter
    def dst_comp(self, comp: Union[ServiceComparator, str]):
        self._dst_comp = self._set_comparator(comp)

    @classmethod
    def _set_comparator(cls, comp: Union[ServiceComparator, str]) -> ServiceComparator:
        """
        Utility function to parse src_comp/dst_comp setter values for validity and return corresponding value

        Args:
            comp: value to be parsed

        Returns:
            ServiceComparator object
        """
        if isinstance(comp, ServiceComparator):
            return comp
        if isinstance(comp, str):
            if comp in [enum.value for enum in ServiceComparator]:
                return ServiceComparator(comp)
            else:
                raise ValueError(f"{comp} is not a valid ServiceComparator alias")
        else:
            raise ValueError(f"{type(comp)} is not a valid argument type")

    @classmethod
    def _parse_port_json(cls, data: dict) -> Tuple:
        """
        Utility function to parse TCP/UDP port and comparator data from a JSON dict.

        Args:
            data: dict with JSON data to parse

        Returns:
            protocol, TCP/UDP port and comparator as tuple (port may be an alias string)
        """
        # Matches e.g. "tcp/80", "!=udp/domain" -> (comparator, protocol, port).
        regex = re.compile(r"^(|(?:!=)?|<?|>?)(tcp|udp)/([a-z0-9-]+)$")
        if data["kind"] == "NetworkProtocol":
            # Plain protocol entry: any port, EQUAL comparator.
            protocol = data["value"]
            port = "any"
            comparator = ServiceComparator.EQUAL
        else:
            finder = regex.match(data["value"])
            if finder is not None:
                comparator = ServiceComparator(finder.group(1))
                protocol = finder.group(2)
                port = finder.group(3)
                if port.isdigit():
                    port = int(port)
            else:
                raise ValueError(f"{data} is not valid Service JSON data")
        return protocol, port, comparator

    @classmethod
    def from_dict(cls, data: dict) -> "RuleTCPUDP":
        """
        Uses a dictionary representation of a rule (most likely converted from JSON data) to create a rule object.

        Args:
            data: dict to create rule object from, structured like the JSON responses from the API

        Returns:
            rule object equivalent to the provided data
        """
        permit = data["permit"]
        src = data["sourceAddress"]["value"]
        dst = data["destinationAddress"]["value"]
        protocol, src_port, src_comp = RuleTCPUDP._parse_port_json(data["sourceService"])
        # Protocol from destinationService is redundant, sourceService already supplied it.
        __, dst_port, dst_comp = RuleTCPUDP._parse_port_json(data["destinationService"])
        remark = data["remarks"]
        active = data["active"]
        logging = RuleLogging.from_dict(data["ruleLogging"]) if "ruleLogging" in data else None
        # Same defaulting style as RuleGeneric.from_dict for optional keys.
        position = int(data.get("position", 0))
        is_access_rule = bool(data.get("isAccessRule", False))
        objectid = int(data.get("objectId", 0))
        return cls(permit, protocol, src, dst, src_port, dst_port, src_comp, dst_comp, remark, active,
                   logging, position, is_access_rule, objectid)

    def to_cli(self, acl: Optional[str] = None) -> str:
        """
        Return a CLI-style representation of the rule.

        Args:
            acl: ACL name as string to prepend to the rule in form of "access-list NAME"

        Returns:
            string containing CLI-style representation
        """
        src_port = "" if self.src_port == -1 else f"{self.src_comp.to_cli()} {self.src_port_alias}"
        dst_port = "" if self.dst_port == -1 else f"{self.dst_comp.to_cli()} {self.dst_port_alias}"
        result = f"{'' if acl is None else f'access-list {acl}'} extended {'permit' if self.permit else 'deny'} {self.protocol_alias} {self.src.to_cli()} {src_port} {self.dst.to_cli()} {dst_port} {self.logging.to_cli()} {'inactive' if not self.active else ''}"
        # Empty optional parts leave runs of spaces behind; collapse them to
        # single spaces (the previous .replace(" ", " ") was a no-op).
        return " ".join(result.split())

    def to_dict(self) -> Dict[str, Any]:
        """
        Return rule data as dict representation in API JSON style.

        Returns:
            dict of rule values that can be easily converted to JSON for use with API
        """
        result = RuleGeneric.to_dict(self)
        if self._src_port == -1:
            result["sourceService"] = {"kind": "NetworkProtocol", "value": self.protocol_alias}
        else:
            result["sourceService"] = {
                "kind": "TcpUdpService",
                "value": f"{self._src_comp.value}{self.protocol_alias}/{self.src_port_alias}"
            }
        if self._dst_port == -1:
            result["destinationService"] = {"kind": "NetworkProtocol", "value": self.protocol_alias}
        else:
            result["destinationService"] = {
                "kind": "TcpUdpService",
                "value": f"{self._dst_comp.value}{self.protocol_alias}/{self.dst_port_alias}"
            }
        return result

    @staticmethod
    def _shadows_port(own_port: int, own_comp: ServiceComparator,
                      other_port: int, other_comp: ServiceComparator) -> bool:
        """
        Check whether this rule's port restriction covers another rule's port restriction.

        Only called when own_port != -1, i.e. when this rule actually restricts the port.

        Args:
            own_port: this rule's port (restricting rule)
            own_comp: this rule's comparator
            other_port: the other rule's port
            other_comp: the other rule's comparator

        Returns:
            True if (own_port, own_comp) covers every port matched by (other_port, other_comp)
        """
        if other_port == -1:
            # The other rule matches any port; a restricted rule cannot cover that.
            return False
        if own_comp in (ServiceComparator.EQUAL, ServiceComparator.NOT_EQUAL):
            # eq/neq only cover an identical comparator with the identical port.
            return other_comp is own_comp and other_port == own_port
        if own_comp is ServiceComparator.GREATER:
            if other_comp in (ServiceComparator.LESSER, ServiceComparator.NOT_EQUAL):
                return False
            if other_comp is ServiceComparator.EQUAL:
                return other_port > own_port
            if other_comp is ServiceComparator.GREATER:
                return other_port >= own_port
            return True
        # own_comp is ServiceComparator.LESSER
        if other_comp in (ServiceComparator.GREATER, ServiceComparator.NOT_EQUAL):
            return False
        if other_comp is ServiceComparator.EQUAL:
            return other_port < own_port
        if other_comp is ServiceComparator.LESSER:
            return other_port <= own_port
        return True

    def __contains__(self, item: object) -> bool:
        """
        Verify if this rule shadows another rule object. Overloads python "in" operator.

        For details on shadowing, see RuleGeneric.__contains__ documentation.

        Args:
            item: object to check if it is being shadowed by this rule

        Returns:
            True if this rule shadows the other rule, False if not
        """
        if not isinstance(item, RuleTCPUDP):
            return False
        if not RuleGeneric.__contains__(self, item):
            return False
        # Identical shadow check for source and destination port restrictions.
        if self.src_port != -1 and not self._shadows_port(self.src_port, self.src_comp,
                                                          item.src_port, item.src_comp):
            return False
        if self.dst_port != -1 and not self._shadows_port(self.dst_port, self.dst_comp,
                                                          item.dst_port, item.dst_comp):
            return False
        return True

    @classmethod
    def random_rule(cls) -> "RuleTCPUDP":
        """
        Return a random TCP or UDP rule, with all values besides remark and is_access_rule randomly chosen.
        Mainly used for testing

        Returns:
            random rule object
        """
        max_v4 = 4294967295  # 2**32 - 1
        max_v6 = 340282366920938463463374607431768211455  # 2**128 - 1
        permit = choice([True, False])
        active = choice([True, False])
        protocol = choice(["tcp", "udp"])
        if choice([4, 6]) == 6:
            if choice([True, False]):
                src = IPAddress(randint(0, max_v6), version=6)
            else:
                src = IPNetwork(
                    f"{IPAddress(randint(0, max_v6), version=6)}/{randint(1, 127)}").cidr
            if choice([True, False]):
                dst = IPAddress(randint(0, max_v6), version=6)
            else:
                dst = IPNetwork(
                    f"{IPAddress(randint(0, max_v6), version=6)}/{randint(1, 127)}").cidr
        else:
            if choice([True, False]):
                src = IPAddress(randint(0, max_v4), version=4)
            else:
                src = IPNetwork(f"{IPAddress(randint(0, max_v4))}/{randint(0, 31)}", version=4).cidr
            if choice([True, False]):
                dst = IPAddress(randint(0, max_v4), version=4)
            else:
                dst = IPNetwork(f"{IPAddress(randint(0, max_v4))}/{randint(0, 31)}", version=4).cidr
        # 0 is not a valid port, so map it to -1 ("any").
        src_port = randint(0, 65535)
        if src_port == 0:
            src_port = -1
        dst_port = randint(0, 65535)
        if dst_port == 0:
            dst_port = -1
        src_comp = choice([comp for comp in ServiceComparator])
        dst_comp = choice([comp for comp in ServiceComparator])
        log = RuleLogging(choice([level for level in LogLevel]), randint(1, 300))
        position = randint(0, 65535)
        objectid = randint(0, 4294967295)
        rule = cls(permit=permit, protocol=protocol, src=src, dst=dst, src_port=src_port, dst_port=dst_port,
                   src_comp=src_comp, dst_comp=dst_comp, logging=log, active=active,
                   position=position, objectid=objectid)
        return rule
def rule_from_dict(data: dict) -> RuleGeneric:
    """
    Determine which rule class matches a given JSON dict and build it.

    Dispatches on the protocol mentioned in the source/destination service
    values and delegates to the matching class's from_dict().

    Args:
        data: dict structured like the JSON responses from the API
    Returns:
        rule object equivalent to the provided data
    """
    service_values = (data["sourceService"]["value"],
                      data["destinationService"]["value"])

    def _mentions(protocols):
        # True if any service value contains any of the protocol substrings
        return any(proto in value
                   for value in service_values
                   for proto in protocols)

    if _mentions(("tcp", "udp")):
        return RuleTCPUDP.from_dict(data)
    if _mentions(("icmp", "icmp6")):
        return RuleICMP.from_dict(data)
    return RuleGeneric.from_dict(data)
def rule_from_cli(line: str) -> RuleGeneric:
    """
    Determines which class of rule object to use for a given CLI line.
    Invokes the class specific constructor and returns the object.

    Args:
        line: ASA ACL CLI line to parse
    Returns:
        rule object equivalent to the provided line
    Raises:
        ValueError: if line is not a valid ASA ACL CLI line
    """
    # very complex regular expression roughly validating and separating valid ASA ACL CLI lines.
    # For understanding and debugging see https://regex101.com/ or https://www.debuggex.com/
    cli_line_regex = r"^(?:(?:access-list (?P<acl>[\w\d]+) (?:line (?P<line>\d+) )?)?extended )?(?P<permit>deny|permit) (?P<proto>\w+) (?P<src>any[46]?|host \d{1,3}(?:\.\d{1,3}){3}|\d{1,3}(?:\.\d{1,3}){3} \d{1,3}(?:\.\d{1,3}){3}|(?:host )?(?:[0-9a-f]{0,4}:){2,7}(?::|[0-9a-f]{0,4})(?:\/\d{1,3})?)(?: (?P<srccomp>eq|neq|gt|lt) (?P<srcport>\d{1,5}|[\d\w-]+))? (?P<dst>any[46]?|host \d{1,3}(?:\.\d{1,3}){3}|\d{1,3}(?:\.\d{1,3}){3} \d{1,3}(?:\.\d{1,3}){3}|(?:host )?(?:[0-9a-f]{0,4}:){2,7}(?::|[0-9a-f]{0,4})(?:\/\d{1,3})?)(?:(?: (?P<dstcomp>eq|neq|gt|lt) (?P<dstport>\d{1,5}|[\d\w-]+))|\s(?P<icmptype>[a-z\d-]+)\s?(?P<icmpcode>\d{1,3})?)?(?: log (?P<level>\w+)(?: interval (?P<interval>\d+))?)?(?: (?P<active>inactive))?$"
    regex = re.compile(cli_line_regex)
    finder = regex.fullmatch(line)
    if finder is None:
        raise ValueError("line parameter is not a valid ACL cli line")
    permit = finder.group("permit") == "permit"
    active = not finder.group("active")
    # resolve a protocol alias (e.g. "tcp") to its number, else parse it as a number
    proto = Aliases.by_alias["protocol"][finder.group("proto")] if finder.group("proto") in Aliases.by_alias[
        "protocol"] else int(finder.group("proto"))
    # NOTE(review): "any4"/"any6" match the regex but are NOT mapped to AnyAddress here;
    # they fall through to Address.from_cli() -- confirm this is intended.
    src = Address.from_cli(finder.group("src")) if finder.group("src") != "any" else AnyAddress()
    dst = Address.from_cli(finder.group("dst")) if finder.group("dst") != "any" else AnyAddress()
    if finder.group("level"):
        if finder.group("interval"):
            log = RuleLogging(interval=int(finder.group("interval")), level=LogLevel.from_cli(finder.group("level")))
        else:
            log = RuleLogging(level=LogLevel.from_cli(finder.group("level")))
    else:
        log = RuleLogging()
    position = int(finder.group("line")) if finder.group("line") else 0
    if proto in [6, 17]:  # tcp / udp
        srcport = finder.group("srcport") if finder.group("srcport") else -1
        # bug fix: the dst values were previously gated on the *src* regex groups,
        # so a rule with only a destination port/comparator was parsed incorrectly.
        dstport = finder.group("dstport") if finder.group("dstport") else -1
        srccomp = ServiceComparator.from_cli(finder.group("srccomp")) if finder.group(
            "srccomp") else ServiceComparator.EQUAL
        dstcomp = ServiceComparator.from_cli(finder.group("dstcomp")) if finder.group(
            "dstcomp") else ServiceComparator.EQUAL
        return RuleTCPUDP(permit=permit, protocol=proto, src=src, dst=dst, active=active, logging=log, src_port=srcport,
                          dst_port=dstport, src_comp=srccomp, dst_comp=dstcomp, position=position)
    elif proto in [1, 58]:  # icmp / icmp6
        icmp_type = finder.group("icmptype") if finder.group("icmptype") else -1
        icmp_code = int(finder.group("icmpcode")) if finder.group("icmpcode") else -1
        return RuleICMP(permit=permit, protocol=proto, src=src, dst=dst, active=active, logging=log,
                        icmp_type=icmp_type,
                        icmp_code=icmp_code, position=position)
    else:
        return RuleGeneric(permit=permit, protocol=proto, src=src, dst=dst, active=active, logging=log,
                           position=position)
def random_rule() -> RuleGeneric:
    """
    Return a random rule of any of the three classes, delegating to that
    class's random_rule(). Mainly used for testing.

    NOTE(review): the selector spans 1..4 while only 1 and 2 pick the
    specialized classes, so the generic class is drawn half the time --
    confirm this bias is intended.

    Returns:
        random rule object
    """
    selector = randint(1, 4)
    if selector == 1:
        return RuleTCPUDP.random_rule()
    if selector == 2:
        return RuleICMP.random_rule()
    return RuleGeneric.random_rule()
| |
# Copyright (c) 2010-2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import uuid
import hashlib
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.compat import json
import boto
class SNSConnection(AWSQueryConnection):
    """
    Amazon Simple Notification Service

    Amazon Simple Notification Service (Amazon SNS) is a web service
    that enables you to build distributed web-enabled applications.
    Applications can use Amazon SNS to easily push real-time
    notification messages to interested subscribers over multiple
    delivery protocols. For more information about this product see
    `http://aws.amazon.com/sns`_. For detailed information about
    Amazon SNS features and their associated API calls, see the
    `Amazon SNS Developer Guide`_.

    We also provide SDKs that enable you to access Amazon SNS from
    your preferred programming language. The SDKs contain
    functionality that automatically takes care of tasks such as:
    cryptographically signing your service requests, retrying
    requests, and handling error responses. For a list of available
    SDKs, go to `Tools for Amazon Web Services`_.
    """
    DefaultRegionName = boto.config.get('Boto', 'sns_region_name', 'us-east-1')
    DefaultRegionEndpoint = boto.config.get('Boto', 'sns_region_endpoint',
                                            'sns.us-east-1.amazonaws.com')
    APIVersion = boto.config.get('Boto', 'sns_version', '2010-03-31')

    def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
                 is_secure=True, port=None, proxy=None, proxy_port=None,
                 proxy_user=None, proxy_pass=None, debug=0,
                 https_connection_factory=None, region=None, path='/',
                 security_token=None, validate_certs=True,
                 profile_name=None):
        if not region:
            region = RegionInfo(self, self.DefaultRegionName,
                                self.DefaultRegionEndpoint,
                                connection_cls=SNSConnection)
        self.region = region
        super(SNSConnection, self).__init__(aws_access_key_id,
                                            aws_secret_access_key,
                                            is_secure, port, proxy, proxy_port,
                                            proxy_user, proxy_pass,
                                            self.region.endpoint, debug,
                                            https_connection_factory, path,
                                            security_token=security_token,
                                            validate_certs=validate_certs,
                                            profile_name=profile_name)

    def _build_dict_as_list_params(self, params, dictionary, name):
        """
        Serialize a parameter 'name' which value is a 'dictionary' into a list of parameters.

        See: http://docs.aws.amazon.com/sns/latest/api/API_SetPlatformApplicationAttributes.html
        For example::

            dictionary = {'PlatformPrincipal': 'foo', 'PlatformCredential': 'bar'}
            name = 'Attributes'

        would result in params dict being populated with:
            Attributes.entry.1.key = PlatformPrincipal
            Attributes.entry.1.value = foo
            Attributes.entry.2.key = PlatformCredential
            Attributes.entry.2.value = bar

        :param params: the resulting parameters will be added to this dict
        :param dictionary: dict - value of the serialized parameter
        :param name: name of the serialized parameter
        """
        items = sorted(dictionary.items(), key=lambda x: x[0])
        # enumerate() replaces the original zip(items, range(...)) pairing;
        # entries are 1-indexed per the SNS wire format.
        for index, (key, value) in enumerate(items, start=1):
            prefix = '%s.entry.%s' % (name, index)
            params['%s.key' % prefix] = key
            params['%s.value' % prefix] = value

    def _required_auth_capability(self):
        # SNS requests are signed with AWS Signature Version 4.
        return ['hmac-v4']

    def get_all_topics(self, next_token=None):
        """
        :type next_token: string
        :param next_token: Token returned by the previous call to
                           this method.
        """
        params = {}
        if next_token:
            params['NextToken'] = next_token
        return self._make_request('ListTopics', params)

    def get_topic_attributes(self, topic):
        """
        Get attributes of a Topic

        :type topic: string
        :param topic: The ARN of the topic.
        """
        params = {'TopicArn': topic}
        return self._make_request('GetTopicAttributes', params)

    def set_topic_attributes(self, topic, attr_name, attr_value):
        """
        Set attributes of a Topic

        :type topic: string
        :param topic: The ARN of the topic.

        :type attr_name: string
        :param attr_name: The name of the attribute you want to set.
                          Only a subset of the topic's attributes are mutable.
                          Valid values: Policy | DisplayName

        :type attr_value: string
        :param attr_value: The new value for the attribute.
        """
        params = {'TopicArn': topic,
                  'AttributeName': attr_name,
                  'AttributeValue': attr_value}
        return self._make_request('SetTopicAttributes', params)

    def add_permission(self, topic, label, account_ids, actions):
        """
        Adds a statement to a topic's access control policy, granting
        access for the specified AWS accounts to the specified actions.

        :type topic: string
        :param topic: The ARN of the topic.

        :type label: string
        :param label: A unique identifier for the new policy statement.

        :type account_ids: list of strings
        :param account_ids: The AWS account ids of the users who will be
                            give access to the specified actions.

        :type actions: list of strings
        :param actions: The actions you want to allow for each of the
                        specified principal(s).
        """
        params = {'TopicArn': topic,
                  'Label': label}
        self.build_list_params(params, account_ids, 'AWSAccountId.member')
        self.build_list_params(params, actions, 'ActionName.member')
        return self._make_request('AddPermission', params)

    def remove_permission(self, topic, label):
        """
        Removes a statement from a topic's access control policy.

        :type topic: string
        :param topic: The ARN of the topic.

        :type label: string
        :param label: A unique identifier for the policy statement
                      to be removed.
        """
        params = {'TopicArn': topic,
                  'Label': label}
        return self._make_request('RemovePermission', params)

    def create_topic(self, topic):
        """
        Create a new Topic.

        :type topic: string
        :param topic: The name of the new topic.
        """
        params = {'Name': topic}
        return self._make_request('CreateTopic', params)

    def delete_topic(self, topic):
        """
        Delete an existing topic

        :type topic: string
        :param topic: The ARN of the topic
        """
        params = {'TopicArn': topic}
        return self._make_request('DeleteTopic', params, '/', 'GET')

    def publish(self, topic=None, message=None, subject=None, target_arn=None,
                message_structure=None, message_attributes=None):
        """
        Publish a message to a topic or an endpoint.

        :type topic: string
        :param topic: The ARN of the new topic.

        :type message: string
        :param message: The message you want to send to the topic.
                        Messages must be UTF-8 encoded strings and
                        be at most 4KB in size.

        :type message_structure: string
        :param message_structure: Optional parameter. If left as ``None``,
                                  plain text will be sent. If set to ``json``,
                                  your message should be a JSON string that
                                  matches the structure described at
                                  http://docs.aws.amazon.com/sns/latest/dg/PublishTopic.html#sns-message-formatting-by-protocol

        :type message_attributes: dict
        :param message_attributes: Message attributes to set. Should be
            of the form:

            .. code-block:: python

                {
                    "name1": {
                        "data_type": "Number",
                        "string_value": "42"
                    },
                    "name2": {
                        "data_type": "String",
                        "string_value": "Bob"
                    }
                }

        :type subject: string
        :param subject: Optional parameter to be used as the "Subject"
                        line of the email notifications.

        :type target_arn: string
        :param target_arn: Optional parameter for either TopicArn or
                           EndpointArn, but not both.
        """
        if message is None:
            # To be backwards compatible when message did not have
            # a default value and topic and message were required
            # args.
            raise TypeError("'message' is a required parameter")
        params = {'Message': message}
        if subject is not None:
            params['Subject'] = subject
        if topic is not None:
            params['TopicArn'] = topic
        if target_arn is not None:
            params['TargetArn'] = target_arn
        if message_structure is not None:
            params['MessageStructure'] = message_structure
        if message_attributes is not None:
            # Serialize attributes in sorted-key order as 1-indexed entries.
            keys = sorted(message_attributes.keys())
            for i, name in enumerate(keys, start=1):
                attribute = message_attributes[name]
                params['MessageAttributes.entry.{0}.Name'.format(i)] = name
                if 'data_type' in attribute:
                    params['MessageAttributes.entry.{0}.Value.DataType'.format(i)] = \
                        attribute['data_type']
                if 'string_value' in attribute:
                    params['MessageAttributes.entry.{0}.Value.StringValue'.format(i)] = \
                        attribute['string_value']
                if 'binary_value' in attribute:
                    params['MessageAttributes.entry.{0}.Value.BinaryValue'.format(i)] = \
                        attribute['binary_value']
        return self._make_request('Publish', params, '/', 'POST')

    def subscribe(self, topic, protocol, endpoint):
        """
        Subscribe to a Topic.

        :type topic: string
        :param topic: The ARN of the new topic.

        :type protocol: string
        :param protocol: The protocol used to communicate with
                         the subscriber.  Current choices are:
                         email|email-json|http|https|sqs|sms|application

        :type endpoint: string
        :param endpoint: The location of the endpoint for
                         the subscriber.
                         * For email, this would be a valid email address
                         * For email-json, this would be a valid email address
                         * For http, this would be a URL beginning with http
                         * For https, this would be a URL beginning with https
                         * For sqs, this would be the ARN of an SQS Queue
                         * For sms, this would be a phone number of an
                           SMS-enabled device
                         * For application, the endpoint is the EndpointArn
                           of a mobile app and device.
        """
        params = {'TopicArn': topic,
                  'Protocol': protocol,
                  'Endpoint': endpoint}
        return self._make_request('Subscribe', params)

    def subscribe_sqs_queue(self, topic, queue):
        """
        Subscribe an SQS queue to a topic.

        This is convenience method that handles most of the complexity involved
        in using an SQS queue as an endpoint for an SNS topic.  To achieve this
        the following operations are performed:

        * The correct ARN is constructed for the SQS queue and that ARN is
          then subscribed to the topic.
        * A JSON policy document is contructed that grants permission to
          the SNS topic to send messages to the SQS queue.
        * This JSON policy is then associated with the SQS queue using
          the queue's set_attribute method.  If the queue already has
          a policy associated with it, this process will add a Statement to
          that policy.  If no policy exists, a new policy will be created.

        :type topic: string
        :param topic: The ARN of the new topic.

        :type queue: A boto Queue object
        :param queue: The queue you wish to subscribe to the SNS Topic.
        """
        q_arn = queue.arn
        # Deterministic statement id derived from the (topic, queue) pair so
        # repeated calls find and reuse the same policy statement.
        sid = hashlib.md5((topic + q_arn).encode('utf-8')).hexdigest()
        sid_exists = False
        resp = self.subscribe(topic, 'sqs', q_arn)
        attr = queue.get_attributes('Policy')
        if 'Policy' in attr:
            policy = json.loads(attr['Policy'])
        else:
            policy = {}
        if 'Version' not in policy:
            policy['Version'] = '2008-10-17'
        if 'Statement' not in policy:
            policy['Statement'] = []
        # See if a Statement with the Sid exists already.
        for s in policy['Statement']:
            if s['Sid'] == sid:
                sid_exists = True
        if not sid_exists:
            statement = {'Action': 'SQS:SendMessage',
                         'Effect': 'Allow',
                         'Principal': {'AWS': '*'},
                         'Resource': q_arn,
                         'Sid': sid,
                         'Condition': {'StringLike': {'aws:SourceArn': topic}}}
            policy['Statement'].append(statement)
        queue.set_attribute('Policy', json.dumps(policy))
        return resp

    def confirm_subscription(self, topic, token,
                             authenticate_on_unsubscribe=False):
        """
        Confirm a pending subscription using the token sent to the endpoint.

        :type topic: string
        :param topic: The ARN of the new topic.

        :type token: string
        :param token: Short-lived token sent to and endpoint during
                      the Subscribe operation.

        :type authenticate_on_unsubscribe: bool
        :param authenticate_on_unsubscribe: Optional parameter indicating
                                            that you wish to disable
                                            unauthenticated unsubscription
                                            of the subscription.
        """
        params = {'TopicArn': topic, 'Token': token}
        if authenticate_on_unsubscribe:
            params['AuthenticateOnUnsubscribe'] = 'true'
        return self._make_request('ConfirmSubscription', params)

    def unsubscribe(self, subscription):
        """
        Allows endpoint owner to delete subscription.
        Confirmation message will be delivered.

        :type subscription: string
        :param subscription: The ARN of the subscription to be deleted.
        """
        params = {'SubscriptionArn': subscription}
        return self._make_request('Unsubscribe', params)

    def get_all_subscriptions(self, next_token=None):
        """
        Get list of all subscriptions.

        :type next_token: string
        :param next_token: Token returned by the previous call to
                           this method.
        """
        params = {}
        if next_token:
            params['NextToken'] = next_token
        return self._make_request('ListSubscriptions', params)

    def get_all_subscriptions_by_topic(self, topic, next_token=None):
        """
        Get list of all subscriptions to a specific topic.

        :type topic: string
        :param topic: The ARN of the topic for which you wish to
                      find subscriptions.

        :type next_token: string
        :param next_token: Token returned by the previous call to
                           this method.
        """
        params = {'TopicArn': topic}
        if next_token:
            params['NextToken'] = next_token
        return self._make_request('ListSubscriptionsByTopic', params)

    def create_platform_application(self, name=None, platform=None,
                                    attributes=None):
        """
        The `CreatePlatformApplication` action creates a platform
        application object for one of the supported push notification
        services, such as APNS and GCM, to which devices and mobile
        apps may register. You must specify PlatformPrincipal and
        PlatformCredential attributes when using the
        `CreatePlatformApplication` action. The PlatformPrincipal is
        received from the notification service. For APNS/APNS_SANDBOX,
        PlatformPrincipal is "SSL certificate". For GCM,
        PlatformPrincipal is not applicable. For ADM,
        PlatformPrincipal is "client id". The PlatformCredential is
        also received from the notification service. For
        APNS/APNS_SANDBOX, PlatformCredential is "private key". For
        GCM, PlatformCredential is "API key". For ADM,
        PlatformCredential is "client secret". The
        PlatformApplicationArn that is returned when using
        `CreatePlatformApplication` is then used as an attribute for
        the `CreatePlatformEndpoint` action. For more information, see
        `Using Amazon SNS Mobile Push Notifications`_.

        :type name: string
        :param name: Application names must be made up of only uppercase and
            lowercase ASCII letters, numbers, underscores, hyphens, and
            periods, and must be between 1 and 256 characters long.

        :type platform: string
        :param platform: The following platforms are supported: ADM (Amazon
            Device Messaging), APNS (Apple Push Notification Service),
            APNS_SANDBOX, and GCM (Google Cloud Messaging).

        :type attributes: map
        :param attributes: For a list of attributes, see
            `SetPlatformApplicationAttributes`_
        """
        params = {}
        if name is not None:
            params['Name'] = name
        if platform is not None:
            params['Platform'] = platform
        if attributes is not None:
            self._build_dict_as_list_params(params, attributes, 'Attributes')
        return self._make_request(action='CreatePlatformApplication',
                                  params=params)

    def set_platform_application_attributes(self,
                                            platform_application_arn=None,
                                            attributes=None):
        """
        The `SetPlatformApplicationAttributes` action sets the
        attributes of the platform application object for the
        supported push notification services, such as APNS and GCM.
        For more information, see `Using Amazon SNS Mobile Push
        Notifications`_.

        :type platform_application_arn: string
        :param platform_application_arn: PlatformApplicationArn for
            SetPlatformApplicationAttributes action.

        :type attributes: map
        :param attributes:
        A map of the platform application attributes. Attributes in this map
            include the following:


        + `PlatformCredential` -- The credential received from the notification
              service. For APNS/APNS_SANDBOX, PlatformCredential is "private
              key". For GCM, PlatformCredential is "API key". For ADM,
              PlatformCredential is "client secret".
        + `PlatformPrincipal` -- The principal received from the notification
              service. For APNS/APNS_SANDBOX, PlatformPrincipal is "SSL
              certificate". For GCM, PlatformPrincipal is not applicable. For
              ADM, PlatformPrincipal is "client id".
        + `EventEndpointCreated` -- Topic ARN to which EndpointCreated event
              notifications should be sent.
        + `EventEndpointDeleted` -- Topic ARN to which EndpointDeleted event
              notifications should be sent.
        + `EventEndpointUpdated` -- Topic ARN to which EndpointUpdate event
              notifications should be sent.
        + `EventDeliveryFailure` -- Topic ARN to which DeliveryFailure event
              notifications should be sent upon Direct Publish delivery failure
              (permanent) to one of the application's endpoints.
        """
        params = {}
        if platform_application_arn is not None:
            params['PlatformApplicationArn'] = platform_application_arn
        if attributes is not None:
            self._build_dict_as_list_params(params, attributes, 'Attributes')
        return self._make_request(action='SetPlatformApplicationAttributes',
                                  params=params)

    def get_platform_application_attributes(self,
                                            platform_application_arn=None):
        """
        The `GetPlatformApplicationAttributes` action retrieves the
        attributes of the platform application object for the
        supported push notification services, such as APNS and GCM.
        For more information, see `Using Amazon SNS Mobile Push
        Notifications`_.

        :type platform_application_arn: string
        :param platform_application_arn: PlatformApplicationArn for
            GetPlatformApplicationAttributesInput.
        """
        params = {}
        if platform_application_arn is not None:
            params['PlatformApplicationArn'] = platform_application_arn
        return self._make_request(action='GetPlatformApplicationAttributes',
                                  params=params)

    def list_platform_applications(self, next_token=None):
        """
        The `ListPlatformApplications` action lists the platform
        application objects for the supported push notification
        services, such as APNS and GCM. The results for
        `ListPlatformApplications` are paginated and return a limited
        list of applications, up to 100. If additional records are
        available after the first page results, then a NextToken
        string will be returned. To receive the next page, you call
        `ListPlatformApplications` using the NextToken string received
        from the previous call. When there are no more records to
        return, NextToken will be null. For more information, see
        `Using Amazon SNS Mobile Push Notifications`_.

        :type next_token: string
        :param next_token: NextToken string is used when calling
            ListPlatformApplications action to retrieve additional records that
            are available after the first page results.
        """
        params = {}
        if next_token is not None:
            params['NextToken'] = next_token
        return self._make_request(action='ListPlatformApplications',
                                  params=params)

    def list_endpoints_by_platform_application(self,
                                               platform_application_arn=None,
                                               next_token=None):
        """
        The `ListEndpointsByPlatformApplication` action lists the
        endpoints and endpoint attributes for devices in a supported
        push notification service, such as GCM and APNS. The results
        for `ListEndpointsByPlatformApplication` are paginated and
        return a limited list of endpoints, up to 100. If additional
        records are available after the first page results, then a
        NextToken string will be returned. To receive the next page,
        you call `ListEndpointsByPlatformApplication` again using the
        NextToken string received from the previous call. When there
        are no more records to return, NextToken will be null. For
        more information, see `Using Amazon SNS Mobile Push
        Notifications`_.

        :type platform_application_arn: string
        :param platform_application_arn: PlatformApplicationArn for
            ListEndpointsByPlatformApplicationInput action.

        :type next_token: string
        :param next_token: NextToken string is used when calling
            ListEndpointsByPlatformApplication action to retrieve additional
            records that are available after the first page results.
        """
        params = {}
        if platform_application_arn is not None:
            params['PlatformApplicationArn'] = platform_application_arn
        if next_token is not None:
            params['NextToken'] = next_token
        return self._make_request(action='ListEndpointsByPlatformApplication',
                                  params=params)

    def delete_platform_application(self, platform_application_arn=None):
        """
        The `DeletePlatformApplication` action deletes a platform
        application object for one of the supported push notification
        services, such as APNS and GCM. For more information, see
        `Using Amazon SNS Mobile Push Notifications`_.

        :type platform_application_arn: string
        :param platform_application_arn: PlatformApplicationArn of platform
            application object to delete.
        """
        params = {}
        if platform_application_arn is not None:
            params['PlatformApplicationArn'] = platform_application_arn
        return self._make_request(action='DeletePlatformApplication',
                                  params=params)

    def create_platform_endpoint(self, platform_application_arn=None,
                                 token=None, custom_user_data=None,
                                 attributes=None):
        """
        The `CreatePlatformEndpoint` creates an endpoint for a device
        and mobile app on one of the supported push notification
        services, such as GCM and APNS. `CreatePlatformEndpoint`
        requires the PlatformApplicationArn that is returned from
        `CreatePlatformApplication`. The EndpointArn that is returned
        when using `CreatePlatformEndpoint` can then be used by the
        `Publish` action to send a message to a mobile app or by the
        `Subscribe` action for subscription to a topic. For more
        information, see `Using Amazon SNS Mobile Push
        Notifications`_.

        :type platform_application_arn: string
        :param platform_application_arn: PlatformApplicationArn returned from
            CreatePlatformApplication is used to create a an endpoint.

        :type token: string
        :param token: Unique identifier created by the notification service for
            an app on a device. The specific name for Token will vary,
            depending on which notification service is being used. For example,
            when using APNS as the notification service, you need the device
            token. Alternatively, when using GCM or ADM, the device token
            equivalent is called the registration ID.

        :type custom_user_data: string
        :param custom_user_data: Arbitrary user data to associate with the
            endpoint. SNS does not use this data. The data must be in UTF-8
            format and less than 2KB.

        :type attributes: map
        :param attributes: For a list of attributes, see
            `SetEndpointAttributes`_.
        """
        params = {}
        if platform_application_arn is not None:
            params['PlatformApplicationArn'] = platform_application_arn
        if token is not None:
            params['Token'] = token
        if custom_user_data is not None:
            params['CustomUserData'] = custom_user_data
        if attributes is not None:
            self._build_dict_as_list_params(params, attributes, 'Attributes')
        return self._make_request(action='CreatePlatformEndpoint',
                                  params=params)

    def delete_endpoint(self, endpoint_arn=None):
        """
        The `DeleteEndpoint` action, which is idempotent, deletes the
        endpoint from SNS. For more information, see `Using Amazon SNS
        Mobile Push Notifications`_.

        :type endpoint_arn: string
        :param endpoint_arn: EndpointArn of endpoint to delete.
        """
        params = {}
        if endpoint_arn is not None:
            params['EndpointArn'] = endpoint_arn
        return self._make_request(action='DeleteEndpoint', params=params)

    def set_endpoint_attributes(self, endpoint_arn=None, attributes=None):
        """
        The `SetEndpointAttributes` action sets the attributes for an
        endpoint for a device on one of the supported push
        notification services, such as GCM and APNS. For more
        information, see `Using Amazon SNS Mobile Push
        Notifications`_.

        :type endpoint_arn: string
        :param endpoint_arn: EndpointArn used for SetEndpointAttributes action.

        :type attributes: map
        :param attributes:
        A map of the endpoint attributes. Attributes in this map include the
            following:


        + `CustomUserData` -- arbitrary user data to associate with the
              endpoint. SNS does not use this data. The data must be in UTF-8
              format and less than 2KB.
        + `Enabled` -- flag that enables/disables delivery to the endpoint.
              Message Processor will set this to false when a notification
              service indicates to SNS that the endpoint is invalid. Users can
              set it back to true, typically after updating Token.
        + `Token` -- device token, also referred to as a registration id, for
              an app and mobile device. This is returned from the notification
              service when an app and mobile device are registered with the
              notification service.
        """
        params = {}
        if endpoint_arn is not None:
            params['EndpointArn'] = endpoint_arn
        if attributes is not None:
            self._build_dict_as_list_params(params, attributes, 'Attributes')
        return self._make_request(action='SetEndpointAttributes',
                                  params=params)

    def get_endpoint_attributes(self, endpoint_arn=None):
        """
        The `GetEndpointAttributes` retrieves the endpoint attributes
        for a device on one of the supported push notification
        services, such as GCM and APNS. For more information, see
        `Using Amazon SNS Mobile Push Notifications`_.

        :type endpoint_arn: string
        :param endpoint_arn: EndpointArn for GetEndpointAttributes input.
        """
        params = {}
        if endpoint_arn is not None:
            params['EndpointArn'] = endpoint_arn
        return self._make_request(action='GetEndpointAttributes',
                                  params=params)

    def _make_request(self, action, params, path='/', verb='GET'):
        """Issue the request and decode the JSON response body.

        Raises self.ResponseError for any non-200 HTTP status.
        """
        params['ContentType'] = 'JSON'
        response = self.make_request(action=action, verb=verb,
                                     path=path, params=params)
        body = response.read().decode('utf-8')
        boto.log.debug(body)
        if response.status == 200:
            return json.loads(body)
        else:
            boto.log.error('%s %s' % (response.status, response.reason))
            boto.log.error('%s' % body)
            raise self.ResponseError(response.status, response.reason, body)
| |
import json
import os
import tempfile
from zstacklib.utils import shell
# from kvmagent.plugins.bmv2_gateway_agent import exception
class Base(object):
    """Base class for objects constructed from a request body.

    Subclasses declare ``k_v_mapping``, mapping camelCase request keys to
    snake_case attribute names.
    """
    k_v_mapping = {}

    def __init__(self):
        # Pre-create every mapped attribute as None so that keys missing
        # from the request leave the attribute defined instead of raising
        # AttributeError on access.
        for v in self.k_v_mapping.values():
            setattr(self, v, None)

    @staticmethod
    def body(req):
        """Return the request body as a dict, decoding a JSON string if needed."""
        b_data = req.get('body', {})
        if isinstance(b_data, str):
            b_data = json.loads(b_data)
        return b_data

    def construct(self, data):
        """Populate attributes from *data*.

        Accepts either the request key (camelCase) or the attribute name
        (snake_case); when both are present the snake_case value wins.
        """
        for k, v in self.k_v_mapping.items():
            if k in data.keys():
                setattr(self, v, data[k])
            if v in data.keys():
                setattr(self, v, data[v])

    @classmethod
    def construct_list(cls, items):
        """Yield one freshly constructed object per entry in *items*.

        Bug fix: previously a single instance was created once, mutated,
        and yielded repeatedly, so materializing the generator produced N
        references to one object holding the last item's data (plus stale
        fields carried over from earlier items).
        """
        for data in items:
            obj = cls()
            for k, v in obj.k_v_mapping.items():
                if k in data.keys():
                    setattr(obj, v, data[k])
            yield obj

    def to_json(self):
        """Serialize the mapped attributes to a JSON string keyed by attribute name."""
        return json.dumps(
            {k: getattr(self, k) for k in self.k_v_mapping.values()})
class BmInstanceObj(Base):
    """Bare-metal instance description parsed from a request body.

    Expected request fragment::

        {
            'bmInstance': {
                'uuid': 'uuid',
                'provisionIp': '192.168.101.10',
                'provisionMac': '00-00-00-00-00-00',
                'gatewayIp': '10.0.0.2'
            }
        }
    """
    k_v_mapping = {
        'uuid': 'uuid',
        'provisionIp': 'provision_ip',
        'provisionMac': 'provision_mac',
        'gatewayIp': 'gateway_ip'
    }

    @classmethod
    def from_json(cls, req):
        """Build a BmInstanceObj from the 'bmInstance' body section."""
        instance = cls()
        data = cls.body(req).get('bmInstance', {})
        instance.construct(data)
        return instance
class NetworkObj(Base):
    """Provision-network description parsed from a request body.

    A req body example::

        {
            'provisionNetwork': {
                'dhcpInterface': 'eno1',
                'dhcpRangeStartIp': '10.0.201.20',
                'dhcpRangeEndIp': '10.0.201.30',
                'dhcpRangeNetmask': '255.255.255.0',
                'dhcpRangeGateway': '10.0.201.1',
                'provisionNicIp': '10.0.201.10',
                'managementIp': '10.0.201.101',
                'callBackIp': '10.1.1.10',
                'callBackPort': '8080',
                'baremetal2InstanceProxyPort': '7090'
            }
        }
    """
    k_v_mapping = {
        'dhcpInterface': 'dhcp_interface',
        'dhcpRangeStartIp': 'dhcp_range_start_ip',
        'dhcpRangeEndIp': 'dhcp_range_end_ip',
        'dhcpRangeNetmask': 'dhcp_range_netmask',
        'dhcpRangeGateway': 'dhcp_range_gateway',
        'provisionNicIp': 'provision_nic_ip',
        'managementIp': 'management_ip',
        'callBackIp': 'callback_ip',
        'callBackPort': 'callback_port',
        'baremetal2InstanceProxyPort': 'baremetal_instance_proxy_port'
    }

    @classmethod
    def from_json(cls, req):
        """Return (network, [BmInstanceObj, ...]) parsed from the
        'provisionNetwork' body section; the list is empty when the
        section carries no 'bmInstances'."""
        network = cls()
        payload = cls.body(req).get('provisionNetwork', {})
        network.construct(payload)
        instances = [
            BmInstanceObj.from_json({'body': {'bmInstance': item}})
            for item in (payload.get('bmInstances') or [])
        ]
        return network, instances
class VolumeObj(Base):
    """Volume description parsed from a request body.

    Volume part of req::

        {
            'volume': {
                'uuid': 'uuid',
                'primaryStorageType': 'NFS',
                'type': 'Root/Data',
                'path': '/path/to/nfs/qcow2/volume',
                'format': 'qcow2'
            }
        }
    """
    k_v_mapping = {
        'uuid': 'uuid',
        'primaryStorageType': 'primary_storage_type',
        'type': 'type',
        'path': 'path',
        'format': 'format',
        'deviceId': 'device_id'
    }

    @classmethod
    def from_json(cls, req):
        """Build a single VolumeObj from the 'volume' body section."""
        volume = cls()
        volume.construct(cls.body(req).get('volume', {}))
        return volume

    @classmethod
    def from_json_list(cls, req):
        """Yield a VolumeObj per entry in the 'volumes' body section."""
        for item in cls.body(req).get('volumes'):
            volume = cls()
            volume.construct(item)
            yield volume
class TargetcliConfObj(object):
    """ Snapshot of the running targetcli configuration.

    Loads the configuration via ``targetcli / saveconfig`` and assumes
    each target has exactly one tpg.  The parsed structure::

        {
            'storages': {
                'name1': {
                    'dev': 'dev1',
                    'plugin': enum['block', 'fileio', 'pscsi', 'ramdisk'],
                    'wwn': 'bd8d596f-8bca-4524-97cf-19a297836df8'
                }
            },
            'targets': {
                'wwn1': {
                    'luns': {'storage_name1': 0, 'storage_name2': 1},
                    'acls': ['node_wwn1']
                }
            }
        }
    """

    def __init__(self, volume):
        # volume: object exposing iscsi_backstore_name / iscsi_target /
        # iscsi_acl (project type; see VolumeObj users)
        self.volume = volume
        self.storages = {}
        self.targets = {}
        self.refresh()

    def refresh(self):
        """Re-read targetcli state into self.storages / self.targets.

        Fixes: uses ``tempfile.mkstemp()`` instead of the deprecated,
        race-prone ``tempfile.mktemp()``, and removes the temp file even
        when the shell call or the JSON parse raises (the original
        leaked it on failure).
        """
        fd, temp_file = tempfile.mkstemp()
        os.close(fd)
        try:
            cmd = 'targetcli / saveconfig {temp_file}'.format(
                temp_file=temp_file)
            shell.call(cmd)
            with open(temp_file, 'r') as f:
                conf_raw = json.loads(f.read())
        finally:
            os.remove(temp_file)

        for storage_obj in conf_raw.get('storage_objects'):
            name = storage_obj.get('name')
            self.storages[name] = {
                'dev': storage_obj.get('dev'),
                'wwn': storage_obj.get('wwn'),
                'plugin': storage_obj.get('plugin')
            }
        for target in conf_raw.get('targets'):
            wwn = target.get('wwn')
            # Assumption baked into this agent: one tpg per target.
            tpg = target.get('tpgs')[0]
            target = {}
            target['acls'] = \
                [x.get('node_wwn') for x in tpg.get('node_acls')]
            luns = tpg.get('luns')
            target['luns'] = \
                {x.get('storage_object'): int(x.get('index')) for x in luns}
            self.targets.update({wwn: target})

    @property
    def backstore(self):
        # Storage entry backing this volume, or {} when absent.
        return self.storages.get(self.volume.iscsi_backstore_name, {})

    @property
    def target(self):
        return self.targets.get(self.volume.iscsi_target, {})

    @property
    def luns(self):
        return self.targets.get(self.volume.iscsi_target, {}).get('luns', {})

    @property
    def acls(self):
        return self.targets.get(self.volume.iscsi_target, {}).get('acls', [])

    @property
    def lun_exist(self):
        # lun keys are full backstore paths as recorded by targetcli.
        backstore_full_path = '/backstores/block/{name}'.format(
            name=self.volume.iscsi_backstore_name)
        return backstore_full_path in self.luns

    @property
    def acl_exist(self):
        return self.volume.iscsi_acl in self.acls
| |
# Webhooks for external integrations.
import re
import string
from functools import partial
from inspect import signature
from typing import Any, Dict, List, Optional
from django.http import HttpRequest, HttpResponse
from zerver.decorator import log_unsupported_webhook_event, webhook_view
from zerver.lib.exceptions import UnsupportedWebhookEventType
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_success
from zerver.lib.webhooks.common import (
check_send_webhook_message,
validate_extract_webhook_http_header,
)
from zerver.lib.webhooks.git import (
TOPIC_WITH_BRANCH_TEMPLATE,
TOPIC_WITH_PR_OR_ISSUE_INFO_TEMPLATE,
get_commits_comment_action_message,
get_force_push_commits_event_message,
get_issue_event_message,
get_pull_request_event_message,
get_push_commits_event_message,
get_push_tag_event_message,
get_remove_branch_event_message,
)
from zerver.models import UserProfile
# Message/topic templates for the Bitbucket (v2) webhook integration.
BITBUCKET_TOPIC_TEMPLATE = "{repository_name}"
BITBUCKET_FORK_BODY = "{actor} forked the repository into [{fork_name}]({fork_url})."
BITBUCKET_COMMIT_STATUS_CHANGED_BODY = (
    "[System {key}]({system_url}) changed status of {commit_info} to {status}."
)
BITBUCKET_REPO_UPDATED_CHANGED = (
    "{actor} changed the {change} of the **{repo_name}** repo from **{old}** to **{new}**"
)
BITBUCKET_REPO_UPDATED_ADDED = (
    "{actor} changed the {change} of the **{repo_name}** repo to **{new}**"
)
# pullrequest:<action> values from the X-Event-Key header that we handle
# (see get_type); anything else raises UnsupportedWebhookEventType.
PULL_REQUEST_SUPPORTED_ACTIONS = [
    "approved",
    "unapproved",
    "created",
    "updated",
    "rejected",
    "fulfilled",
    "comment_created",
    "comment_updated",
    "comment_deleted",
]
# Full set of event types this view can emit, for webhook_view bookkeeping.
ALL_EVENT_TYPES = [
    "change_commit_status",
    "pull_request_comment_created",
    "pull_request_updated",
    "pull_request_unapproved",
    "push",
    "pull_request_approved",
    "pull_request_fulfilled",
    "issue_created",
    "issue_commented",
    "fork",
    "pull_request_comment_updated",
    "pull_request_created",
    "pull_request_rejected",
    "repo:updated",
    "issue_updated",
    "commit_comment",
    "pull_request_comment_deleted",
]
@webhook_view("Bitbucket2", all_event_types=ALL_EVENT_TYPES)
@has_request_variables
def api_bitbucket2_webhook(
    request: HttpRequest,
    user_profile: UserProfile,
    payload: Dict[str, Any] = REQ(argument_type="body"),
    branches: Optional[str] = REQ(default=None),
    user_specified_topic: Optional[str] = REQ("topic", default=None),
) -> HttpResponse:
    """Entry point for Bitbucket Cloud (v2) webhooks.

    Classifies the payload, builds topic(s) and body(ies), and forwards
    them via check_send_webhook_message.  Push events may fan out into
    one message per change; all other events produce a single message.
    """
    type = get_type(request, payload)
    if type == "push":
        # ignore push events with no changes
        if not payload["push"]["changes"]:
            return json_success()
        branch = get_branch_name_for_push_event(payload)
        if branch and branches:
            # Skip pushes to branches outside the user's configured filter.
            if branches.find(branch) == -1:
                return json_success()
    subject = get_subject_based_on_type(payload, type)
    body_function = get_body_based_on_type(type)
    # Only pass include_title to body builders that declare the parameter.
    if "include_title" in signature(body_function).parameters:
        body = body_function(
            payload,
            include_title=user_specified_topic is not None,
        )
    else:
        body = body_function(payload)
    if type != "push":
        check_send_webhook_message(
            request, user_profile, subject, body, type, unquote_url_parameters=True
        )
    else:
        # For pushes, subject and body are parallel lists (one per change).
        for b, s in zip(body, subject):
            check_send_webhook_message(
                request, user_profile, s, b, type, unquote_url_parameters=True
            )
    return json_success()
def get_subject_for_branch_specified_events(
    payload: Dict[str, Any], branch_name: Optional[str] = None
) -> str:
    """Topic for branch-scoped events; derives the branch from the push
    payload when no explicit branch name is given."""
    if branch_name is None:
        branch_name = get_branch_name_for_push_event(payload)
    return TOPIC_WITH_BRANCH_TEMPLATE.format(
        repo=get_repository_name(payload["repository"]),
        branch=branch_name,
    )
def get_push_subjects(payload: Dict[str, Any]) -> List[str]:
    """One topic per change in a push payload: tag pushes use the
    repository-wide topic, branch pushes the branch-specific one."""
    subjects_list: List[str] = []
    for change in payload["push"]["changes"]:
        state = change["new"] or change["old"] or {}
        if state.get("type") == "tag":
            subjects_list.append(str(get_subject(payload)))
            continue
        if change.get("new"):
            branch_name = change["new"]["name"]
        else:
            branch_name = change["old"]["name"]
        subjects_list.append(str(get_subject_for_branch_specified_events(payload, branch_name)))
    return subjects_list
def get_subject(payload: Dict[str, Any]) -> str:
    """Repository-wide topic name."""
    repository = payload["repository"]
    assert repository is not None
    return BITBUCKET_TOPIC_TEMPLATE.format(
        repository_name=get_repository_name(repository)
    )
def get_subject_based_on_type(payload: Dict[str, Any], type: str) -> Any:
    """Route to the PR / issue / push / default topic builder for *type*.

    Returns a list of topics for "push" and a single string otherwise.
    """
    if type.startswith("pull_request"):
        pull_request = payload["pullrequest"]
        return TOPIC_WITH_PR_OR_ISSUE_INFO_TEMPLATE.format(
            repo=get_repository_name(payload["repository"]),
            type="PR",
            id=pull_request["id"],
            title=pull_request["title"],
        )
    if type.startswith("issue"):
        issue = payload["issue"]
        return TOPIC_WITH_PR_OR_ISSUE_INFO_TEMPLATE.format(
            repo=get_repository_name(payload["repository"]),
            type="issue",
            id=issue["id"],
            title=issue["title"],
        )
    if type == "push":
        return get_push_subjects(payload)
    return get_subject(payload)
def get_type(request: HttpRequest, payload: Dict[str, Any]) -> str:
    """Classify the webhook payload into one of ALL_EVENT_TYPES.

    Raises UnsupportedWebhookEventType for anything unrecognized.
    """
    if payload.get("push"):
        return "push"
    if payload.get("fork"):
        return "fork"
    if payload.get("comment") and payload.get("commit"):
        return "commit_comment"
    if payload.get("commit_status"):
        return "change_commit_status"
    if payload.get("issue"):
        if payload.get("changes"):
            return "issue_updated"
        if payload.get("comment"):
            return "issue_commented"
        return "issue_created"
    if payload.get("pullrequest"):
        # Only pull request events need the HTTP header to disambiguate;
        # everything else is inferred from the payload alone.
        event_key = validate_extract_webhook_http_header(request, "X_EVENT_KEY", "BitBucket")
        assert event_key is not None
        match = re.match("pullrequest:(?P<action>.*)$", event_key)
        if match:
            action = match.group("action")
            if action in PULL_REQUEST_SUPPORTED_ACTIONS:
                return "pull_request_{}".format(action)
    else:
        event_key = validate_extract_webhook_http_header(request, "X_EVENT_KEY", "BitBucket")
        if event_key == "repo:updated":
            return event_key
    raise UnsupportedWebhookEventType(event_key)
def get_body_based_on_type(type: str) -> Any:
    """Look up the message-body builder registered for *type*."""
    return GET_SINGLE_MESSAGE_BODY_DEPENDING_ON_TYPE_MAPPER.get(type)
def get_push_bodies(payload: Dict[str, Any]) -> List[str]:
    """One message body per change in a push payload."""
    bodies: List[str] = []
    for change in payload["push"]["changes"]:
        if (change["new"] or change["old"] or {}).get("type") == "tag":
            bodies.append(get_push_tag_body(payload, change))
        elif change.get("new") is None:
            # A branch deletion arrives as a change with no "new" side.
            bodies.append(get_remove_branch_push_body(payload, change))
        elif change.get("forced"):
            bodies.append(get_force_push_body(payload, change))
        else:
            bodies.append(get_normal_push_body(payload, change))
    return bodies
def get_remove_branch_push_body(payload: Dict[str, Any], change: Dict[str, Any]) -> str:
    """Body announcing a deleted branch."""
    actor = get_actor_info(payload)
    return get_remove_branch_event_message(actor, change["old"]["name"])
def get_force_push_body(payload: Dict[str, Any], change: Dict[str, Any]) -> str:
    """Body for a force push to a branch."""
    new_state = change["new"]
    return get_force_push_commits_event_message(
        get_actor_info(payload),
        change["links"]["html"]["href"],
        new_state["name"],
        new_state["target"]["hash"],
    )
def get_commit_author_name(commit: Dict[str, Any]) -> str:
    """Author display name; falls back to the first token of the raw
    author string when no user record is attached to the commit."""
    user = commit["author"].get("user")
    if user:
        return get_user_info(user)
    return commit["author"]["raw"].split()[0]
def get_normal_push_body(payload: Dict[str, Any], change: Dict[str, Any]) -> str:
    """Body for an ordinary (non-forced, non-tag) branch push."""
    commits_data = []
    for commit in change["commits"]:
        commits_data.append(
            {
                "name": get_commit_author_name(commit),
                "sha": commit.get("hash"),
                "url": commit.get("links").get("html").get("href"),
                "message": commit.get("message"),
            }
        )
    return get_push_commits_event_message(
        get_actor_info(payload),
        change["links"]["html"]["href"],
        change["new"]["name"],
        commits_data,
        is_truncated=change["truncated"],
    )
def get_fork_body(payload: Dict[str, Any]) -> str:
    """Body announcing a repository fork."""
    fork = payload["fork"]
    return BITBUCKET_FORK_BODY.format(
        actor=get_user_info(payload["actor"]),
        fork_name=get_repository_full_name(fork),
        fork_url=get_repository_url(fork),
    )
def get_commit_comment_body(payload: Dict[str, Any]) -> str:
    """Body for a comment left on a commit."""
    comment = payload["comment"]
    comment_url = comment["links"]["html"]["href"]
    commit = comment["commit"]
    return get_commits_comment_action_message(
        get_actor_info(payload),
        "[commented]({})".format(comment_url),
        commit["links"]["html"]["href"],
        commit["hash"],
        comment["content"]["raw"],
    )
def get_commit_status_changed_body(payload: Dict[str, Any]) -> str:
    """Body for a CI/commit-status change."""
    commit_status = payload["commit_status"]
    # The commit id is only available as the tail of the commit API URL.
    commit_id = commit_status["links"]["commit"]["href"].split("/")[-1]
    commit_info = "[{short_commit_id}]({repo_url}/commits/{commit_id})".format(
        repo_url=get_repository_url(payload["repository"]),
        short_commit_id=commit_id[:7],
        commit_id=commit_id,
    )
    return BITBUCKET_COMMIT_STATUS_CHANGED_BODY.format(
        key=commit_status["key"],
        system_url=commit_status["url"],
        commit_info=commit_info,
        status=commit_status["state"],
    )
def get_issue_commented_body(payload: Dict[str, Any], include_title: bool = False) -> str:
    """Body for a new comment on an issue."""
    comment_url = payload["comment"]["links"]["html"]["href"]
    return get_issue_action_body(payload, "[commented]({}) on".format(comment_url), include_title)
def get_issue_action_body(payload: Dict[str, Any], action: str, include_title: bool = False) -> str:
    """Body for issue lifecycle events; creation additionally carries
    the description and assignee."""
    issue = payload["issue"]
    assignee = None
    message = None
    if action == "created":
        assignee = get_user_info(issue["assignee"]) if issue["assignee"] else None
        message = issue["content"]["raw"]
    return get_issue_event_message(
        get_actor_info(payload),
        action,
        issue["links"]["html"]["href"],
        issue["id"],
        message,
        assignee,
        title=issue["title"] if include_title else None,
    )
def get_pull_request_action_body(
    payload: Dict[str, Any], action: str, include_title: bool = False
) -> str:
    """Body for simple PR state changes (approved, merged, ...)."""
    pr = payload["pullrequest"]
    title = pr["title"] if include_title else None
    return get_pull_request_event_message(
        get_actor_info(payload),
        action,
        get_pull_request_url(pr),
        pr.get("id"),
        title=title,
    )
def get_pull_request_created_or_updated_body(
    payload: Dict[str, Any], action: str, include_title: bool = False
) -> str:
    """Body for PR creation/update, including source/destination
    branches, the description, and the first reviewer (if any)."""
    pr = payload["pullrequest"]
    reviewers = pr.get("reviewers")
    assignee = get_user_info(reviewers[0]) if reviewers else None
    return get_pull_request_event_message(
        get_actor_info(payload),
        action,
        get_pull_request_url(pr),
        pr.get("id"),
        target_branch=pr["source"]["branch"]["name"],
        base_branch=pr["destination"]["branch"]["name"],
        message=pr["description"],
        assignee=assignee,
        title=pr["title"] if include_title else None,
    )
def get_pull_request_comment_created_action_body(
    payload: Dict[str, Any],
    include_title: bool = False,
) -> str:
    """Body for a newly created PR comment."""
    comment_url = payload["comment"]["links"]["html"]["href"]
    return get_pull_request_comment_action_body(
        payload, "[commented]({})".format(comment_url), include_title
    )
def get_pull_request_deleted_or_updated_comment_action_body(
    payload: Dict[str, Any],
    action: str,
    include_title: bool = False,
) -> str:
    """Body for an edited or removed PR comment."""
    comment_url = payload["comment"]["links"]["html"]["href"]
    return get_pull_request_comment_action_body(
        payload, "{} a [comment]({})".format(action, comment_url), include_title
    )
def get_pull_request_comment_action_body(
    payload: Dict[str, Any],
    action: str,
    include_title: bool = False,
) -> str:
    """Shared builder for PR-comment events; appends ' on' so the action
    reads naturally before the PR link."""
    pr = payload["pullrequest"]
    return get_pull_request_event_message(
        get_actor_info(payload),
        action + " on",
        pr["links"]["html"]["href"],
        pr["id"],
        message=payload["comment"]["content"]["raw"],
        title=pr["title"] if include_title else None,
    )
def get_push_tag_body(payload: Dict[str, Any], change: Dict[str, Any]) -> str:
    """Body for a pushed or removed tag.

    NOTE(review): assumes every tag change carries a "new" or an "old"
    side; a change with neither leaves tag/action unbound, exactly as in
    the original implementation — confirm upstream guarantees this.
    """
    if change.get("new"):
        tag, action = change["new"], "pushed"
    elif change.get("old"):
        tag, action = change["old"], "removed"
    return get_push_tag_event_message(
        get_actor_info(payload),
        tag.get("name"),
        tag_url=tag["links"]["html"].get("href"),
        action=action,
    )
def append_punctuation(title: str, message: str) -> str:
    """Append a period to *message* unless *title* already ends with a
    punctuation character (so the rendered line reads naturally).

    Robustness fix: an empty *title* no longer raises IndexError on
    ``title[-1]``; the message is simply terminated with a period.
    """
    if not title or title[-1] not in string.punctuation:
        message = f"{message}."
    return message
def get_repo_updated_body(payload: Dict[str, Any]) -> str:
    """Multi-line body describing every tracked repository setting that
    changed in a repo:updated payload (one line per setting)."""
    tracked_fields = ["website", "name", "links", "language", "full_name", "description"]
    repo_name = payload["repository"]["name"]
    actor = get_actor_info(payload)
    lines = []
    for field in tracked_fields:
        new = payload["changes"][field]["new"]
        old = payload["changes"][field]["old"]
        label = "full name" if field == "full_name" else field
        if not new:
            # No new value: nothing to report for this field.
            continue
        if old:
            message = BITBUCKET_REPO_UPDATED_CHANGED.format(
                actor=actor,
                change=label,
                repo_name=repo_name,
                old=old,
                new=new,
            )
        else:
            message = BITBUCKET_REPO_UPDATED_ADDED.format(
                actor=actor,
                change=label,
                repo_name=repo_name,
                new=new,
            )
        lines.append(append_punctuation(new, message) + "\n")
    return "".join(lines)
def get_pull_request_url(pullrequest_payload: Dict[str, Any]) -> str:
    """HTML link of a pull request payload."""
    links = pullrequest_payload["links"]
    return links["html"]["href"]
def get_repository_url(repository_payload: Dict[str, Any]) -> str:
    """HTML link of a repository payload."""
    html_link = repository_payload["links"]["html"]
    return html_link["href"]
def get_repository_name(repository_payload: Dict[str, Any]) -> str:
    """Short repository name (the payload's "name" field)."""
    name = repository_payload["name"]
    return name
def get_repository_full_name(repository_payload: Dict[str, Any]) -> str:
    """owner/name form of the repository (the "full_name" field)."""
    full_name = repository_payload["full_name"]
    return full_name
def get_user_info(dct: Dict[str, Any]) -> str:
    """Best-effort display name for a Bitbucket user dict.

    Post-GDPR payloads no longer carry a username; we get display_name
    or nickname instead.  See
    https://developer.atlassian.com/cloud/bitbucket/bitbucket-api-changes-gdpr/
    """
    for key in ("display_name", "nickname"):
        if key in dct:
            return dct[key]
    # We still send a message, but flag the unexpected payload shape.
    log_unsupported_webhook_event(
        summary="Could not find display_name/nickname field",
    )
    return "Unknown user"
def get_actor_info(payload: Dict[str, Any]) -> str:
    """Display name of the user who triggered the event."""
    return get_user_info(payload["actor"])
def get_branch_name_for_push_event(payload: Dict[str, Any]) -> Optional[str]:
    """Branch name of the most recent change in a push payload, or None
    when the push concerns a tag rather than a branch."""
    change = payload["push"]["changes"][-1]
    state = change["new"] or change["old"]
    if (state or {}).get("type") == "tag":
        return None
    return state.get("name")
# Dispatch table: event type (see get_type) -> message-body builder.
# "push" maps to get_push_bodies, which returns a list of bodies; the
# view fans those out one message per change.
GET_SINGLE_MESSAGE_BODY_DEPENDING_ON_TYPE_MAPPER = {
    "fork": get_fork_body,
    "commit_comment": get_commit_comment_body,
    "change_commit_status": get_commit_status_changed_body,
    "issue_updated": partial(get_issue_action_body, action="updated"),
    "issue_created": partial(get_issue_action_body, action="created"),
    "issue_commented": get_issue_commented_body,
    "pull_request_created": partial(get_pull_request_created_or_updated_body, action="created"),
    "pull_request_updated": partial(get_pull_request_created_or_updated_body, action="updated"),
    "pull_request_approved": partial(get_pull_request_action_body, action="approved"),
    "pull_request_unapproved": partial(get_pull_request_action_body, action="unapproved"),
    "pull_request_fulfilled": partial(get_pull_request_action_body, action="merged"),
    "pull_request_rejected": partial(get_pull_request_action_body, action="rejected"),
    "pull_request_comment_created": get_pull_request_comment_created_action_body,
    "pull_request_comment_updated": partial(
        get_pull_request_deleted_or_updated_comment_action_body, action="updated"
    ),
    "pull_request_comment_deleted": partial(
        get_pull_request_deleted_or_updated_comment_action_body, action="deleted"
    ),
    "push": get_push_bodies,
    "repo:updated": get_repo_updated_body,
}
| |
""" PyPTV_BATCH is the script for the 3D-PTV (http://ptv.origo.ethz.ch) written in
Python/Enthought Traits GUI/Numpy/Chaco
Example:
>> python pyptv_batch.py experiments/exp1 10001 10022
where 10001 is the first file in sequence and 10022 is the last one
the present "active" parameters are kept intact except the sequence
"""
# from scipy.misc import imread
import os
import sys
import numpy as np
# project specific inputs
import parameters as par
import general
import time
# directory from which we run the software
cwd = os.getcwd()
# import pdb; pdb.set_trace()
def sequence_tracking(n_img):
    """Run the sequence step over all frames, then forward and backward
    tracking (Python 2 code path).

    n_img -- number of cameras/images per frame.

    Reads base_name, seq_first, seq_last from the sequence parameters
    stored under par.temp_path; images are pushed into the ptv1 module,
    which holds the processing state as module-level side effects.
    """
    # get following variables from the parameters:
    # n_camera, seq_first, seq_last, base_name
    import ptv1 as ptv
    sequenceParams = par.SequenceParams(n_img, path=par.temp_path)
    sequenceParams.read()
    (base_name, seq_first, seq_last) = (
        sequenceParams.base_name, sequenceParams.first, sequenceParams.last)
    print ("Starting sequence action")
    ptv.py_sequence_init(0)
    stepshake = ptv.py_get_from_sequence_init()
    if not stepshake:
        # 0/None from the C layer means "no step-and-shake": use every frame.
        stepshake = 1
    print stepshake
    temp_img = np.array([], dtype=np.ubyte)
    for i in range(seq_first, seq_last + 1, stepshake):
        # Frame-number suffix; effectively str(i) for i < 1000.
        if i < 10:
            seq_ch = "%01d" % i
        elif i < 100:
            seq_ch = "%02d" % i
        else:
            seq_ch = "%03d" % i
        for j in range(n_img):
            img_name = base_name[j] + seq_ch
            print ("Setting image: ", img_name)
            try:
                # NOTE(review): imread is not imported at module level (the
                # scipy.misc import is commented out), so this raises
                # NameError which the bare except silently swallows — confirm.
                temp_img = imread(img_name).astype(np.ubyte)
            except:
                print "Error reading file"
            ptv.py_set_img(temp_img, j)
        ptv.py_sequence_loop(0, i)
    # forward tracking
    run_info = ptv.py_trackcorr_init()
    print run_info.get_sequence_range()
    for step in range(*run_info.get_sequence_range()):
        print step
        ptv.py_trackcorr_loop(run_info, step, display=0)
    ptv.py_trackcorr_finish(run_info, step + 1)
    print "tracking without display finished"
    # NOTE(review): the comment below says back tracking was canceled due
    # to a bug, but the call is still active — confirm intent.
    # RON - cancled back tracking due to bug
    ptv.py_trackback_c()
    print "tracking backwards is finished"
def sequence(n_img):
    """Run only the sequence step (no tracking) over all frames
    (Python 2 code path); mirrors the first half of sequence_tracking.

    n_img -- number of cameras/images per frame.
    """
    # get following variables from the parameters:
    # n_camera, seq_first, seq_last, base_name
    import ptv1 as ptv
    sequenceParams = par.SequenceParams(n_img, path=par.temp_path)
    sequenceParams.read()
    (base_name, seq_first, seq_last) = (
        sequenceParams.base_name, sequenceParams.first, sequenceParams.last)
    print ("Starting sequence action")
    ptv.py_sequence_init(0)
    stepshake = ptv.py_get_from_sequence_init()
    if not stepshake:
        # 0/None from the C layer means "no step-and-shake": use every frame.
        stepshake = 1
    print stepshake
    temp_img = np.array([], dtype=np.ubyte)
    for i in range(seq_first, seq_last + 1, stepshake):
        # Frame-number suffix; effectively str(i) for i < 1000.
        if i < 10:
            seq_ch = "%01d" % i
        elif i < 100:
            seq_ch = "%02d" % i
        else:
            seq_ch = "%03d" % i
        for j in range(n_img):
            img_name = base_name[j] + seq_ch
            print ("Setting image: ", img_name)
            try:
                # NOTE(review): imread is not imported at module level (the
                # scipy.misc import is commented out) — any read fails and is
                # swallowed by the bare except below; confirm intended.
                temp_img = imread(img_name).astype(np.ubyte)
            except:
                print "Error reading file"
            ptv.py_set_img(temp_img, j)
        ptv.py_sequence_loop(0, i)
def run_batch(new_seq_first, new_seq_last):
    """Rewrite the sequence range in the stored parameters and run the
    sequence + tracking pipeline for that range.

    new_seq_first, new_seq_last -- first and last frame numbers to process.
    """
    # import pdb; pdb.set_trace()
    import ptv1 as ptv
    ptv.py_init_proc_c()
    ptv.py_start_proc_c()  # or ptv.py_init_proc_c()?
    ptvParams = par.PtvParams(path=par.temp_path)
    ptvParams.read()
    (n_img, img_name, img_cal, hp_flag, allCam_flag, tiff_flag, imx, imy, pix_x, pix_y, chfield, mmp_n1, mmp_n2, mmp_n3, mmp_d) = \
        (ptvParams.n_img, ptvParams.img_name, ptvParams.img_cal, ptvParams.hp_flag, ptvParams.allCam_flag, ptvParams.tiff_flag,
         ptvParams.imx, ptvParams.imy, ptvParams.pix_x, ptvParams.pix_y, ptvParams.chfield, ptvParams.mmp_n1, ptvParams.mmp_n2, ptvParams.mmp_n3, ptvParams.mmp_d)
    # read the sequence parameters
    sequenceParams = par.SequenceParams(n_img, path=par.temp_path)
    sequenceParams.read()
    (base_name, seq_first, seq_last) = (
        sequenceParams.base_name, sequenceParams.first, sequenceParams.last)
    # write the new sequence parameters (all other parameters kept intact)
    par.SequenceParams(n_img, base_name,
                       new_seq_first, new_seq_last, path=par.temp_path).write()
    # if you need sequence and tracking:
    sequence_tracking(n_img)
    # if you need sequence only:
    # sequence(n_img)
def main(sys_argv, repetitions=1):
""" runs the batch
Usage:
main([software_path, exp_dir, first, last], [repetitions])
Parameters:
list of 4 parameters in this order:
software_path : directory of pyptv_batch.py
exp_dir : directory with the experiment data
first, last : integer, number of a first and last frame
repetitions : int, default = 1, optional
"""
software_path = os.path.split(os.path.abspath(sys_argv[0]))[0]
print 'software_path=', software_path
try:
os.chdir(software_path)
except:
raise ValueError("Error in instalation or software path")
import string
src_path = string.replace(software_path,'pyptv_gui','src_c')
print('Source path for ptv1.so is %s' % src_path)
sys.path.append(src_path)
import ptv1 as ptv
start = time.time()
try:
exp_path = os.path.abspath(sys_argv[1])
print('exp_path= %s' % exp_path)
os.chdir(exp_path)
print(os.getcwd())
except:
raise ValueError('Wrong experimental directory %s' % exp_path)
# RON - make a res dir if it not found
if 'res' not in os.listdir(sys_argv[1]):
print " 'res' folder not found. creating one"
os.makedirs(os.path.join(sys_argv[1],'res'))
for i in range(repetitions):
try: # strings
seq_first = eval(sys_argv[2])
seq_last = eval(sys_argv[3])
except: # integers
seq_first = sys_argv[2]
seq_last = sys_argv[3]
try:
run_batch(seq_first, seq_last)
except:
print("something wrong with the software or folder")
general.printException()
end = time.time()
print 'time lapsed %f sec' % (end - start)
if __name__ == '__main__':
    """ pyptv_batch.py enables to run a sequence without GUI
    It can run from a command shell:
    python pyptv_batch.py ~/test_cavity 10000 10004
    or from Python:
    import sys, os
    sys.path.append(os.path.abspath('openptv/openptv-python/pyptv_gui'))
    from pyptv_batch import main
    batch_command = '/openptv/openptv-python/pyptv_gui/pyptv_batch.py'
    PyPTV_working_directory = '/openptv/Working_folder/'
    mi,mx = 65119, 66217
    main([batch_command,PyPTV_working_directory, mi, mx])
    """
    # Expected argv: script name, experiment path, first frame, last frame.
    if len(sys.argv) < 4:
        print("Wrong number of inputs, usage: python pyptv_batch.py \
        experiments/exp1 seq_first seq_last")
        raise ValueError('wrong number of inputs')
    main(sys.argv)
| |
import sys, os, shutil
from datetime import datetime
#from git import Repo, GitCommandError
join = os.path.join
import_path = os.path.abspath('..')
sys.path.append(import_path)
from dkrz_forms import form_handler, utils
#from dkrz_forms.config.settings import INSTALL_DIRECTORY, SUBMISSION_REPO, NOTEBOOK_DIRECTORY,FORM_URL_PATH, FORM_DIRECTORY
#from dkrz_forms.config.project_config import PROJECT_DICT
#from dkrz_forms.config.workflow_steps import WORKFLOW_DICT
from dkrz_forms.config.settings import FORM_DIRECTORY
# Fixture data shared by the submission-workflow tests below.
init_form = {}
init_form['first_name'] = "unit_tester"
init_form['last_name'] = "testsuite"
init_form['project'] = "test"
init_form['email'] = "stephan.kindermann@gmail.com"
init_form['key'] = "1"
init_form['pwd'] = "test1"
# Per-project form repository and the JSON file the tests round-trip.
form_repo = join(FORM_DIRECTORY, init_form['project'])
#print test_config.cordex_directory
FORM_JSON = join(form_repo,init_form['project']+'_'+init_form['last_name']+'_'+init_form['key']+'.json')
# get workflow steps
#(submission,ingest,checking,publish) = form_handler.get_workflow_steps()
#print submission.__dict__
#def test_me():
# assert form_repo == os.path.abspath("....")
# activity 'status':'0:initialized, 1:generated,2:checked, 2:incomplete,3:submitted,4:re-opened,5:re-submitted',
# entity out 'status': '0:open,1:stored,2:submitted,2:accepted',
# entity_out 'check_status' : "0:open,1:warning,2:error,3:ok",
def now():
    """Current local time rendered as a string (used for timestamps)."""
    return str(datetime.now())
def test_init_form():
    """Initialize a git-backed form repo and a fresh submission form."""
    global sf
    global init_form
    global form_repo
    utils.init_git_repo(form_repo)
    sf = form_handler.init_form(init_form)
    # A freshly initialized submission starts in the "open" state.
    assert sf.sub.activity.status == "0:open"
    assert os.path.exists(form_repo) == 1
    #assert sf.sub.entity_out.form_repo == FORM_REPO+'/'+init_form['project']
    assert sf.sub.agent.last_name == "testsuite"
def test_generate_submission_form():
    """Generate the submission form files and check they were created."""
    ## takes myconfig from .dkrz_forms if existing !!
    global sf
    global init_form
    sf = form_handler.generate_submission_form(init_form)
    # .. to do .. make some changes to sf ..
    sf = form_handler.save_form(sf,"test_generate_submission")
    assert sf.sub.activity.status =="1:in-progress"
    #assert sf.form_dir == form_repo
    assert sf.sub.agent.last_name == "testsuite"
    # assert sf.sub.activity. .. --> to do
    # Both the notebook and the JSON serialization must land in the repo.
    files = os.listdir(form_repo)
    assert sf.sub.entity_out.form_name+".ipynb" in files
    assert sf.sub.entity_out.form_name+".json" in files
def test_form_completion():
    """Fill in submission activity fields and save the form."""
    ## reads form json file and returns hierachical form object
    global sf
    wf = utils.load_workflow_form(FORM_JSON)
    sub = wf.sub
    sub.activity.start_time = now()
    sub.activity.status = "2:action-required"
    sub.activity.error_status = "1:ok"
    sf = form_handler.save_form(wf, "test: formcompletion()")
    assert sf.sub.entity_out.status == '1:stored'
    assert sf.sub.activity.status == '2:action-required'
def test_form_submission():
    """Populate submission results and chain them into the review step."""
    global sf
    wf = utils.load_workflow_form(FORM_JSON)
    sub = wf.sub
    #to do: fix submission test
    sub.entity_out.submission_json = "tst.json"
    sub.entity_out.submission_form = "tst.ipynb"
    sub.entity_out.submission_repo = "."
    sub.activity.ticket_id = 22949
    sub.activity.ticket_url = "https://dm-rt.dkrz.de/Ticket/Display.html?id="
    sub.entity_out.check_status = "3:ok"
    sub.entity_out.status = "1:stored"
    sub.entity_out.checks_done = "consistency with templates"
    sub.activity.method = "form_based"
    # Hand the submission output to the review step as its input.
    wf.rev.entity_in = sub.entity_out
    sf = form_handler.save_form(wf, "test: form_submission")
    assert sf.sub.activity.ticket_id == 22949
    ## to do: fill form - check validity - perform submit, check, ingest and publication steps ..
def test_form_review():
    """Walk the review step to closed and chain its output to ingest."""
    global sf
    wf = utils.load_workflow_form(FORM_JSON)
    rev = wf.rev
    rev.activity.ticket_url = "https://dm-rt.dkrz.de/Ticket/Display.html?"
    rev.agent.responsible_person = "dkrz data manager name"
    rev.activity.status = "1:in-progress"
    rev.activity.status = "4:closed"
    rev.activity.error_status = "1:ok"
    rev.activity.start_time = now()
    rev.activity.ticket_id = "1:testticket"
    rev.entity_out.check_status = "3:ok"
    rev.entity_out.status = "1:stored"
    rev.entity_out.date = now()
    # Hand the review output to the ingest step as its input.
    wf.ing.entity_in = rev.entity_out
    sf = form_handler.save_form(wf, "test: form_review()")
    assert sf.rev.activity.status == "4:closed"
def test_data_ingest():
    """Walk the ingest step to closed and chain its output to QA."""
    global sf
    wf = utils.load_workflow_form(FORM_JSON)
    ing = wf.ing
    ing.activity.status = "1:in-progress"
    ing.agent.responsible_person = "lenzen"
    ing.activity.comment = " copying data from ... to ... using ... "
    ing.entity_out.report.data_target_directory = "/work/kd0956/cmip5/ingest/cmip5/mpi-m/test"
    ing.entity_out.report.data_file_pattern = "cmip5"
    ing.activity.status = "2:action-required"
    ing.activity.status = "4:closed"
    ing.activity.error_status = "1:ok"
    ing.activity.ticket_id = "1:testticket"
    ing.entity_out.check_status = "3:ok"
    ing.entity_out.status = "1:stored"
    ing.activity.start_time = now()
    ing.entity_out.date = now()
    ing.entity_out.status = "1:stored"
    # Hand the ingest output to the quality-assurance step as its input.
    wf.qua.entity_in = ing.entity_out
    sf = form_handler.save_form(wf,"test: data_ingest()")
    assert sf.ing.activity.status == "4:closed"
def test_data_quality_assurance():
    """Fill in the QA step of the workflow form, save it and check it."""
    global sf
    workflow_form = utils.load_workflow_form(FORM_JSON)
    # Representative QA tool report attached to the QA step's output entity.
    sample_report = {
        "QA_conclusion": "PASS",
        "project": "CORDEX",
        "model": "KNMI-RACMO22E",
        "domain": "EUR-11",
        "driving_experiment": ["ICHEC-EC-EARTH"],
        "experiment": ["historical"],
        "ensemble_member": ["r12i1p1"],
        "annotation": [
            {
                "scope": ["mon", "sem"],
                "variable": ["sfcWindmax", "sund", "tasmax", "tasmin"],
                "caption": "Attribute <cell_methods> entails <time>:climatology instead of <time>:time_bnds",
                "comment": "Here, data of variable climatology is equivalent to time_bnds",
                "severity": "note"
            }
        ]
    }
    qa_step = workflow_form.qua
    qa_step.activity.status = "1:in-progress"
    qa_step.agent.responsible_person = "hdh"
    qa_step.activity.comment = "on lizzard "
    qa_step.activity.qua_tool_version = "dkrz_qa_v09"
    qa_step.activity.start_time = now()
    qa_step.entity_out.report = sample_report
    qa_step.activity.status = "2:action-required"
    qa_step.activity.status = "4:closed"
    qa_step.activity.error_status = "1:ok"
    qa_step.activity.ticket_id = "1:testticket"
    qa_step.entity_out.check_status = "3:ok"
    qa_step.entity_out.status = "1:stored"
    qa_step.entity_out.date = now()
    # The QA output feeds the publication step.
    workflow_form.pub.entity_in = qa_step.entity_out
    sf = form_handler.save_form(workflow_form, "test: quality_assurance()")
    assert sf.qua.activity.status == "4:closed"
def test_data_publication():
    """Fill in the publication step of the workflow form, save it and check it."""
    global sf
    workflow_form = utils.load_workflow_form(FORM_JSON)
    publication = workflow_form.pub
    # Fix: status was spelled "1_in-progress" here; every other workflow step
    # uses the "N:label" form ("1:in-progress"), so make this consistent.
    publication.activity.status = "1:in-progress"
    publication.activity.start_time = now()
    publication.agent.responsible_person = "berger"
    publication.agent.trigger = "other"
    publication.activity.timestamp = "2016-05-20 18:34:28.934536"
    publication.entity_out.date = "2016-05-20"
    publication.entity_out.report = "....report on publication...."
    publication.entity_out.search_string = " ... "
    publication.activity.status = "4:closed"
    publication.activity.error_status = "1:ok"
    publication.entity_out.check_status = "3:ok"
    publication.entity_out.status = "1:stored"
    publication.activity.ticket_id = "1:testticket"
    # workflow_form.lta ...  (long-term archival step not modelled yet)
    sf = form_handler.save_form(workflow_form, "test: publication()")
    assert sf.pub.activity.status == "4:closed"
def test_data_archival():
    # Long-term archival (lta) step is not implemented yet; placeholder test.
    pass
| |
# Copyright 2014 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
"""Monotonic addition of entities."""
from google.appengine.api import datastore_errors
from google.appengine.ext import ndb
from google.appengine.runtime import apiproxy_errors
from components import utils
from . import txn
__all__ = [
'HIGH_KEY_ID',
'Root',
'get_versioned_most_recent',
'get_versioned_most_recent_async',
'get_versioned_most_recent_with_root',
'get_versioned_most_recent_with_root_async',
'get_versioned_root_model',
'insert',
'insert_async',
'store_new_version',
'store_new_version_async',
]
# 2^53 is the largest integer that can be represented exactly with a float.
# It's a bit large though, so save a few bits and start at 2^47-1.
# (The previous comment said 2^48-1, but (1 << 47) - 1 is 2^47-1.)
HIGH_KEY_ID = (1 << 47) - 1


### Private stuff.


### Public API.
class Root(ndb.Model):
    """Root entity used for store_new_version() and get_versioned_most_recent().

    Either inherit from this class or use get_versioned_root_model().
    """
    # Key id of the most recent child entity in the DB. It is monotonically
    # decreasing starting at HIGH_KEY_ID. It is None if no child is present.
    current = ndb.IntegerProperty(indexed=False)
@ndb.tasklet
def insert_async(entity, new_key_callback=None, extra=None):
    """Inserts an entity in the DB and guarantees creation.

    Similar in principle to ndb.Model.get_or_insert() except that it only
    succeeds when the entity was not already present. As such, this always
    requires a transaction.

    Optionally retries with a new key if |new_key_callback| is provided.

    Arguments:
      entity: entity to save, it should have its .key already set accordingly.
        The .key property will be mutated, even if the function fails. It is
        highly preferable to have a root entity so the transaction can be done
        safely.
      new_key_callback: function to generates a new key if the previous key was
        already taken. If this function returns None, the execution is aborted.
        If this parameter is None, insertion is only tried once. May return a
        future.
      extra: additional entities to store simultaneously. For example a
        bookkeeping entity that must be updated simultaneously along with
        |entity|. All the entities must be inside the same entity group. This
        function is not safe w.r.t. `extra`, entities in this list will
        overwrite entities already in the DB or have their key updated when
        new_key_callback() is called.

    Returns:
      ndb.Key of the newly saved entity or None if the entity was already
      present in the db.
    """
    assert not ndb.in_transaction()
    assert entity.key.id(), entity.key
    entities = [entity]
    if extra:
        entities.extend(extra)
        # All extra entities must share |entity|'s entity group (root pair) so
        # that a single transaction can cover all of them.
        root = entity.key.pairs()[0]
        assert all(i.key and i.key.pairs()[0] == root for i in extra), extra

    def new_key_callback_async():
        # Normalize new_key_callback()'s result (key, future or None) into a
        # future so the caller can always yield it.
        key = None
        if new_key_callback:
            key = new_key_callback()
        if isinstance(key, ndb.Future):
            return key
        future = ndb.Future()
        future.set_result(key)
        return future

    @ndb.tasklet
    def run():
        # Transactional body: abort if the key is already taken, else put.
        if (yield entities[0].key.get_async()):
            # The entity exists, abort.
            raise ndb.Return(False)
        yield ndb.put_multi_async(entities)
        raise ndb.Return(True)

    # TODO(maruel): Run a severe load test and count the number of retries.
    while True:
        # First iterate outside the transaction in case the first entity key
        # number selected is already used.
        while entity.key and entity.key.id() and (yield entity.key.get_async()):
            entity.key = yield new_key_callback_async()
        if not entity.key or not entity.key.id():
            # new_key_callback() asked to abort.
            break
        try:
            if (yield txn.transaction_async(run, retries=0)):
                break
        except txn.CommitError:
            # Retry with the same key.
            pass
        else:
            # Entity existed. Get the next key.
            entity.key = yield new_key_callback_async()
    raise ndb.Return(entity.key)


insert = utils.sync_of(insert_async)
def get_versioned_root_model(model_name):
    """Returns a Root subclass whose datastore kind is *model_name*.

    Using this entity for get_versioned_most_recent(),
    get_versioned_most_recent_with_root() and store_new_version() is optional.
    Any entity with cls.current as an ndb.IntegerProperty will do.
    """
    assert isinstance(model_name, str), model_name

    class _NamedRoot(Root):
        # Override the kind so entities are stored under |model_name|.
        @classmethod
        def _get_kind(cls):
            return model_name

    return _NamedRoot
@ndb.tasklet
def get_versioned_most_recent_async(cls, root_key):
    """Returns the most recent entity of cls child of root_key."""
    # Delegates to the *_with_root_async variant and drops the root entity.
    _, entity = yield get_versioned_most_recent_with_root_async(cls, root_key)
    raise ndb.Return(entity)


get_versioned_most_recent = utils.sync_of(get_versioned_most_recent_async)
@ndb.tasklet
def get_versioned_most_recent_with_root_async(cls, root_key):
    """Returns the most recent instance of a versioned entity and the root entity.

    Getting the root entity is needed to get the current index.
    """
    # Using a cls.query(ancestor=root_key).get() would work too but is less
    # efficient since it can't be cached by ndb's cache.
    assert not ndb.in_transaction()
    assert issubclass(cls, ndb.Model), cls
    assert root_key is None or isinstance(root_key, ndb.Key), root_key
    # Fix: the root was fetched with a synchronous root_key.get() inside this
    # tasklet, which blocks the event loop and defeats the async API; use the
    # async variant and yield it like the child fetch below.
    root = yield root_key.get_async()
    if not root or not root.current:
        raise ndb.Return(None, None)
    entity = yield ndb.Key(cls, root.current, parent=root_key).get_async()
    raise ndb.Return(root, entity)


get_versioned_most_recent_with_root = utils.sync_of(
    get_versioned_most_recent_with_root_async)
@ndb.tasklet
def store_new_version_async(entity, root_cls, extra=None):
    """Stores a new version of the instance.

    entity.key is updated to the key used to store the entity. Only the parent
    key needs to be set. E.g. Entity(parent=ndb.Key(ParentCls, ParentId), ...)
    or entity.key = ndb.Key(Entry, None, ParentCls, ParentId).

    If there was no root entity in the DB, one is created by calling
    root_cls(). Fetch for root entity is not done in a transaction, so this
    function is unsafe w.r.t. root content.

    Arguments:
      entity: ndb.Model entity to append in the DB.
      root_cls: class returned by get_versioned_root_model().
      extra: extraneous entities to put in the transaction. They must all be in
        the same entity group.

    Returns:
      tuple(root, entity) with the two entities that were PUT in the db.
    """
    assert not ndb.in_transaction()
    assert isinstance(entity, ndb.Model), entity
    assert entity.key and entity.key.parent(), 'entity.key.parent() must be set.'
    # Access to a protected member _XX of a client class - pylint: disable=W0212
    assert root_cls._properties.keys() == ['current'], (
        'This function is unsafe for root entity, use store_new_version_safe '
        'which is not yet implemented')
    root_key = entity.key.parent()
    # Best-effort read of the root; creates a fresh one when absent.
    root = (yield root_key.get_async()) or root_cls(key=root_key)
    root.current = root.current or HIGH_KEY_ID
    # Rewrite the entity key's final id to the current version slot.
    flat = list(entity.key.flat())
    flat[-1] = root.current
    entity.key = ndb.Key(flat=flat)

    def _new_key_minus_one_current():
        # Version ids decrease monotonically; try the next lower slot and keep
        # the root's bookkeeping in sync.
        flat[-1] -= 1
        root.current = flat[-1]
        return ndb.Key(flat=flat)

    # Copy |extra| so the caller's list is not mutated; the root rides along in
    # the same transaction.
    extra = (extra or [])[:]
    extra.append(root)
    result = yield insert_async(entity, _new_key_minus_one_current, extra=extra)
    raise ndb.Return(result)


store_new_version = utils.sync_of(store_new_version_async)
| |
"""
Copyright (c) 2014, Samsung Electronics Co.,Ltd.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of Samsung Electronics Co.,Ltd..
"""
"""
cuda4py - CUDA cffi bindings and helper classes.
URL: https://github.com/ajkxyz/cuda4py
Original author: Alexey Kazantsev <a.kazantsev@samsung.com>
"""
"""
Tests some of the api in cuda4py package.
"""
import gc
import logging
import cuda4py as cu
try:
import numpy
except ImportError:
pass
import os
import threading
import unittest
class Test(unittest.TestCase):
    def setUp(self):
        """Pin CUDA_DEVICE to "0" when unset and locate the test-data dir."""
        self.old_env = os.environ.get("CUDA_DEVICE")
        if self.old_env is None:
            os.environ["CUDA_DEVICE"] = "0"
        self.path = os.path.dirname(__file__)
        if not len(self.path):
            self.path = "."
def tearDown(self):
    """Restore the original CUDA_DEVICE value and force a GC pass."""
    if self.old_env is None:
        del os.environ["CUDA_DEVICE"]
    else:
        os.environ["CUDA_DEVICE"] = self.old_env
    del self.old_env
    # Collect so CUDA handles held by test objects are released promptly.
    gc.collect()
def test_devices(self):
    """Enumerate CUDA devices and look one up again by its PCI bus id."""
    logging.debug("ENTER: test_devices")
    devices = cu.Devices()
    logging.debug("Found %d CUDA device%s", len(devices),
                  "" if len(devices) <= 1 else "s")
    for i, device in enumerate(devices):
        logging.debug("%d: %s", i, device.name)
    if not len(devices):
        # Nothing to test on a machine without CUDA devices.
        return
    logging.debug("Selecting device 0")
    d = devices[0]
    self.assertEqual(d.handle, int(d.handle))
    logging.debug("It's name is %s", d.name)
    logging.debug("It's total mem is %d", d.total_mem)
    logging.debug("It's compute capability is %d_%d",
                  *d.compute_capability)
    logging.debug("It's pci bus id: %s", d.pci_bus_id)
    logging.debug("Trying to get device by it's pci id")
    d2 = cu.Device(d.pci_bus_id)
    self.assertEqual(d2.handle, d.handle)
    logging.debug("Succeeded")
    logging.debug("EXIT: test_devices")
def test_attributes(self):
    """Sanity-check types and ranges of the Device attribute properties."""
    d = cu.Devices()[0]
    self.assertIsInstance(d.unified_addressing, bool)
    self.assertGreater(d.warp_size, 0)
    self.assertGreater(d.max_threads_per_block, 0)
    self.assertGreaterEqual(d.max_shared_memory_per_block, 0)
    xyz = d.max_block_dims
    self.assertIsInstance(xyz, tuple)
    self.assertEqual(len(xyz), 3)
    for x in xyz:
        self.assertGreater(x, 0)
    xyz = d.max_grid_dims
    self.assertIsInstance(xyz, tuple)
    self.assertEqual(len(xyz), 3)
    for x in xyz:
        self.assertGreater(x, 0)
    self.assertGreater(d.max_registers_per_block, 0)
    self.assertGreater(d.clock_rate, 0)
    self.assertGreater(d.memory_clock_rate, 0)
    self.assertGreaterEqual(d.total_constant_memory, 0)
    self.assertGreater(d.multiprocessor_count, 0)
    self.assertGreaterEqual(d.kernel_exec_timeout, 0)
    self.assertIsInstance(d.integrated, bool)
    self.assertIsInstance(d.can_map_host_memory, bool)
    self.assertIsInstance(d.concurrent_kernels, bool)
    self.assertIsInstance(d.ecc_enabled, bool)
    self.assertGreater(d.memory_bus_width, 0)
    self.assertGreaterEqual(d.l2_cache_size, 0)
    self.assertGreater(d.max_threads_per_multiprocessor, 0)
    self.assertGreaterEqual(d.async_engine_count, 0)
    self.assertIsInstance(d.stream_priorities_supported, bool)
    self.assertIsInstance(d.global_l1_cache_supported, bool)
    self.assertIsInstance(d.local_l1_cache_supported, bool)
    self.assertGreaterEqual(d.max_shared_memory_per_multiprocessor, 0)
    self.assertGreater(d.max_registers_per_multiprocessor, 0)
    self.assertIsInstance(d.managed_memory, bool)
    self.assertIsInstance(d.multi_gpu_board, bool)
    self.assertGreaterEqual(d.multi_gpu_board_group_id, 0)
    self.assertGreaterEqual(d.max_pitch, 0)
def test_extract_ptr(self):
    """extract_ptr maps numpy arrays to their data pointer and None to 0."""
    a = numpy.zeros(127, dtype=numpy.float32)
    ptr = cu.CU.extract_ptr(a)
    self.assertEqual(ptr, int(a.__array_interface__["data"][0]))
    ptr2, sz = cu.CU.extract_ptr_and_size(a, None)
    self.assertEqual(ptr, ptr2)
    self.assertEqual(sz, a.nbytes)
    # None is treated as a null pointer of size 0.
    ptr = cu.CU.extract_ptr(None)
    self.assertEqual(ptr, 0)
    ptr2, sz = cu.CU.extract_ptr_and_size(None, 0)
    self.assertEqual(ptr2, 0)
    self.assertEqual(sz, 0)
def test_dump_devices(self):
    """dump_devices() produces a human-readable device listing."""
    logging.debug("ENTER: test_dump_devices")
    logging.debug("Available CUDA devices:\n%s",
                  cu.Devices().dump_devices())
    logging.debug("EXIT: test_dump_devices")
def _fill_retval(self, retval, target, args):
retval[0], retval[1] = target(*args)
def _run_on_thread(self, target, args):
retval = [None, None, None]
thread = threading.Thread(target=self._fill_retval,
args=(retval, target, args))
thread.start()
thread.join(5)
if thread.is_alive():
raise TimeoutError()
return retval[0], retval[1]
def _check_push_pop(self, ctx):
    """Push then pop *ctx*; return the current handle at each stage."""
    ctx.push_current()
    h = cu.Context.get_current()
    ctx.pop_current()
    h0 = cu.Context.get_current()
    return h, h0
def _check_with(self, ctx):
    """Use *ctx* as a context manager; return handles inside and after."""
    with ctx:
        logging.debug("Inside with statement")
        h = cu.Context.get_current()
    h0 = cu.Context.get_current()
    return h, h0
def _check_set_current(self, ctx):
    """Make *ctx* current and return the resulting current handle."""
    ctx.set_current()
    return cu.Context.get_current(), None
def test_context(self):
    """Exercise push/pop, with-statement and set_current from other threads."""
    logging.debug("ENTER: test_context")
    ctx = cu.Devices().create_some_context()
    logging.debug("Context created")
    self.assertEqual(ctx.handle, cu.Context.get_current())
    # Each check runs on a fresh thread where no context is current initially.
    h, h0 = self._run_on_thread(self._check_push_pop, (ctx,))
    self.assertEqual(h, ctx.handle)
    self.assertEqual(h0, 0)
    logging.debug("push/pop succeeded")
    h, h0 = self._run_on_thread(self._check_with, (ctx,))
    self.assertEqual(h, ctx.handle)
    self.assertEqual(h0, 0)
    logging.debug("with succeeded")
    self.assertEqual(
        self._run_on_thread(self._check_set_current, (ctx,))[0],
        ctx.handle)
    logging.debug("set_current succeeded")
    logging.debug("EXIT: test_context")
def test_module(self):
    """Compile CUDA sources into modules and fetch functions/globals."""
    logging.debug("ENTER: test_module")
    ctx = cu.Devices().create_some_context()
    module = cu.Module(ctx, source_file="%s/test.cu" % self.path)
    self.assertIsNotNone(module.handle)
    self.assertIsNotNone(ctx.handle)
    logging.debug("nvcc compilation succeeded")
    logging.debug("Resulted ptx code is:\n%s", module.ptx.decode("utf-8"))
    logging.debug("Will try Context.create_module")
    module = ctx.create_module(source_file="%s/test.cu" % self.path)
    self.assertIsNotNone(module.handle)
    self.assertIsNotNone(ctx.handle)
    logging.debug("Succeeded")
    logging.debug("Will try to compile with includes")
    module = cu.Module(ctx, source_file="%s/inc.cu" % self.path,
                       include_dirs=("", self.path, ""))
    self.assertIsNotNone(module.handle)
    self.assertIsNotNone(ctx.handle)
    logging.debug("Succeeded")
    logging.debug("Will try to compile with source")
    module = cu.Module(ctx, source="#include \"inc.cu\"",
                       include_dirs=(self.path,))
    self.assertIsNotNone(module.handle)
    self.assertIsNotNone(ctx.handle)
    logging.debug("Succeeded")
    logging.debug("Testing get_func, get_global")
    with ctx:
        self.assertIsNotNone(module.get_func("test"))
        ptr, size = module.get_global("g_a")
        self.assertEqual(ptr, int(ptr))
        # g_a is a single 4-byte global in test.cu.
        self.assertEqual(size, 4)
    logging.debug("Succeeded")
    logging.debug("EXIT: test_module")
def _test_alloc(self, alloc, test=None):
    """Common allocator checks: size-based alloc and numpy round-trip.

    *alloc* is called once with a byte count and once with a numpy array;
    the optional *test* callback gets each resulting memory object.
    """
    mem = alloc(4096)
    self.assertEqual(mem.handle, int(mem.handle))
    self.assertEqual(mem.handle, int(mem))
    self.assertEqual(mem.size, 4096)
    self.assertIsNotNone(mem.handle)
    if test is not None:
        test(mem)
    # Allocating from an array should copy its contents to the device.
    a = numpy.random.rand(4096).astype(numpy.float32)
    mem = alloc(a)
    b = numpy.zeros_like(a)
    mem.to_host(b)
    max_diff = float(numpy.fabs(a - b).max())
    self.assertEqual(max_diff, 0.0)
    if test is not None:
        test(mem)
def test_mem_alloc(self):
    """MemAlloc via constructor and via Context.mem_alloc."""
    logging.debug("ENTER: test_mem_alloc")
    ctx = cu.Devices().create_some_context()
    self._test_alloc(lambda a: cu.MemAlloc(ctx, a))
    self._test_alloc(ctx.mem_alloc)
    logging.debug("MemAlloc succeeded")
    logging.debug("EXIT: test_mem_alloc")
def test_mem_alloc_managed(self):
    """MemAllocManaged via constructor and via Context.mem_alloc_managed."""
    logging.debug("ENTER: test_mem_alloc_managed")
    ctx = cu.Devices().create_some_context()
    self._test_alloc(lambda a: cu.MemAllocManaged(ctx, a))
    self._test_alloc(ctx.mem_alloc_managed)
    logging.debug("MemAllocManaged succeeded")
    logging.debug("EXIT: test_mem_alloc_managed")
def test_mem_host_alloc(self):
    """MemHostAlloc; also checks device_pointer and buffer properties."""
    logging.debug("ENTER: test_mem_host_alloc")
    ctx = cu.Devices().create_some_context()

    def test(mem):
        devptr = mem.device_pointer
        self.assertEqual(devptr, int(devptr))
        if ctx.device.unified_addressing:
            # With unified addressing host and device pointers coincide.
            self.assertEqual(devptr, mem.handle)
        self.assertIsNotNone(mem.buffer)

    self._test_alloc(lambda a: cu.MemHostAlloc(ctx, a), test)
    self._test_alloc(ctx.mem_host_alloc, test)
    logging.debug("MemHostAlloc succeeded")
    logging.debug("EXIT: test_mem_host_alloc")
def test_launch_kernel(self):
    """Build a kernel, set args (including skip markers), launch and verify."""
    logging.debug("ENTER: test_launch_kernel")
    ctx = cu.Devices().create_some_context()
    logging.debug("Context created")
    N = 1024
    C = 0.75
    a = cu.MemHostAlloc(ctx, N * 4)
    b = cu.MemHostAlloc(ctx, N * 4)
    logging.debug("Memory allocated")
    module = cu.Module(ctx, source_file="%s/test.cu" % self.path)
    logging.debug("Program builded")
    f = module.get_func("test")
    logging.debug("Got function pointer")
    # Exercise the various set_args/set_arg forms, including cu.skip
    # placeholders that leave previously-set arguments untouched.
    f.set_args(a, b, numpy.array([C], dtype=numpy.float32))
    f.set_args(a, cu.skip, numpy.array([C], dtype=numpy.float32))
    f.set_args(cu.skip(2), numpy.array([C], dtype=numpy.float32))
    f.set_args(a, b, cu.skip(1))
    f.set_args(cu.skip(3))
    f.set_arg(0, None)
    f.set_arg(0, a)
    logging.debug("Args set")
    a_host = numpy.random.rand(N).astype(numpy.float32)
    b_host = numpy.random.rand(N).astype(numpy.float32)
    # Expected result: a += b * C applied 10 times.
    gold = a_host.copy()
    for _ in range(10):
        gold += b_host * C
    a.to_device(a_host)
    b.to_device_async(b_host)
    for _ in range(10):
        f((N, 1, 1))
    logging.debug("Scheduled for execution")
    c_host = numpy.zeros(N, dtype=numpy.float32)
    a.to_host(c_host)
    logging.debug("Got results back")
    max_diff = numpy.fabs(c_host - gold).max()
    self.assertLess(max_diff, 0.0001)
    logging.debug("test_launch_kernel() succeeded")
    logging.debug("EXIT: test_launch_kernel")
def test_memset(self):
    """memset32_async with full-buffer, offset and offset+size variants."""
    logging.debug("ENTER: test_memset")
    ctx = cu.Devices().create_some_context()
    mem = cu.MemAlloc(ctx, 4096)
    mem.memset32_async(123)          # whole buffer
    mem.memset32_async(456, 1)       # from word 1 to the end
    mem.memset32_async(789, 2, 3)    # 3 words starting at word 2
    a = numpy.zeros(mem.size // 4, dtype=numpy.int32)
    mem.to_host(a)
    self.assertEqual(a[0], 123)
    self.assertEqual(a[1], 456)
    for i in range(2, 2 + 3):
        self.assertEqual(a[i], 789)
    for i in range(2 + 3, a.size):
        self.assertEqual(a[i], 456)
    logging.debug("EXIT: test_memset")
def test_memcpy(self):
    """from_device_async device-to-device copies: full, offset, offset+size."""
    logging.debug("ENTER: test_memcpy")
    ctx = cu.Devices().create_some_context()
    a = cu.MemAlloc(ctx, 4096)
    a.memset32_async(123)
    b = cu.MemAlloc(ctx, 4096)
    b.memset32_async(456)
    test = numpy.zeros(a.size // 4, dtype=numpy.int32)
    # Full copy: every word becomes 456.
    a.from_device_async(b)
    a.to_host(test)
    for x in test:
        self.assertEqual(x, 456)
    # Copy starting at byte offset 12 (word 3).
    a.memset32_async(123)
    a.from_device_async(b, 12)
    a.to_host(test)
    for x in test[:3]:
        self.assertEqual(x, 123)
    for x in test[3:]:
        self.assertEqual(x, 456)
    # Copy 64 bytes (16 words) starting at byte offset 12.
    a.memset32_async(123)
    a.from_device_async(b, 12, 64)
    a.to_host(test)
    for x in test[:3]:
        self.assertEqual(x, 123)
    for x in test[3:19]:
        self.assertEqual(x, 456)
    for x in test[19:]:
        self.assertEqual(x, 123)
    logging.debug("EXIT: test_memcpy")
def test_occupancy(self):
    """Occupancy queries: max active blocks and potential block size."""
    logging.debug("ENTER: test_occupancy")
    ctx = cu.Devices().create_some_context()
    logging.debug("Context created")
    module = cu.Module(ctx, source_file="%s/test.cu" % self.path)
    logging.debug("Program builded")
    f = module.get_func("test")
    logging.debug("Got function pointer")
    num_blocks = f.max_active_blocks_per_multiprocessor(1)
    self.assertEqual(num_blocks, int(num_blocks))
    self.assertGreater(num_blocks, 0)
    logging.debug("num_blocks = %d", num_blocks)
    logging.debug("Testing dynamic_smem_size parameter")
    num_blocks = f.max_active_blocks_per_multiprocessor(
        128, dynamic_smem_size=8192)
    self.assertEqual(num_blocks, int(num_blocks))
    self.assertGreater(num_blocks, 0)
    logging.debug("num_blocks = %d", num_blocks)
    min_grid_size, block_size = f.max_potential_block_size()
    self.assertEqual(min_grid_size, int(min_grid_size))
    self.assertEqual(block_size, int(block_size))
    self.assertGreater(min_grid_size, 0)
    self.assertGreater(block_size, 0)
    logging.debug("min_grid_size, block_size = %d, %d",
                  min_grid_size, block_size)
    logging.debug("Trying callback")
    # Callback variant computes dynamic shared memory from the block size.
    min_grid_size, block_size = f.max_potential_block_size(
        lambda x: x ** 2)
    self.assertEqual(min_grid_size, int(min_grid_size))
    self.assertEqual(block_size, int(block_size))
    self.assertGreater(min_grid_size, 0)
    self.assertGreater(block_size, 0)
    logging.debug("min_grid_size, block_size = %d, %d",
                  min_grid_size, block_size)
    logging.debug("Testing block_size_limit parameter")
    min_grid_size, block_size = f.max_potential_block_size(
        block_size_limit=16)
    self.assertEqual(min_grid_size, int(min_grid_size))
    self.assertEqual(block_size, int(block_size))
    self.assertGreater(min_grid_size, 0)
    self.assertGreater(block_size, 0)
    self.assertLessEqual(block_size, 16)
    logging.debug("min_grid_size, block_size = %d, %d",
                  min_grid_size, block_size)
    logging.debug("Testing dynamic_smem_size parameter")
    min_grid_size, block_size = f.max_potential_block_size(
        dynamic_smem_size=8192)
    self.assertEqual(min_grid_size, int(min_grid_size))
    self.assertEqual(block_size, int(block_size))
    self.assertGreater(min_grid_size, 0)
    self.assertGreater(block_size, 0)
    logging.debug("min_grid_size, block_size = %d, %d",
                  min_grid_size, block_size)
    logging.debug("EXIT: test_occupancy")
def test_memcpy_3d_async(self):
    """3D rectangle copies: device->device, host->device, device->host."""
    logging.debug("ENTER: test_memcpy_3d_async")
    p_copy = cu.get_ffi().new("CUDA_MEMCPY3D *")
    # CUDA_MEMCPY3D is 200 bytes in the driver API ABI being bound.
    self.assertEqual(cu.get_ffi().sizeof(p_copy[0]), 200)
    ctx = cu.Devices().create_some_context()
    logging.debug("Context created")
    # Create arrays with some values for testing
    a = numpy.arange(35 * 25 * 15, dtype=numpy.float32).reshape(35, 25, 15)
    b = numpy.arange(37 * 27 * 17, dtype=numpy.float32).reshape(37, 27, 17)
    b *= 0.5
    c = numpy.empty_like(b)
    c[:] = 1.0e30
    # Create buffers
    a_ = cu.MemAlloc(ctx, a)
    b_ = cu.MemAlloc(ctx, b)
    # Copy 3D rect from one device buffer to another
    logging.debug("Testing device -> device memcpy_3d_async")
    sz = a.itemsize
    a_.memcpy_3d_async(
        (3 * sz, 4, 5), (6 * sz, 7, 8), (5 * sz, 10, 20),
        a.shape[2] * sz, a.shape[1], b.shape[2] * sz, b.shape[1],
        dst=b_)
    b_.to_host(c)
    diff = numpy.fabs(c[8:28, 7:17, 6:11] - a[5:25, 4:14, 3:8]).max()
    self.assertEqual(diff, 0)
    # Copy 3D rect from host buffer to device buffer
    logging.debug("Testing host -> device memcpy_3d_async")
    sz = a.itemsize
    b_.memset32_async()
    b_.memcpy_3d_async(
        (3 * sz, 4, 5), (6 * sz, 7, 8), (5 * sz, 10, 20),
        a.shape[2] * sz, a.shape[1], b.shape[2] * sz, b.shape[1],
        src=a)
    c[:] = 1.0e30
    b_.to_host(c)
    diff = numpy.fabs(c[8:28, 7:17, 6:11] - a[5:25, 4:14, 3:8]).max()
    self.assertEqual(diff, 0)
    # Copy 3D rect from device buffer to host buffer
    logging.debug("Testing device -> host memcpy_3d_async")
    sz = a.itemsize
    c[:] = 1.0e30
    a_.memcpy_3d_async(
        (3 * sz, 4, 5), (6 * sz, 7, 8), (5 * sz, 10, 20),
        a.shape[2] * sz, a.shape[1], b.shape[2] * sz, b.shape[1],
        dst=c)
    # Async copy into host memory: synchronize before reading.
    ctx.synchronize()
    diff = numpy.fabs(c[8:28, 7:17, 6:11] - a[5:25, 4:14, 3:8]).max()
    self.assertEqual(diff, 0)
    logging.debug("EXIT: test_memcpy_3d_async")
if __name__ == "__main__":
    # Verbose logging helps diagnose driver/device issues when run directly.
    logging.basicConfig(level=logging.DEBUG)
    unittest.main()
| |
import socket
import asyncore
import codecs
from time import time
from random import randint
from common import BLANK, BUFFERSIZE
from httpscopeinterface import connections_waiting, scope_messages, scope
from utils import pretty_print_XML, pretty_print
def encode_varuint(value):
    """Encode *value* as a protobuf-style varint string (7 bits per byte).

    The value is truncated to 64 bits; the high bit of each byte marks
    that more bytes follow.
    """
    value &= 0xffffffffffffffff
    if not value:
        return "\0"
    pieces = []
    while value:
        septet = value & 0x7f
        value >>= 7
        pieces.append(chr(septet | 0x80 if value else septet))
    return "".join(pieces)
"""
msg_type: 1 = command, 2 = response, 3 = event, 4 = error
message TransportMessage
{
required string service = 1;
required uint32 commandID = 2;
required uint32 format = 3;
optional uint32 status = 4;
optional uint32 tag = 5;
required binary payload = 8;
}
"""
TYPE = 0
SERVICE = 1
COMMAND = 2
FORMAT = 3
STATUS = 4
TAG = 5
PAYLOAD = 8
STP1_COMMAND = "".join([encode_varuint(1),
encode_varuint(SERVICE << 3 | 2), "%s", "%s",
encode_varuint(COMMAND << 3 | 0), "%s",
encode_varuint(FORMAT << 3 | 0), "%s",
encode_varuint(TAG << 3 | 0), "%s",
encode_varuint(PAYLOAD << 3 | 2), "%s", "%s"])
STP1_MSG = "STP\x01%s%s"
class ScopeConnection(asyncore.dispatcher):
    """Socket connection to the scope host, speaking STP/0 or STP/1."""

    def __init__(self, conn, addr, context):
        asyncore.dispatcher.__init__(self, sock=conn)
        self.addr = addr
        # Debug/formatting flags copied from the command-line context.
        self.debug = context.debug
        self.debug_format = context.format
        self.debug_format_payload = context.format_payload
        self.verbose_debug = context.verbose_debug
        self.debug_only_errors = context.only_errors
        self.force_stp_0 = context.force_stp_0
        # STP 0 messages
        self.in_buffer = u""
        self.out_buffer = ""
        self.buf_cursor = 0
        # handle_read/check_input are swapped depending on protocol state.
        self.handle_read = self.handle_read_STP_0
        self.check_input = self.read_int_STP_0
        self.msg_length = 0
        # STP/0 payloads are UTF-16BE; wrap ourselves as a file-like stream.
        self.stream = codecs.lookup('UTF-16BE').streamreader(self)
        # STP 1 messages
        self.connect_client_callback = None
        self.varint = 0
        self._service_list = None
        scope.set_connection(self)
        self._msg_count = 0
        self._last_time = 0
# ============================================================
# STP 0
# ============================================================
# Initialisation, command and message flow for STP 0
#
# Opera proxy client
#
# *services ---------------->
# -----------------> *services
# <----------------- *enable
# *enable <----------------
# data <---------------------------------------> data
# ....
# <------------------ *quit
# *disable <----------------
# *quit ---------------->
# ------------------> *hostquit
# ------------------> *quit
#
# See also http://dragonfly.opera.com/app/scope-interface for more details.
def send_command_STP_0(self, msg):
    """Send an STP/0 message ("service payload") to the scope host.

    The wire format is "<length> <msg>" encoded as UTF-16BE.
    """
    if self.debug and not self.debug_only_errors:
        service, payload = msg.split(BLANK, 1)
        pretty_print_XML("\nsend to scope: %s" % service, payload, self.debug_format)
    self.out_buffer += ("%s %s" % (len(msg), msg)).encode("UTF-16BE")
    self.handle_write()
def send_STP0_message_to_client(self, command, msg):
    """Deliver an STP/0 message to a waiting client, or queue it for later."""
    payload = (command, msg)
    if not connections_waiting:
        scope_messages.append(payload)
        return
    waiting = connections_waiting.pop(0)
    waiting.return_scope_message_STP_0(payload, self)
def read_int_STP_0(self):
    """Parse the decimal length prefix of an STP/0 message, then hand over
    to read_msg_STP_0."""
    if BLANK in self.in_buffer:
        raw_int, self.in_buffer = self.in_buffer.split(BLANK, 1)
        self.msg_length = int(raw_int)
        self.check_input = self.read_msg_STP_0
        self.check_input()
def read_msg_STP_0(self):
    """Consume one complete STP/0 message from the input buffer.

    Handles the initial "*services" handshake (possibly upgrading to STP/1
    unless --force-stp-0) and forwards enabled-service messages to clients.
    """
    if len(self.in_buffer) >= self.msg_length:
        command, msg = self.in_buffer[0:self.msg_length].split(BLANK, 1)
        self.in_buffer = self.in_buffer[self.msg_length:]
        self.msg_length = 0
        msg = msg.encode("UTF-8")
        command = command.encode("UTF-8")
        if command == "*services":
            services = msg.split(',')
            print "services available:\n ", "\n ".join(services)
            if not self.force_stp_0 and 'stp-1' in services:
                # Upgrade: switch this connection to the STP/1 handshake.
                self.set_initializer_STP_1()
                self.send_command_STP_0('*enable stp-1')
                self._service_list = services
            else:
                scope.set_service_list(services)
                for service in services:
                    scope.services_enabled[service] = False
        elif command in scope.services_enabled:
            self.send_STP0_message_to_client(command, msg)
        # Go back to reading the next message's length prefix.
        self.check_input = self.read_int_STP_0
        self.check_input()
def read(self, max_length):
    """File-like read() so the codecs streamreader can wrap this socket.

    Returns '' on socket errors, mimicking end-of-stream.
    """
    try:
        data = self.recv(max_length)
    except socket.error:
        return ''
    return data
def handle_read_STP_0(self):
    """general read event handler for STP 0"""
    # stream decodes the incoming UTF-16BE bytes to unicode.
    self.in_buffer += self.stream.read(BUFFERSIZE)
    self.check_input()
# ============================================================
# STP 1
# ============================================================
# Initialisation of STP 1
# see also scope-transport-protocol.txt and scope-stp1-services.txt
#
# If stp-1 is in the service list it will get enabled on receiving
# the service list in this class (the STP 1 handshake).
# The command "services" in the http interface ( HTTPScopeInterface )
# is treated as (re)load event of the client. That event triggers the
# Connect command ( which also resets any state for that client
# in the host ), executed in the Scope class. If the command succeeds
# the service list is returned to the client. From this point on the control
# is up to the client.
#
# ~~~~~~~~~> Handshake
# ~ ~ ~ ~ ~> Handshake response
# ---------> Command
# - - - - -> Response
# =========> Event
#
# The client must then initiate the handshake which also determines the STP
# version to use, for instance to enable STP version 1::
#
# Host client
#
# *services =================>
# <~~~~~~~~~~~~~~~~~ *enable stp-1
# STP/1\n ~ ~ ~ ~ ~ ~ ~ ~ ~>
# <~~~~~~~~~~~~~~~~~ scope.Connect
# scope.Connect ~ ~ ~ ~ ~ ~ ~ ~ ~>
#
# Typical message flow between a client, proxy and host looks like this:
#
# Opera proxy client
#
# handshake <~~~~~~~~~~~~~~~~ ~ ~ ~ ~ ~ ~ ~ ~ ~> handshake
# <----------------- scope.Connect
# scope.Connect <----------------
# - - - - - - - - >
# - - - - - - - - -> scope.Connect
# <----------------- scope.Enable
# scope.Enable <----------------
# - - - - - - - - >
# - - - - - - - - -> scope.Enable
#
# messages <------------------- - - - - - - - - -> messages
# events =======================================>
# ....
# <----------------- scope.Disconnect
# scope.Disconnect<----------------
# - - - - - - - - >
# - - - - - - - - -> scope.Disconnect
#
#
# See also http://dragonfly.opera.com/app/scope-interface for more details.
def set_initializer_STP_1(self):
    """Switch this connection into the STP/1 handshake state.

    Both buffers must be drained first; in_buffer becomes a byte string
    because STP/1 is a binary protocol.
    """
    if self.in_buffer or self.out_buffer:
        raise Exception("read or write buffer is not empty in set_initializer_STP_1")
    self.in_buffer = ""
    self.out_buffer = ""
    self.handle_read = self.read_STP_1_initializer
    self.check_input = None
    self.msg_length = 0
def read_STP_1_initializer(self):
    """Wait for the "STP/1\\n" handshake response, then switch to STP/1 reads."""
    self.in_buffer += self.recv(BUFFERSIZE)
    if self.in_buffer.startswith("STP/1\n"):
        self.in_buffer = self.in_buffer[6:]
        scope.set_STP_version("stp-1")
        scope.set_service_list(self._service_list)
        self._service_list = None
        # Cursor starts past the 4-byte "STP\x01" prefix of the first frame.
        self.buf_cursor = 4
        self.handle_read = self.handle_read_STP_1
        self.handle_stp1_msg = self.handle_stp1_msg_default
        if self.in_buffer: self.handle_read()
    def send_command_STP_1(self, msg):
        # Serialize `msg` (a dict keyed by the SERVICE/COMMAND/FORMAT/TAG/
        # PAYLOAD field constants) into an STP/1 frame and queue it for the
        # host.  Variable-length fields are prefixed by varint lengths.
        if self.debug and not self.debug_only_errors:
            pretty_print("send to host:", msg, self.debug_format, self.debug_format_payload)
        stp_1_cmd = STP1_COMMAND % (encode_varuint(len(msg[SERVICE])), msg[SERVICE],
                                encode_varuint(msg[COMMAND]),
                                encode_varuint(msg[FORMAT]),
                                encode_varuint(msg[TAG]),
                                encode_varuint(len(msg[PAYLOAD])), msg[PAYLOAD])
        self.out_buffer += STP1_MSG % (encode_varuint(len(stp_1_cmd)), stp_1_cmd)
        # Flush synchronously instead of waiting for the asyncore poll loop.
        self.handle_write()
    def handle_read_STP_1(self):
        # Main STP/1 read loop.  Each message on the wire is a varint body
        # length followed by the body; self.varint caches a decoded length
        # while we wait for the remainder of the body to arrive.
        self.in_buffer += self.recv(BUFFERSIZE)
        while True:
            if not self.varint:
                # Expecting a new message: decode its body length.
                varint = self.decode_varuint()
                if varint == None: break  # length itself still incomplete
                else: self.varint = varint
            else:
                pos = self.buf_cursor + self.varint
                if len(self.in_buffer) >= pos:
                    # Whole body is buffered -- parse exactly one message.
                    self.parse_STP_1_msg(pos)
                    self.varint = 0
                    if len(self.in_buffer) > BUFFERSIZE:
                        # Trim consumed bytes to keep the buffer bounded;
                        # cursor 4 skips the next message's 4-byte prefix.
                        self.in_buffer = self.in_buffer[pos:]
                        self.buf_cursor = 4
                    else: self.buf_cursor = pos + 4
                else: break  # body not fully received yet
    def parse_STP_1_msg(self, end_pos):
        # Decode one protobuf-style message from
        # in_buffer[buf_cursor:end_pos].  Each field starts with a varint
        # key (tag << 3 | wire_type); wire type 2 is length-delimited data,
        # wire type 0 a plain varint.  The decoded dict is handed to the
        # currently installed handle_stp1_msg callback.
        msg_type = self.decode_varuint()
        if msg_type == None:
            raise Exception("Message type of STP 1 message cannot be parsed")
        else:
            # Pre-populate defaults so consumers can rely on these keys.
            msg = {TYPE: msg_type, STATUS: 0, TAG: 0, PAYLOAD: ""}
            while self.buf_cursor < end_pos:
                varint = self.decode_varuint()
                if not varint == None:
                    tag, type = varint >> 3, varint & 7
                    if type == 2:  # length-delimited payload
                        length = self.decode_varuint()
                        pos = self.buf_cursor
                        msg[tag] = self.in_buffer[pos:pos + length]
                        self.buf_cursor += length
                    elif type == 0:  # varint scalar
                        value = self.decode_varuint()
                        msg[tag] = value
                    else: raise Exception("Not valid type in STP 1 message")
                else: raise Exception("Cannot read STP 1 message part")
            self.handle_stp1_msg(msg)
    def handle_stp1_msg_default(self, msg):
        # Default sink for host messages: deliver the message to the first
        # waiting client connection, or queue it until a client shows up.
        if connections_waiting:
            connections_waiting.pop(0).return_scope_message_STP_1(msg, self)
        else:
            scope_messages.append(msg)
def set_msg_handler(self, handler):
self.handle_stp1_msg = handler
def clear_msg_handler(self):
self.handle_stp1_msg = self.handle_stp1_msg_default
    def connect_client(self, callback):
        # Issue scope.Connect (command 3 on the "scope" service) on behalf
        # of a client; `callback` fires once the host acknowledges.  Until
        # then, responses are filtered through handle_connect_client.
        self.connect_client_callback = callback
        self.handle_stp1_msg = self.handle_connect_client
        self.send_command_STP_1({TYPE: 1,
                                SERVICE: "scope",
                                COMMAND: 3,
                                FORMAT: 1,
                                TAG: 0,
                                PAYLOAD: '["json"]'})
def handle_connect_client(self, msg):
if self.debug and not self.debug_only_errors:
pretty_print("client connected:", msg, self.debug_format, self.debug_format_payload)
if msg[SERVICE] == "scope" and msg[COMMAND] == 3 and msg[STATUS] == 0:
self.handle_stp1_msg = self.handle_stp1_msg_default
self.connect_client_callback()
self.connect_client_callback = None
else:
print "conection to host failed in scope.handle_connect_callback"
    def decode_varuint(self):
        # Decode a base-128 varint (least-significant group first, high bit
        # set = continuation) starting at self.buf_cursor.  On success the
        # cursor is advanced and the value returned; if the buffered data
        # ends mid-varint, returns None WITHOUT consuming anything so the
        # caller can retry once more bytes arrive.  Capped at 10 groups.
        value = 0
        buf_len = len(self.in_buffer)
        pos = self.buf_cursor
        for i in [0, 7, 14, 21, 28, 35, 42, 49, 56, 63]:
            if pos >= buf_len: return None  # incomplete: cursor untouched
            c = ord(self.in_buffer[pos])
            pos += 1
            if c & 0x80: value += c - 128 << i  # i.e. (c & 0x7f) << i; continue
            else:
                value += c << i
                self.buf_cursor = pos  # commit only on a complete varint
                return value
        return None  # overlong varint: more than 10 continuation groups
# ============================================================
# Implementations of the asyncore.dispatcher class methods
# ============================================================
def handle_read(self):
pass
def writable(self):
return (len(self.out_buffer) > 0)
def handle_write(self):
sent = self.send(self.out_buffer)
self.out_buffer = self.out_buffer[sent:]
def handle_close(self):
scope.reset()
self.close()
| |
#!/usr/bin/env python
import rospy
from threading import Thread
from sensor_msgs.msg import JointState
from brics_actuator.msg import JointVelocities, JointValue
from rospy.exceptions import ROSException
from robotnik_msgs.msg import State
from std_srvs.srv import Empty
from cob_srvs.srv import Trigger
import time
#
# STANDARD INTERFACE
#
class DeviceCommandInterface():
    '''
    Class intended to communicate with controller devices by using a standard interface
    '''
    def __init__(self, args):
        '''
        Component initialization
        @param args: arguments to configure the component
        @type args: {name: string, command_topic: string, state_topic: string, joints: [string]}
        '''
        self.initialized = False
        # dict.has_key() is deprecated (removed in Python 3); use `in`.
        if 'type' in args:
            self.type = args['type']
        else:
            self.type = 'DeviceCommandInterface'
        if 'name' in args:
            self.name = args['name']
        else:
            self.name = 'unnamed'
            rospy.logerr('%s:init: param name not found'%self.type)
        if 'command_topic' in args:
            self.command_topic = args['command_topic']
        else:
            self.command_topic = ''
            rospy.logerr('%s:init: param command_topic not found'%self.type)
        if 'state_topic' in args:
            self.state_topic = args['state_topic']
            # Route this debug line through rospy logging instead of a bare
            # print so it appears with the rest of the node's log output.
            rospy.loginfo('%s:init: state topic = %s'%(self.type, self.state_topic))
        else:
            self.state_topic = ''
            rospy.logerr('%s:init: param state_topic not found'%self.type)
        if 'joints' in args:
            self.joint_names = args['joints']
        else:
            self.joint_names = []
            rospy.logerr('%s:init: param joints not found'%self.type)
        self.joint_state = JointState()
        # Intermediate structure to save each pos, vel and effort value
        # before sending the command to the component.
        # E.g.: {'j1': [ 0, 0, 0 ]}
        self.joint_state_pointer = {}
        # State of the component
        self.state = State.READY_STATE
    def setup(self):
        '''
        Initializes joint values, connects to command and state topics
        @return: 0 if OK, -1 otherwise
        '''
        if len(self.joint_names) == 0:
            rospy.logerr('%s-%s:setup: no joints provided'%(self.type, self.name))
            return -1
        for joint_name in self.joint_names:
            self.joint_state.name.append(joint_name)
            self.joint_state.position.append(0)
            self.joint_state.velocity.append(0)
            self.joint_state.effort.append(0)
            # [position, velocity, effort] scratch slot for this joint
            self.joint_state_pointer[joint_name] = [0, 0, 0]
        '''
        # TODO for each component
        if len(self.state_topic) > 0:
            try:
                self.state_subscriber = rospy.Subscriber(self.state_topic, String, self.receiveStateCb)
            except ValueError as e:
                rospy.logerr('%s-%s:setup: Error connecting to topic %s ->(%s)'%(self.type, self.name, self.state_topic, e))
        '''
        if len(self.command_topic) > 0:
            try:
                # NOTE(review): no queue_size is passed (deprecated in newer
                # rospy); left as-is to avoid changing publish behavior.
                self.command_publisher = rospy.Publisher(self.command_topic, JointState)
                rospy.loginfo('%s-%s:setup: connecting to topic %s'%(self.type, self.name, self.command_topic))
            except ValueError as e:
                rospy.logerr('%s-%s:setup: Error connecting to topic %s ->(%s)'%(self.type, self.name, self.command_topic, e))
                return -1
        else:
            rospy.logerr('%s-%s:setup: No command topic supplied.'%(self.type, self.name))
            return -1
        self.initialized = True
        return 0
    def setDesiredJointValue(self, name, value):
        '''
        Sets the joint value to desired value
        @param name: name of the joint
        @type name: string
        @param value: desired value of the joint
        @type value: array with the values of [position, velocity, effort]
        @return: 0 if OK, -1 otherwise
        '''
        if not self.initialized:
            # BUG FIX: the original format string had two placeholders but
            # was given three arguments, raising TypeError instead of logging.
            rospy.logerr('%s-%s:setDesiredJointValue: interface not initialized correctly'%(self.type, self.name))
            return -1
        if name in self.joint_state_pointer:
            if len(value) == 3:
                self.joint_state_pointer[name][0] = float(value[0])
                self.joint_state_pointer[name][1] = float(value[1])
                self.joint_state_pointer[name][2] = float(value[2])
            else:
                rospy.logerr('%s-%s:setDesiredJointValue: incorrect length of desired value %s'%(self.type, self.name, value))
                return -1
        else:
            rospy.logerr('%s-%s:setDesiredJointValue: joint %s is not associated with this interface'%(self.type, self.name, name))
            return -1
        return 0
    def sendCommand(self):
        '''
        Sends the current value of joint_state attribute to the controller
        @return: 0 if OK, -1 otherwise
        '''
        if not self.initialized:
            return -1
        # copy desired values into the joint_state structure
        for i, joint_name in enumerate(self.joint_names):
            self.joint_state.position[i] = self.joint_state_pointer[joint_name][0]
            self.joint_state.velocity[i] = self.joint_state_pointer[joint_name][1]
            self.joint_state.effort[i] = self.joint_state_pointer[joint_name][2]
        self.joint_state.header.stamp = rospy.Time.now()
        self.command_publisher.publish(self.joint_state)
        return 0
    def getState(self):
        '''
        Gets the state of the controller interface
        @return: the state of the component based on robotnik_trajectory_msgs.msg.State
        '''
        return self.state
    def stop(self):
        '''
        Stops any movement by commanding zero velocity on every joint
        @return: 0 if OK, -1 otherwise
        '''
        for name in self.joint_names:
            self.joint_state_pointer[name][1] = 0.0
        self.sendCommand()
        return 0
    def receiveStateCb(self, msg):
        '''
        Callback associated with the topic state
        '''
        pass
    def shutdown(self):
        '''
        Unregisters ROS components
        '''
        if hasattr(self, 'command_publisher'):
            self.command_publisher.unregister()
        if hasattr(self, 'state_subscriber'):
            self.state_subscriber.unregister()
        self.initialized = False
    def initialize(self):
        '''
        Initializes the component
        '''
        self.state = State.READY_STATE
    def recover(self):
        '''
        Recovers the component
        '''
        pass
#
# IPA CANOPEN Interface
#
class IpaCANOpenCommandInterface(DeviceCommandInterface):
    '''
    Command interface to interact with Schunk LWA4P (Powerball) arm
    '''
    def __init__(self, args):
        '''
        Component initialization
        @param args: standard interface args plus optional
                     init_service / recovery_service names
        '''
        DeviceCommandInterface.__init__(self, args)
        # dict.has_key() is deprecated (removed in Python 3); use `in`.
        if 'init_service' in args:
            self._init_service_name = args['init_service']
        else:
            self._init_service_name = ''
        if 'recovery_service' in args:
            self._recover_service_name = args['recovery_service']
        else:
            self._recover_service_name = ''
        # Joint state is not used. Instead it will use the interface for BricsActuator
        self.joint_velocities = JointVelocities()
        # Flag active while the initialization service is being called
        self.initializing = False
        # Flag active while the recovery service is being called
        self.recovering = False
    def setup(self):
        '''
        Initializes joint values, connects to command topic and services
        @return: 0 if OK, -1 otherwise
        '''
        if len(self.joint_names) == 0:
            rospy.logerr('%s-%s:setup: no joints provided'%(self.type, self.name))
            return -1
        for joint_name in self.joint_names:
            j = JointValue()
            j.joint_uri = joint_name
            j.value = 0.0
            j.unit = 'rad'
            self.joint_velocities.velocities.append(j)
            # [position, velocity, effort] scratch slot for this joint
            self.joint_state_pointer[joint_name] = [0, 0, 0]
        # PUBLISHERS
        if len(self.command_topic) > 0:
            try:
                self.command_publisher = rospy.Publisher(self.command_topic, JointVelocities)
            except ValueError as e:
                rospy.logerr('%s-%s:setup: Error connecting to topic %s ->(%s)'%(self.type, self.name, self.command_topic, e))
                return -1
        else:
            rospy.logerr('%s-%s:setup: No command topic supplied.'%(self.type, self.name))
            return -1
        # IPA SERVICES (init / recover triggers exposed by the CANopen node)
        try:
            self._service_init = rospy.ServiceProxy(self._init_service_name, Trigger)
        except ValueError as e:
            rospy.logerr('%s-%s:setup: Error connecting service (%s)'%(self.type, self.name, e))
        try:
            self._service_recover = rospy.ServiceProxy(self._recover_service_name, Trigger)
        except ValueError as e:
            rospy.logerr('%s-%s:setup: Error connecting service (%s)'%(self.type, self.name, e))
        self.initialized = True
        return 0
    def sendCommand(self):
        '''
        Sends the current desired velocities to the controller
        @return: 0 if OK, -1 otherwise
        '''
        if not self.initialized:
            return -1
        t = rospy.Time.now()
        # Copy desired values into the BricsActuator message; this
        # interface only commands velocity (index 1 of the pointer).
        for i, joint_name in enumerate(self.joint_names):
            self.joint_velocities.velocities[i].value = self.joint_state_pointer[joint_name][1]
            self.joint_velocities.velocities[i].timeStamp = t
        self.command_publisher.publish(self.joint_velocities)
        return 0
    def receiveStateCb(self, msg):
        '''
        Callback associated with the topic state
        '''
        pass
    def initialize_call(self):
        '''
        Calls the initialization service (blocking).
        Intended to run in the background thread started by initialize().
        '''
        try:
            ret = self._service_init()
            if ret.success.data:
                rospy.loginfo('%s-%s:initialize_call: Initialized successfully'%(self.type, self.name))
            else:
                rospy.logerr('%s-%s:initialize_call: Error on Initialize'%(self.type, self.name))
        except rospy.ServiceException as e:
            rospy.logerr('%s-%s:initialize_call: Error calling service: %s'%(self.type, self.name, e))
        self.initializing = False
    def initialize(self):
        '''
        Initializes the component in a background thread (non-blocking).
        The initializing flag guards against launching the call twice.
        '''
        if not self.initializing:
            self.initializing = True
            rospy.loginfo('%s-%s:initialize: Starting initialization'%(self.type, self.name))
            self.thread_init = Thread(target = self.initialize_call)
            self.thread_init.start()
    def recover_call(self):
        '''
        Calls the recover service (blocking).
        Intended to run in the background thread started by recover().
        '''
        try:
            ret = self._service_recover()
            if ret.success.data:
                rospy.loginfo('%s-%s:recover_call: Recovered successfully'%(self.type, self.name))
            else:
                rospy.logerr('%s-%s:recover_call: Error on Recover'%(self.type, self.name))
        except rospy.ServiceException as e:
            rospy.logerr('%s-%s:recover_call: Error calling service: %s'%(self.type, self.name, e))
        self.recovering = False
    def recover(self):
        '''
        Recovers the component in a background thread (non-blocking).
        The recovering flag guards against launching the call twice.
        '''
        if not self.recovering:
            self.recovering = True
            rospy.loginfo('%s-%s:recover: Starting recover'%(self.type, self.name))
            self.thread_recover = Thread(target = self.recover_call)
            self.thread_recover.start()
    def shutdown(self):
        '''
        Shutdowns connections (publisher, subscriber and service proxies)
        '''
        DeviceCommandInterface.shutdown(self)
        self._service_init.close()
        self._service_recover.close()
| |
"""Orthogonal matching pursuit algorithms
"""
# Author: Vlad Niculae
#
# License: BSD 3 clause
import warnings
from math import sqrt
import numpy as np
from scipy import linalg
from scipy.linalg.lapack import get_lapack_funcs
from .base import LinearModel, _pre_fit
from ..base import RegressorMixin
from ..utils import as_float_array, check_array, check_X_y
from ..model_selection import check_cv
from ..externals.joblib import Parallel, delayed
solve_triangular_args = {'check_finite': False}
premature = """ Orthogonal matching pursuit ended prematurely due to linear
dependence in the dictionary. The requested precision might not have been met.
"""
def _cholesky_omp(X, y, n_nonzero_coefs, tol=None, copy_X=True,
                  return_path=False):
    """Orthogonal Matching Pursuit step using the Cholesky decomposition.
    Parameters
    ----------
    X : array, shape (n_samples, n_features)
        Input dictionary. Columns are assumed to have unit norm.
    y : array, shape (n_samples,)
        Input targets
    n_nonzero_coefs : int
        Targeted number of non-zero elements
    tol : float
        Targeted squared error, if not None overrides n_nonzero_coefs.
    copy_X : bool, optional
        Whether the design matrix X must be copied by the algorithm. A false
        value is only helpful if X is already Fortran-ordered, otherwise a
        copy is made anyway.
    return_path : bool, optional. Default: False
        Whether to return every value of the nonzero coefficients along the
        forward path. Useful for cross-validation.
    Returns
    -------
    gamma : array, shape (n_nonzero_coefs,)
        Non-zero elements of the solution
    idx : array, shape (n_nonzero_coefs,)
        Indices of the positions of the elements in gamma within the solution
        vector
    coef : array, shape (n_features, n_nonzero_coefs)
        The first k values of column k correspond to the coefficient value
        for the active features at that step. The lower left triangle contains
        garbage. Only returned if ``return_path=True``.
    n_active : int
        Number of active features at convergence.
    """
    if copy_X:
        X = X.copy('F')
    else:  # even if we are allowed to overwrite, still copy it if bad order
        X = np.asfortranarray(X)
    min_float = np.finfo(X.dtype).eps
    nrm2, swap = linalg.get_blas_funcs(('nrm2', 'swap'), (X,))
    potrs, = get_lapack_funcs(('potrs',), (X,))
    # alpha holds the correlations X' * y; entries are swapped (below) so
    # the active atoms always occupy the leading prefix.
    alpha = np.dot(X.T, y)
    residual = y
    gamma = np.empty(0)
    n_active = 0
    indices = np.arange(X.shape[1])  # keeping track of swapping
    max_features = X.shape[1] if tol is not None else n_nonzero_coefs
    if solve_triangular_args:
        # new scipy, don't need to initialize because check_finite=False
        L = np.empty((max_features, max_features), dtype=X.dtype)
    else:
        # old scipy, we need the garbage upper triangle to be non-Inf
        L = np.zeros((max_features, max_features), dtype=X.dtype)
    if return_path:
        coefs = np.empty_like(L)
    while True:
        # Greedy step: select the atom most correlated with the residual.
        lam = np.argmax(np.abs(np.dot(X.T, residual)))
        if lam < n_active or alpha[lam] ** 2 < min_float:
            # atom already selected or inner product too small
            warnings.warn(premature, RuntimeWarning, stacklevel=2)
            break
        if n_active > 0:
            # Updates the Cholesky decomposition of X' X
            L[n_active, :n_active] = np.dot(X[:, :n_active].T, X[:, lam])
            linalg.solve_triangular(L[:n_active, :n_active],
                                    L[n_active, :n_active],
                                    trans=0, lower=1,
                                    overwrite_b=True,
                                    **solve_triangular_args)
            v = nrm2(L[n_active, :n_active]) ** 2
            Lkk = linalg.norm(X[:, lam]) ** 2 - v
            if Lkk <= min_float:  # selected atoms are dependent
                warnings.warn(premature, RuntimeWarning, stacklevel=2)
                break
            L[n_active, n_active] = sqrt(Lkk)
        else:
            L[0, 0] = linalg.norm(X[:, lam])
        # In-place BLAS swap moves the chosen column into the active prefix
        # of X; alpha/indices are kept consistent with the new column order.
        X.T[n_active], X.T[lam] = swap(X.T[n_active], X.T[lam])
        alpha[n_active], alpha[lam] = alpha[lam], alpha[n_active]
        indices[n_active], indices[lam] = indices[lam], indices[n_active]
        n_active += 1
        # solves LL'x = X'y as a composition of two triangular systems
        gamma, _ = potrs(L[:n_active, :n_active], alpha[:n_active], lower=True,
                         overwrite_b=False)
        if return_path:
            coefs[:n_active, n_active - 1] = gamma
        residual = y - np.dot(X[:, :n_active], gamma)
        if tol is not None and nrm2(residual) ** 2 <= tol:
            break
        elif n_active == max_features:
            break
    if return_path:
        return gamma, indices[:n_active], coefs[:, :n_active], n_active
    else:
        return gamma, indices[:n_active], n_active
def _gram_omp(Gram, Xy, n_nonzero_coefs, tol_0=None, tol=None,
              copy_Gram=True, copy_Xy=True, return_path=False):
    """Orthogonal Matching Pursuit step on a precomputed Gram matrix.
    This function uses the Cholesky decomposition method.
    Parameters
    ----------
    Gram : array, shape (n_features, n_features)
        Gram matrix of the input data matrix
    Xy : array, shape (n_features,)
        Input targets
    n_nonzero_coefs : int
        Targeted number of non-zero elements
    tol_0 : float
        Squared norm of y, required if tol is not None.
    tol : float
        Targeted squared error, if not None overrides n_nonzero_coefs.
    copy_Gram : bool, optional
        Whether the gram matrix must be copied by the algorithm. A false
        value is only helpful if it is already Fortran-ordered, otherwise a
        copy is made anyway.
    copy_Xy : bool, optional
        Whether the covariance vector Xy must be copied by the algorithm.
        If False, it may be overwritten.
    return_path : bool, optional. Default: False
        Whether to return every value of the nonzero coefficients along the
        forward path. Useful for cross-validation.
    Returns
    -------
    gamma : array, shape (n_nonzero_coefs,)
        Non-zero elements of the solution
    idx : array, shape (n_nonzero_coefs,)
        Indices of the positions of the elements in gamma within the solution
        vector
    coefs : array, shape (n_features, n_nonzero_coefs)
        The first k values of column k correspond to the coefficient value
        for the active features at that step. The lower left triangle contains
        garbage. Only returned if ``return_path=True``.
    n_active : int
        Number of active features at convergence.
    """
    Gram = Gram.copy('F') if copy_Gram else np.asfortranarray(Gram)
    if copy_Xy:
        Xy = Xy.copy()
    min_float = np.finfo(Gram.dtype).eps
    nrm2, swap = linalg.get_blas_funcs(('nrm2', 'swap'), (Gram,))
    potrs, = get_lapack_funcs(('potrs',), (Gram,))
    indices = np.arange(len(Gram))  # keeping track of swapping
    # alpha holds the correlations with the current residual; initially X'y.
    alpha = Xy
    tol_curr = tol_0  # running estimate of the squared residual norm
    delta = 0
    gamma = np.empty(0)
    n_active = 0
    max_features = len(Gram) if tol is not None else n_nonzero_coefs
    if solve_triangular_args:
        # new scipy, don't need to initialize because check_finite=False
        L = np.empty((max_features, max_features), dtype=Gram.dtype)
    else:
        # old scipy, we need the garbage upper triangle to be non-Inf
        L = np.zeros((max_features, max_features), dtype=Gram.dtype)
    L[0, 0] = 1.
    if return_path:
        coefs = np.empty_like(L)
    while True:
        # Greedy step: pick the atom with the largest residual correlation.
        lam = np.argmax(np.abs(alpha))
        if lam < n_active or alpha[lam] ** 2 < min_float:
            # selected same atom twice, or inner product too small
            warnings.warn(premature, RuntimeWarning, stacklevel=3)
            break
        if n_active > 0:
            # Rank-one update of the Cholesky factor of the active Gram block.
            L[n_active, :n_active] = Gram[lam, :n_active]
            linalg.solve_triangular(L[:n_active, :n_active],
                                    L[n_active, :n_active],
                                    trans=0, lower=1,
                                    overwrite_b=True,
                                    **solve_triangular_args)
            v = nrm2(L[n_active, :n_active]) ** 2
            Lkk = Gram[lam, lam] - v
            if Lkk <= min_float:  # selected atoms are dependent
                warnings.warn(premature, RuntimeWarning, stacklevel=3)
                break
            L[n_active, n_active] = sqrt(Lkk)
        else:
            L[0, 0] = sqrt(Gram[lam, lam])
        # Symmetric in-place swap moving atom `lam` into the active prefix
        # (rows and columns of Gram, plus the bookkeeping vectors).
        Gram[n_active], Gram[lam] = swap(Gram[n_active], Gram[lam])
        Gram.T[n_active], Gram.T[lam] = swap(Gram.T[n_active], Gram.T[lam])
        indices[n_active], indices[lam] = indices[lam], indices[n_active]
        Xy[n_active], Xy[lam] = Xy[lam], Xy[n_active]
        n_active += 1
        # solves LL'x = X'y as a composition of two triangular systems
        gamma, _ = potrs(L[:n_active, :n_active], Xy[:n_active], lower=True,
                         overwrite_b=False)
        if return_path:
            coefs[:n_active, n_active - 1] = gamma
        beta = np.dot(Gram[:, :n_active], gamma)
        alpha = Xy - beta
        if tol is not None:
            # Track the residual's squared norm incrementally, without ever
            # materializing the residual itself.
            tol_curr += delta
            delta = np.inner(gamma, beta[:n_active])
            tol_curr -= delta
            if abs(tol_curr) <= tol:
                break
        elif n_active == max_features:
            break
    if return_path:
        return gamma, indices[:n_active], coefs[:, :n_active], n_active
    else:
        return gamma, indices[:n_active], n_active
def orthogonal_mp(X, y, n_nonzero_coefs=None, tol=None, precompute=False,
                  copy_X=True, return_path=False,
                  return_n_iter=False):
    """Orthogonal Matching Pursuit (OMP)
    Solves n_targets Orthogonal Matching Pursuit problems.
    An instance of the problem has the form:
    When parametrized by the number of non-zero coefficients using
    `n_nonzero_coefs`:
    argmin ||y - X\gamma||^2 subject to ||\gamma||_0 <= n_{nonzero coefs}
    When parametrized by error using the parameter `tol`:
    argmin ||\gamma||_0 subject to ||y - X\gamma||^2 <= tol
    Read more in the :ref:`User Guide <omp>`.
    Parameters
    ----------
    X : array, shape (n_samples, n_features)
        Input data. Columns are assumed to have unit norm.
    y : array, shape (n_samples,) or (n_samples, n_targets)
        Input targets
    n_nonzero_coefs : int
        Desired number of non-zero entries in the solution. If None (by
        default) this value is set to 10% of n_features.
    tol : float
        Maximum norm of the residual. If not None, overrides n_nonzero_coefs.
    precompute : {True, False, 'auto'},
        Whether to perform precomputations. Improves performance when n_targets
        or n_samples is very large.
    copy_X : bool, optional
        Whether the design matrix X must be copied by the algorithm. A false
        value is only helpful if X is already Fortran-ordered, otherwise a
        copy is made anyway.
    return_path : bool, optional. Default: False
        Whether to return every value of the nonzero coefficients along the
        forward path. Useful for cross-validation.
    return_n_iter : bool, optional default False
        Whether or not to return the number of iterations.
    Returns
    -------
    coef : array, shape (n_features,) or (n_features, n_targets)
        Coefficients of the OMP solution. If `return_path=True`, this contains
        the whole coefficient path. In this case its shape is
        (n_features, n_features) or (n_features, n_targets, n_features) and
        iterating over the last axis yields coefficients in increasing order
        of active features.
    n_iters : array-like or int
        Number of active features across every target. Returned only if
        `return_n_iter` is set to True.
    See also
    --------
    OrthogonalMatchingPursuit
    orthogonal_mp_gram
    lars_path
    decomposition.sparse_encode
    Notes
    -----
    Orthogonal matching pursuit was introduced in S. Mallat, Z. Zhang,
    Matching pursuits with time-frequency dictionaries, IEEE Transactions on
    Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
    (http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf)
    This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
    M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
    Matching Pursuit Technical Report - CS Technion, April 2008.
    http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf
    """
    X = check_array(X, order='F', copy=copy_X)
    copy_X = False
    if y.ndim == 1:
        y = y.reshape(-1, 1)
    y = check_array(y)
    if y.shape[1] > 1:  # subsequent targets will be affected
        copy_X = True
    if n_nonzero_coefs is None and tol is None:
        # default for n_nonzero_coefs is 0.1 * n_features
        # but at least one.
        n_nonzero_coefs = max(int(0.1 * X.shape[1]), 1)
    if tol is not None and tol < 0:
        raise ValueError("Epsilon cannot be negative")
    if tol is None and n_nonzero_coefs <= 0:
        raise ValueError("The number of atoms must be positive")
    if tol is None and n_nonzero_coefs > X.shape[1]:
        raise ValueError("The number of atoms cannot be more than the number "
                         "of features")
    if precompute == 'auto':
        precompute = X.shape[0] > X.shape[1]
    if precompute:
        G = np.dot(X.T, X)
        G = np.asfortranarray(G)
        Xy = np.dot(X.T, y)
        if tol is not None:
            norms_squared = np.sum((y ** 2), axis=0)
        else:
            norms_squared = None
        # BUG FIX: forward return_n_iter so the precomputed (Gram) path
        # honors the documented contract instead of silently dropping the
        # iteration counts when return_n_iter=True.
        return orthogonal_mp_gram(G, Xy, n_nonzero_coefs, tol, norms_squared,
                                  copy_Gram=copy_X, copy_Xy=False,
                                  return_path=return_path,
                                  return_n_iter=return_n_iter)
    if return_path:
        coef = np.zeros((X.shape[1], y.shape[1], X.shape[1]))
    else:
        coef = np.zeros((X.shape[1], y.shape[1]))
    n_iters = []
    # Solve each target column independently with the Cholesky-based solver.
    for k in range(y.shape[1]):
        out = _cholesky_omp(
            X, y[:, k], n_nonzero_coefs, tol,
            copy_X=copy_X, return_path=return_path)
        if return_path:
            _, idx, coefs, n_iter = out
            # Trim the path axis to the number of atoms actually selected.
            coef = coef[:, :, :len(idx)]
            for n_active, x in enumerate(coefs.T):
                coef[idx[:n_active + 1], k, n_active] = x[:n_active + 1]
        else:
            x, idx, n_iter = out
            coef[idx, k] = x
        n_iters.append(n_iter)
    if y.shape[1] == 1:
        n_iters = n_iters[0]
    if return_n_iter:
        return np.squeeze(coef), n_iters
    else:
        return np.squeeze(coef)
def orthogonal_mp_gram(Gram, Xy, n_nonzero_coefs=None, tol=None,
                       norms_squared=None, copy_Gram=True,
                       copy_Xy=True, return_path=False,
                       return_n_iter=False):
    """Gram Orthogonal Matching Pursuit (OMP)
    Solves n_targets Orthogonal Matching Pursuit problems using only
    the Gram matrix X.T * X and the product X.T * y.
    Read more in the :ref:`User Guide <omp>`.
    Parameters
    ----------
    Gram : array, shape (n_features, n_features)
        Gram matrix of the input data: X.T * X
    Xy : array, shape (n_features,) or (n_features, n_targets)
        Input targets multiplied by X: X.T * y
    n_nonzero_coefs : int
        Desired number of non-zero entries in the solution. If None (by
        default) this value is set to 10% of n_features.
    tol : float
        Maximum norm of the residual. If not None, overrides n_nonzero_coefs.
    norms_squared : array-like, shape (n_targets,)
        Squared L2 norms of the lines of y. Required if tol is not None.
    copy_Gram : bool, optional
        Whether the gram matrix must be copied by the algorithm. A false
        value is only helpful if it is already Fortran-ordered, otherwise a
        copy is made anyway.
    copy_Xy : bool, optional
        Whether the covariance vector Xy must be copied by the algorithm.
        If False, it may be overwritten.
    return_path : bool, optional. Default: False
        Whether to return every value of the nonzero coefficients along the
        forward path. Useful for cross-validation.
    return_n_iter : bool, optional default False
        Whether or not to return the number of iterations.
    Returns
    -------
    coef : array, shape (n_features,) or (n_features, n_targets)
        Coefficients of the OMP solution. If `return_path=True`, this contains
        the whole coefficient path. In this case its shape is
        (n_features, n_features) or (n_features, n_targets, n_features) and
        iterating over the last axis yields coefficients in increasing order
        of active features.
    n_iters : array-like or int
        Number of active features across every target. Returned only if
        `return_n_iter` is set to True.
    See also
    --------
    OrthogonalMatchingPursuit
    orthogonal_mp
    lars_path
    decomposition.sparse_encode
    Notes
    -----
    Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang,
    Matching pursuits with time-frequency dictionaries, IEEE Transactions on
    Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
    (http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf)
    This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
    M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
    Matching Pursuit Technical Report - CS Technion, April 2008.
    http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf
    """
    Gram = check_array(Gram, order='F', copy=copy_Gram)
    Xy = np.asarray(Xy)
    if Xy.ndim > 1 and Xy.shape[1] > 1:
        # or subsequent target will be affected
        copy_Gram = True
    if Xy.ndim == 1:
        # Promote a single target to the (n_features, 1) layout.
        Xy = Xy[:, np.newaxis]
        if tol is not None:
            norms_squared = [norms_squared]
    n_features = len(Gram)
    if n_nonzero_coefs is None and tol is None:
        n_nonzero_coefs = int(0.1 * n_features)
    # Validate the stopping criterion; tol wins over n_nonzero_coefs.
    if tol is not None:
        if norms_squared is None:
            raise ValueError('Gram OMP needs the precomputed norms in order '
                             'to evaluate the error sum of squares.')
        if tol < 0:
            raise ValueError("Epsilon cannot be negative")
    else:
        if n_nonzero_coefs <= 0:
            raise ValueError("The number of atoms must be positive")
        if n_nonzero_coefs > n_features:
            raise ValueError("The number of atoms cannot be more than the number "
                             "of features")
    n_targets = Xy.shape[1]
    if return_path:
        coef = np.zeros((n_features, n_targets, n_features))
    else:
        coef = np.zeros((n_features, n_targets))
    n_iters = []
    # Run one Gram-based OMP problem per target column.
    for k, target in enumerate(Xy.T):
        out = _gram_omp(
            Gram, target, n_nonzero_coefs,
            norms_squared[k] if tol is not None else None, tol,
            copy_Gram=copy_Gram, copy_Xy=copy_Xy,
            return_path=return_path)
        if return_path:
            _, idx, coefs, n_iter = out
            coef = coef[:, :, :len(idx)]
            for n_active, x in enumerate(coefs.T):
                coef[idx[:n_active + 1], k, n_active] = x[:n_active + 1]
        else:
            x, idx, n_iter = out
            coef[idx, k] = x
        n_iters.append(n_iter)
    if n_targets == 1:
        n_iters = n_iters[0]
    if return_n_iter:
        return np.squeeze(coef), n_iters
    return np.squeeze(coef)
class OrthogonalMatchingPursuit(LinearModel, RegressorMixin):
"""Orthogonal Matching Pursuit model (OMP)
Read more in the :ref:`User Guide <omp>`.
Parameters
----------
n_nonzero_coefs : int, optional
Desired number of non-zero entries in the solution. If None (by
default) this value is set to 10% of n_features.
tol : float, optional
Maximum norm of the residual. If not None, overrides n_nonzero_coefs.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default True
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
precompute : {True, False, 'auto'}, default 'auto'
Whether to use a precomputed Gram and Xy matrix to speed up
calculations. Improves performance when `n_targets` or `n_samples` is
very large. Note that if you already have such matrices, you can pass
them directly to the fit method.
Attributes
----------
coef_ : array, shape (n_features,) or (n_targets, n_features)
parameter vector (w in the formula)
intercept_ : float or array, shape (n_targets,)
independent term in decision function.
n_iter_ : int or array-like
Number of active features across every target.
Notes
-----
Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang,
Matching pursuits with time-frequency dictionaries, IEEE Transactions on
Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
(http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf)
This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
Matching Pursuit Technical Report - CS Technion, April 2008.
http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf
See also
--------
orthogonal_mp
orthogonal_mp_gram
lars_path
Lars
LassoLars
decomposition.sparse_encode
OrthogonalMatchingPursuitCV
"""
    def __init__(self, n_nonzero_coefs=None, tol=None, fit_intercept=True,
                 normalize=True, precompute='auto'):
        # Hyper-parameters are stored untouched (scikit-learn convention);
        # validation and the derived n_nonzero_coefs_ happen in fit().
        self.n_nonzero_coefs = n_nonzero_coefs
        self.tol = tol
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.precompute = precompute
    def fit(self, X, y):
        """Fit the model using X, y as training data.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data.
        y : array-like, shape (n_samples,) or (n_samples, n_targets)
            Target values. Will be cast to X's dtype if necessary
        Returns
        -------
        self : object
            returns an instance of self.
        """
        X, y = check_X_y(X, y, multi_output=True, y_numeric=True)
        n_features = X.shape[1]
        # Center/scale the data and, depending on self.precompute, build the
        # precomputed Gram/Xy products; Gram is False when not precomputed
        # (see the branch below).
        X, y, X_offset, y_offset, X_scale, Gram, Xy = \
            _pre_fit(X, y, None, self.precompute, self.normalize,
                     self.fit_intercept, copy=True)
        # the solvers below always work on 2-D targets
        if y.ndim == 1:
            y = y[:, np.newaxis]
        if self.n_nonzero_coefs is None and self.tol is None:
            # default for n_nonzero_coefs is 0.1 * n_features
            # but at least one.
            self.n_nonzero_coefs_ = max(int(0.1 * n_features), 1)
        else:
            self.n_nonzero_coefs_ = self.n_nonzero_coefs
        if Gram is False:
            # no precomputed Gram matrix: run OMP directly on X
            coef_, self.n_iter_ = orthogonal_mp(
                X, y, self.n_nonzero_coefs_, self.tol,
                precompute=False, copy_X=True,
                return_n_iter=True)
        else:
            # the Gram solver needs ||y||^2 to evaluate the tol stopping rule
            norms_sq = np.sum(y ** 2, axis=0) if self.tol is not None else None
            coef_, self.n_iter_ = orthogonal_mp_gram(
                Gram, Xy=Xy, n_nonzero_coefs=self.n_nonzero_coefs_,
                tol=self.tol, norms_squared=norms_sq,
                copy_Gram=True, copy_Xy=True,
                return_n_iter=True)
        # solvers return (n_features, n_targets); coef_ is stored transposed
        # to match the documented (n_targets, n_features) shape
        self.coef_ = coef_.T
        self._set_intercept(X_offset, y_offset, X_scale)
        return self
def _omp_path_residues(X_train, y_train, X_test, y_test, copy=True,
                       fit_intercept=True, normalize=True, max_iter=100):
    """Compute the residues on left-out data for a full LARS path
    Parameters
    -----------
    X_train : array, shape (n_samples, n_features)
        The data to fit the LARS on
    y_train : array, shape (n_samples)
        The target variable to fit LARS on
    X_test : array, shape (n_samples, n_features)
        The data to compute the residues on
    y_test : array, shape (n_samples)
        The target variable to compute the residues on
    copy : boolean, optional
        Whether X_train, X_test, y_train and y_test should be copied. If
        False, they may be overwritten.
    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).
    normalize : boolean, optional, default True
        This parameter is ignored when ``fit_intercept`` is set to False.
        If True, the regressors X will be normalized before regression by
        subtracting the mean and dividing by the l2-norm.
        If you wish to standardize, please use
        :class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
        on an estimator with ``normalize=False``.
    max_iter : integer, optional
        Maximum numbers of iterations to perform, therefore maximum features
        to include. 100 by default.
    Returns
    -------
    residues : array, shape (n_samples, max_features)
        Residues of the prediction on the test data
    """
    if copy:
        # work on private copies so the caller's folds are not clobbered
        X_train = X_train.copy()
        y_train = y_train.copy()
        X_test = X_test.copy()
        y_test = y_test.copy()
    if fit_intercept:
        # center both splits with the *training* statistics
        X_mean = X_train.mean(axis=0)
        X_train -= X_mean
        X_test -= X_mean
        y_mean = y_train.mean(axis=0)
        y_train = as_float_array(y_train, copy=False)
        y_train -= y_mean
        y_test = as_float_array(y_test, copy=False)
        y_test -= y_mean
    if normalize:
        # scale non-zero columns of X_train to unit l2-norm
        norms = np.sqrt(np.sum(X_train ** 2, axis=0))
        nonzeros = np.flatnonzero(norms)
        X_train[:, nonzeros] /= norms[nonzeros]
    coefs = orthogonal_mp(X_train, y_train, n_nonzero_coefs=max_iter, tol=None,
                          precompute=False, copy_X=False,
                          return_path=True)
    if coefs.ndim == 1:
        coefs = coefs[:, np.newaxis]
    if normalize:
        # undo the column scaling so the coefficients apply to the raw X_test
        coefs[nonzeros] /= norms[nonzeros][:, np.newaxis]
    # residues for every step of the path against the held-out data
    return np.dot(coefs.T, X_test.T) - y_test
class OrthogonalMatchingPursuitCV(LinearModel, RegressorMixin):
    """Cross-validated Orthogonal Matching Pursuit model (OMP)
    Read more in the :ref:`User Guide <omp>`.
    Parameters
    ----------
    copy : bool, optional
        Whether the design matrix X must be copied by the algorithm. A false
        value is only helpful if X is already Fortran-ordered, otherwise a
        copy is made anyway.
    fit_intercept : boolean, optional
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).
    normalize : boolean, optional, default True
        This parameter is ignored when ``fit_intercept`` is set to False.
        If True, the regressors X will be normalized before regression by
        subtracting the mean and dividing by the l2-norm.
        If you wish to standardize, please use
        :class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
        on an estimator with ``normalize=False``.
    max_iter : integer, optional
        Maximum numbers of iterations to perform, therefore maximum features
        to include. 10% of ``n_features`` but at least 5 if available.
    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:
        - None, to use the default 3-fold cross-validation,
        - integer, to specify the number of folds.
        - An object to be used as a cross-validation generator.
        - An iterable yielding train/test splits.
        For integer/None inputs, :class:`KFold` is used.
        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.
    n_jobs : integer, optional
        Number of CPUs to use during the cross validation. If ``-1``, use
        all the CPUs
    verbose : boolean or integer, optional
        Sets the verbosity amount
    Attributes
    ----------
    intercept_ : float or array, shape (n_targets,)
        Independent term in decision function.
    coef_ : array, shape (n_features,) or (n_targets, n_features)
        Parameter vector (w in the problem formulation).
    n_nonzero_coefs_ : int
        Estimated number of non-zero coefficients giving the best mean squared
        error over the cross-validation folds.
    n_iter_ : int or array-like
        Number of active features across every target for the model refit with
        the best hyperparameters got by cross-validating across all folds.
    See also
    --------
    orthogonal_mp
    orthogonal_mp_gram
    lars_path
    Lars
    LassoLars
    OrthogonalMatchingPursuit
    LarsCV
    LassoLarsCV
    decomposition.sparse_encode
    """
    def __init__(self, copy=True, fit_intercept=True, normalize=True,
                 max_iter=None, cv=None, n_jobs=1, verbose=False):
        # plain hyper-parameter storage; all the work happens in fit()
        self.copy = copy
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.max_iter = max_iter
        self.cv = cv
        self.n_jobs = n_jobs
        self.verbose = verbose
    def fit(self, X, y):
        """Fit the model using X, y as training data.
        Parameters
        ----------
        X : array-like, shape [n_samples, n_features]
            Training data.
        y : array-like, shape [n_samples]
            Target values. Will be cast to X's dtype if necessary
        Returns
        -------
        self : object
            returns an instance of self.
        """
        X, y = check_X_y(X, y, y_numeric=True, ensure_min_features=2,
                         estimator=self)
        X = as_float_array(X, copy=False, force_all_finite=False)
        cv = check_cv(self.cv, classifier=False)
        # default budget: 10% of the features, at least 5, capped at n_features
        max_iter = (min(max(int(0.1 * X.shape[1]), 5), X.shape[1])
                    if not self.max_iter
                    else self.max_iter)
        # one OMP residue path per CV fold, computed in parallel
        cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
            delayed(_omp_path_residues)(
                X[train], y[train], X[test], y[test], self.copy,
                self.fit_intercept, self.normalize, max_iter)
            for train, test in cv.split(X))
        # folds may stop early; truncate all paths to the shortest one so the
        # per-step MSEs can be averaged across folds
        min_early_stop = min(fold.shape[0] for fold in cv_paths)
        mse_folds = np.array([(fold[:min_early_stop] ** 2).mean(axis=1)
                              for fold in cv_paths])
        # path index k corresponds to k + 1 non-zero coefficients
        best_n_nonzero_coefs = np.argmin(mse_folds.mean(axis=0)) + 1
        self.n_nonzero_coefs_ = best_n_nonzero_coefs
        # refit on the full data with the selected sparsity level
        omp = OrthogonalMatchingPursuit(n_nonzero_coefs=best_n_nonzero_coefs,
                                        fit_intercept=self.fit_intercept,
                                        normalize=self.normalize)
        omp.fit(X, y)
        self.coef_ = omp.coef_
        self.intercept_ = omp.intercept_
        self.n_iter_ = omp.n_iter_
        return self
| |
import os
from vigra import readImage
import h5py
from progressbar import AnimatedMarker, Bar, BouncingBar, Counter, ETA, \
FileTransferSpeed, FormatLabel, Percentage, \
ProgressBar, ReverseBar, RotatingMarker, \
SimpleProgress, Timer
from colorama import Fore, Back, Style
import os.path
#from termcolor import colored
def getFiles(path, ending):
    """Recursively collect files below *path* whose names end with *ending*.

    Returns a pair ``(full_paths, base_names)`` where ``base_names`` holds
    each file name truncated at its first dot.
    """
    full_paths = []
    base_names = []
    for dirpath, _dirnames, filenames in os.walk(path):
        for filename in filenames:
            if not filename.endswith(ending):
                continue
            full_paths.append(os.path.join(dirpath, filename))
            base_names.append(filename.split('.')[0])
    return full_paths, base_names
def makeFullPath(folder, baseNames, ending):
    """Join *folder*, each base name and *ending* into full file paths.

    A trailing slash on *folder* and a leading dot on *ending* are added
    only when missing.
    """
    prefix = folder if folder[-1] == '/' else '%s/' % folder
    suffix = ending if ending[0] == '.' else '.%s' % ending
    return ["%s%s%s" % (prefix, baseName, suffix) for baseName in baseNames]
def pBar(size,name=""):
    """Create and start a colored console progress bar labelled *name*.

    ``size`` is the number of items to track; ``maxval`` is ``size - 1``
    because callers update the bar with 0-based indices (see LazyCaller).
    """
    widgets = [ Back.CYAN+Fore.BLACK ," %s :"%name ,Percentage(), ' ',
        Bar(marker=RotatingMarker()), ' ', ETA(), ' ', FileTransferSpeed(),Back.RESET + Style.RESET_ALL]
    # guard for single-item batches -- presumably because maxval would be 0
    # otherwise (degenerate bar); TODO confirm against progressbar's API
    if size == 1 :
        size+=1
    pbar = ProgressBar(widgets=widgets, maxval=size-1).start()
    return pbar
def h5Exist(f, dset):
    """Return True if *f* is a readable HDF5 file containing dataset *dset*.

    Any failure (missing file, unreadable/corrupt HDF5) counts as
    "does not exist" and yields False.
    """
    try:
        if not os.path.isfile(f):
            return False
        # context manager guarantees the handle is closed even on error
        # (the old code leaked it when the membership test raised), and the
        # former bare ``except:`` no longer swallows KeyboardInterrupt et al.
        with h5py.File(f, 'r') as h5file:
            return dset in h5file
    except Exception:
        return False
class LazyArrays(object):
    """Sequence-like lazy loader: each item is read from disk on access.

    ``files``    -- list of file paths
    ``dset``     -- HDF5 dataset name (used only when ``filetype == "h5"``)
    ``filetype`` -- "image" (loaded via vigra.readImage) or "h5"
    """
    def __init__(self,files,dset=None,filetype="h5"):
        self.files = files
        self.dset = dset
        self.filetype = filetype
    def __len__(self):
        return len(self.files)
    def __getitem__(self,index):
        # NOTE(review): an unrecognised filetype silently returns None --
        # confirm whether raising ValueError would be preferable.
        if self.filetype == "image" :
            return readImage(self.files[index])
        elif self.filetype == "h5" :
            # open/read/close per access: no handles kept alive between items
            f = h5py.File(self.files[index],'r')
            value = f[self.dset].value
            f.close()
            return value
class LazyCaller(object):
    """Memoizing batch executor.

    Wraps a function ``f`` and calls it once per batch item, writing each
    result to its own HDF5 output file (``setOutput``).  Items whose output
    file already contains the target dataset are skipped unless
    ``overwrite`` is set; ``skipAll`` short-circuits the whole batch.
    """
    def __init__(self,f,skip=False,verbose=True,overwrite=False,
                 name="",compress=False,skipAll=False,compressionOpts=2):
        # NOTE(review): the ``skip`` argument is accepted but never stored or
        # used -- confirm whether it was superseded by ``skipAll``.
        self.f = f
        self._batchKWargs = set()
        self.verbose = verbose
        self.outputFiles = None
        self.dset = None
        self.overwrite = overwrite
        self.name = name
        self.compress = compress
        self.compressionOpts= compressionOpts
        self.skipAll = skipAll
    def setCompression(self,compress,compressionOpts=2):
        # toggle gzip compression for stored results
        self.compress=compress
        self.compressionOpts=compressionOpts
    def setBatchKwargs(self,batchKWargs):
        # names of the keyword arguments that vary per batch item
        self._batchKWargs = set(batchKWargs)
    def setOutput(self,files,dset):
        # one output file per batch item, all using dataset name ``dset``
        self.outputFiles=files
        self.dset=dset
    def __call__(self,*args,**kwargs):
        """Run ``f`` over the whole batch, skipping already-computed items."""
        assert self.outputFiles is not None
        if len(args)>0 :
            raise RuntimeError("LazyCaller(...) does only support keyword arguments")
        if self.skipAll :
            if self.verbose :
                print(Back.CYAN+Fore.BLACK +" SKIP %s"%self.name+Back.RESET + Style.RESET_ALL)
        else :
            # split kwargs into per-item (batch) and shared (const) arguments
            constKwargs=dict()
            batchKwargs=dict()
            callingKwargs=dict()
            batchLen = None
            for kwarg in kwargs.keys() :
                if kwarg in self._batchKWargs :
                    batchInput = kwargs[kwarg]
                    batchKwargs[kwarg] = batchInput
                    # all batch inputs must have the same length
                    if batchLen is None :
                        batchLen = len(batchInput)
                    else:
                        assert batchLen == len(batchInput)
                else :
                    constKwargs[kwarg]=kwargs[kwarg]
                    callingKwargs[kwarg]=kwargs[kwarg]
            # NOTE(review): if no batch kwargs are supplied, batchLen stays
            # None and range(batchLen) below raises TypeError -- confirm
            # callers always pass at least one batch argument.
            if self.verbose :
                pbar = pBar(batchLen,name=self.name)
            # iterate over all batch items
            for batchIndex in range(batchLen):
                #check if we need to do the computation
                exist = h5Exist(self.outputFiles[batchIndex],self.dset)
                if exist == False or self.overwrite == True:
                    # set up the kwargs for a single function call
                    for batchKwarg in self._batchKWargs :
                        batchItem = kwargs[batchKwarg][batchIndex]
                        callingKwargs[batchKwarg]=batchItem
                    # call the actual function and store the result
                    self._local_call_(callingKwargs,self.outputFiles[batchIndex])
                if self.verbose :
                    pbar.update(batchIndex)
            if self.verbose :
                pbar.finish()
    def _local_call_(self,callingKwargs,outputFile):
        # evaluate ``f`` once and persist the result under ``self.dset``
        result = self.f(**callingKwargs)
        f = h5py.File(outputFile,'w')
        if self.compress :
            dataset = f.create_dataset(self.dset,shape=result.shape,compression='gzip',compression_opts=self.compressionOpts)
            dataset[...]=result
        else :
            f[self.dset]=result
        f.close()
if __name__ == "__main__" :
    # Smoke test: lazily load the BSD500 test images and print their shapes.
    imagePath = "/home/tbeier/src/privatOpengm/experiments/datasets/bsd500/BSR/BSDS500/data/images/test/"
    # BUG FIX: getFiles returns (fullPaths, baseNames); the old code passed
    # the whole tuple to LazyArrays, so images[0] indexed the tuple instead
    # of a file list.
    files, baseNames = getFiles(imagePath, "jpg")
    images = LazyArrays(files, filetype="image")
    print(type(images))
    img = images[0]
    for img in images :
        print(img.shape)
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from gevent import monkey
monkey.patch_all()
from gevent.pywsgi import WSGIServer
import re
from urllib import unquote
# Default static-asset locations; WebApp.templates (when set on a subclass)
# overrides the 'templates' entry at route-compilation time.
static_setting = {
    'templates': r'templates'
}
from HTTPerror import HTTP404Error, HTTP403Error, HTTP502Error
class RouteError(Exception):
    """Raised when a URL route is malformed or collides with request data."""

    def __init__(self, info=None):
        # BUG FIX: the reason was printed but never stored, so __str__
        # crashed with AttributeError on self.info.
        self.info = info
        print("<" + str(info) + ">")

    def __str__(self):
        if self.info == 'too many re':
            return "<TOO MORE REGULAR SEARCH>"
        if self.info == 'route error':
            return '<WRONG ROUTE DESIGN>'
        if self.info == 'query already in request':
            return "<IT HAS ALREADY IN REQUEST VALUE>"
        # BUG FIX: unknown reasons used to fall through and return None,
        # which makes str(exc) raise TypeError.
        return "<" + str(self.info) + ">"
class RequestError(Exception):
    """Base class for request-related errors raised by the framework.

    The former zero-argument ``__init__`` prevented passing a message;
    inheriting Exception's constructor is backward-compatible (calling
    ``RequestError()`` still works) and allows ``RequestError('why')``.
    """
    pass
class RequestValueError(RequestError):
    """Raised when a parsed value collides with data already stored in the
    request dictionary."""
    def __str__(self):
        return "<the value has already in request's data>"
class WebApp():
    """WSGI request wrapper and URL router.

    Class-level configuration:
      urls         -- list of (pattern, handler[, query_name]) route tuples
      _parsed_urls -- compiled route table, built once with get_urls=False
      templates    -- optional override for static_setting['templates']

    When instantiated with a WSGI ``environ``, the constructor extracts the
    interesting header fields into ``self.request`` and parses both the
    query string and the request body into ``self.request['data']``.
    """
    urls = []
    _parsed_urls = []
    global static_setting
    templates = False

    def __init__(self, environ=None, get_urls=True):
        self.request = {}
        if environ:
            self._environ = environ
            # normalise the path so routes can always assume a trailing slash
            # (also tolerates an empty PATH_INFO, which used to IndexError)
            self._path = self._environ['PATH_INFO']
            if not self._path.endswith('/'):
                self._path = self._path + '/'
            # optional headers: default to None instead of raising KeyError
            self.request['cookies'] = self._environ.get('HTTP_COOKIE')
            self.request['user_agent'] = self._environ.get('HTTP_USER_AGENT')
            self.request['http_connect'] = self._environ.get('HTTP_CONNECTION')
            self.request['http_port'] = self._environ.get('HTTP_HOST')
            # mandatory WSGI keys (PEP 3333): always present
            self.request['http_protocol'] = self._environ['SERVER_PROTOCOL']
            self.request['method'] = self._environ['REQUEST_METHOD']
            self.request['content_length'] = self._environ.get('CONTENT_LENGTH')
            self.request['content_type'] = self._environ.get('CONTENT_TYPE')
            self.request['http_accept_encoding'] = self._environ.get(
                'HTTP_ACCEPT_ENCODING')
            self.request['data'] = {}
            self.request['query_string'] = {}
            # --- request body ---------------------------------------------
            # BUG FIX: the WSGI environ key is 'CONTENT_LENGTH' (upper case);
            # the old code looked up 'content_length' and therefore always
            # read 0 bytes, silently dropping every POST body.
            try:
                content_length = int(self.request['content_length'] or 0)
            except (TypeError, ValueError):
                content_length = 0
            if content_length > 0:
                request_data = self._environ['wsgi.input'].read(content_length)
            else:
                request_data = ''
            if request_data:
                request_data = unquote(request_data)
                for data_pair in request_data.split('&'):
                    try:
                        key, value = data_pair.split('=')
                        self.request['data'][key] = value
                    except ValueError:
                        # malformed pair (no '=' or several): skip it,
                        # mirroring the query-string parser below
                        pass
            # --- query string ---------------------------------------------
            query_string = self._environ['QUERY_STRING']
            if query_string:
                query_string = unquote(query_string)
                for data_pair in query_string.split('&'):
                    try:
                        key, value = data_pair.split('=')
                        self.request['data'][key] = value
                        self.request['query_string'][key] = value
                    except ValueError:
                        pass
        if not get_urls:
            # compile the route declarations once, at server start-up
            for url in self.urls:
                try:
                    res = self.url_parse(url[0])
                except RouteError:
                    print("<the route design got some mistakes>")
                    raise HTTP404Error
                if isinstance(res, tuple):
                    # route carries a named capture: remember the query name
                    self._parsed_urls.append((res[0] + '$', url[1], res[1]))
                else:
                    self._parsed_urls.append((res + '$', url[1]))
            if self.templates:
                static_setting['templates'] = self.templates

    def __repr__(self):
        return "Jolla.WebAppObject"

    def __str__(self):
        return "<class 'Jolla.WebAppObject'>"

    def parse(self, urls):
        """Dispatch ``self._path`` against the compiled route table *urls*.

        Each entry is ('regex$', handler[, query_name]).  Returns the
        matching handler's result; raises HTTP404Error when nothing matches.
        """
        for url_handler in urls:
            if url_handler[0] == r'/':
                # the root route only ever matches the root path
                if self._path != '/':
                    continue
                else:
                    html_code = url_handler[1](self.request)
            url_reg = re.compile(url_handler[0])
            if url_reg.match(self._path):
                if '?' in url_handler[0]:
                    # the route contains a named group such as (?P<name>\w+)
                    re_query = re.findall(url_reg, self._path)
                    if re_query[0]:
                        if url_handler[2] in self.request:
                            raise RouteError("query already in request")
                        else:
                            self.request[url_handler[2]] = re_query[0]
                        html_code = url_handler[1](self.request)
                        return html_code
                try:
                    html_code = url_handler[1](self.request)
                except TypeError:
                    # handler that takes no arguments
                    html_code = url_handler[1]()
                return html_code
        raise HTTP404Error('REQUEST NOT FOUND IN ROUTE CONFIGURATION')

    def url_parse(self, path):
        """Normalise a route declaration.

        Returns either the plain path (with a trailing slash added) or, for
        routes containing a single ``<name>`` placeholder, a tuple
        ``(regex_path, name)`` with the placeholder turned into a named
        capture group.  Raises RouteError on malformed declarations.
        """
        path = path.replace(' ', '')
        if path[-1] != '/':
            path = path + '/'
        if '<' in path and '>' in path:
            if path.count("<") != path.count(">"):
                raise RouteError("route error")
            if path.count("<") > 1:
                # only a single placeholder per route is supported
                raise RouteError("too many re")
            reg = re.compile(r'<(\w+)>')
            url_query = re.findall(reg, path)[0]
            the_url = path.replace('<' + url_query + '>',
                                   '(?P<' + url_query + '>\\w+)')
            return (the_url, url_query)
        return path

    def get_parsed_urls(self):
        """Return the compiled route table built with get_urls=False."""
        return self._parsed_urls
class jolla_server(WSGIServer):
    """Gevent WSGIServer wrapper that drives a WebApp subclass.

    ``app`` is the WebApp *class* (not an instance); a throwaway instance is
    created once with get_urls=False so the route table is compiled before
    serving starts.
    """
    def __init__(self, app, port=8000, host="127.0.0.1", debug=False):
        # NOTE(review): ``debug`` is accepted but never used -- confirm intent.
        self.port = port
        self.host = host
        self.app = app
        # compile the url routes once, up front
        my_app = self.app(get_urls=False)
        self.urls = my_app.get_parsed_urls()
        WSGIServer.__init__(self, listener=(
            self.host, self.port), application=self.application)
    def __str__(self):
        return "<class 'Jolla.jolla_serverObeject'>"
    def __repr__(self):
        return 'Jolla.jolla_serverObeject'
    def application(self, environ, start_response):
        # WSGI entry point: build a per-request app, route it, emit the reply
        try:
            the_app = self.app(environ)
            html_code = the_app.parse(self.urls)
            # handlers may return either a bare body or (body, header, ...)
            if not isinstance(html_code, tuple):
                html_code = (html_code, ('Content-Type', 'text/html'))
            status = '200 OK'
        except HTTP404Error:
            status = '404 NOT FOUND'
            html_code = ('404 NOT FOUND', ('Content-Type', 'text/html'))
        header = [
            ('Server', 'Jolla/1.0')
        ]
        # everything after the body is treated as extra response headers
        for i in range(1, len(html_code)):
            header.append(html_code[i])
        start_response(status, header)
        return html_code[0]
    def run_server(self, reload = False):
        # NOTE(review): ``reload`` shadows the Python 2 builtin of that name
        print "the server is running on the {} in the port {}".format(self.host, self.port)
        if reload:
            # werkzeug's reloader wraps this object, which is WSGI-callable
            # through __call__ below
            from werkzeug.serving import run_simple
            run_simple(self.host, self.port, self, use_debugger=True, use_reloader=True)
        else:
            self.serve_forever()
    def __call__(self, environ, start_response):
        # make the server object itself usable as a WSGI application
        return self.application(environ, start_response)
| |
#!/usr/bin/env python
"""
DigitalOcean external inventory script
======================================
Generates Ansible inventory of DigitalOcean Droplets.
In addition to the --list and --host options used by Ansible, there are options
for generating JSON of other DigitalOcean data. This is useful when creating
droplets. For example, --regions will return all the DigitalOcean Regions.
This information can also be easily found in the cache file, whose default
location is /tmp/ansible-digital_ocean.cache.
The --pretty (-p) option pretty-prints the output for better human readability.
----
Although the cache stores all the information received from DigitalOcean,
the cache is not used for current droplet information (in --list, --host,
--all, and --droplets). This is so that accurate droplet information is always
found. You can force this script to use the cache with --force-cache.
----
Configuration is read from `digital_ocean.ini`, then from environment variables,
and then from command-line arguments.
Most notably, the DigitalOcean API Token must be specified. It can be specified
in the INI file or with the following environment variables:
export DO_API_TOKEN='abc123' or
export DO_API_KEY='abc123'
Alternatively, it can be passed on the command-line with --api-token.
If you specify DigitalOcean credentials in the INI file, a handy way to
get them into your environment (e.g., to use the digital_ocean module)
is to use the output of the --env option with export:
export $(digital_ocean.py --env)
----
The following groups are generated from --list:
- ID (droplet ID)
- NAME (droplet NAME)
- digital_ocean
- image_ID
- image_NAME
- distro_NAME (distribution NAME from image)
- region_NAME
- size_NAME
- status_STATUS
For each host, the following variables are registered:
- do_backup_ids
- do_created_at
- do_disk
- do_features - list
- do_id
- do_image - object
- do_ip_address
- do_private_ip_address
- do_kernel - object
- do_locked
- do_memory
- do_name
- do_networks - object
- do_next_backup_window
- do_region - object
- do_size - object
- do_size_slug
- do_snapshot_ids - list
- do_status
- do_tags
- do_vcpus
- do_volume_ids
-----
```
usage: digital_ocean.py [-h] [--list] [--host HOST] [--all] [--droplets]
[--regions] [--images] [--sizes] [--ssh-keys]
[--domains] [--tags] [--pretty]
[--cache-path CACHE_PATH]
[--cache-max_age CACHE_MAX_AGE] [--force-cache]
[--refresh-cache] [--env] [--api-token API_TOKEN]
Produce an Ansible Inventory file based on DigitalOcean credentials
optional arguments:
-h, --help show this help message and exit
--list List all active Droplets as Ansible inventory
(default: True)
--host HOST Get all Ansible inventory variables about a specific
Droplet
--all List all DigitalOcean information as JSON
--droplets, -d List Droplets as JSON
--regions List Regions as JSON
--images List Images as JSON
--sizes List Sizes as JSON
--ssh-keys List SSH keys as JSON
--domains List Domains as JSON
--tags List Tags as JSON
--pretty, -p Pretty-print results
--cache-path CACHE_PATH
Path to the cache files (default: .)
--cache-max_age CACHE_MAX_AGE
Maximum age of the cached items (default: 0)
--force-cache Only use data from the cache
--refresh-cache, -r Force refresh of cache by making API requests to
DigitalOcean (default: False - use cache files)
--env, -e Display DO_API_TOKEN
--api-token API_TOKEN, -a API_TOKEN
DigitalOcean API Token
```
"""
# (c) 2013, Evan Wies <evan@neomantra.net>
# (c) 2017, Ansible Project
# (c) 2017, Abhijeet Kasurde <akasurde@redhat.com>
#
# Inspired by the EC2 inventory plugin:
# https://github.com/ansible/ansible/blob/devel/contrib/inventory/ec2.py
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
######################################################################
import argparse
import ast
import os
import re
import requests
import sys
from time import time
try:
import ConfigParser
except ImportError:
import configparser as ConfigParser
import json
class DoManager:
    """Thin wrapper around the DigitalOcean v2 REST API.

    Only GET requests are implemented; paginated responses are followed via
    the ``links.pages.next`` URL and merged into a single dict.
    """
    def __init__(self, api_token):
        self.api_token = api_token
        self.api_endpoint = 'https://api.digitalocean.com/v2'
        self.headers = {'Authorization': 'Bearer {0}'.format(self.api_token),
                        'Content-type': 'application/json'}
        # per-request timeout, in seconds
        self.timeout = 60
    def _url_builder(self, path):
        # join ``path`` onto the API endpoint, tolerating a leading slash
        if path[0] == '/':
            path = path[1:]
        return '%s/%s' % (self.api_endpoint, path)
    def send(self, url, method='GET', data=None):
        """Issue a request and merge all paginated result pages.

        NOTE(review): only ``method == 'GET'`` is handled; for any other
        method ``resp_data`` is never assigned and the final ``return``
        raises UnboundLocalError.  Also, ``data`` is sent as a request
        *body* on GET (requests' ``data=``), not as query parameters --
        confirm the API honours that for e.g. the images filter.
        """
        url = self._url_builder(url)
        data = json.dumps(data)
        try:
            if method == 'GET':
                resp_data = {}
                incomplete = True
                while incomplete:
                    resp = requests.get(url, data=data, headers=self.headers, timeout=self.timeout)
                    json_resp = resp.json()
                    # merge list payloads across pages; scalars get overwritten
                    for key, value in json_resp.items():
                        if isinstance(value, list) and key in resp_data:
                            resp_data[key] += value
                        else:
                            resp_data[key] = value
                    # follow pagination until there is no 'next' link
                    try:
                        url = json_resp['links']['pages']['next']
                    except KeyError:
                        incomplete = False
        except ValueError as e:
            # resp.json() raises ValueError when the body is not JSON
            sys.exit("Unable to parse result from %s: %s" % (url, e))
        return resp_data
    def all_active_droplets(self):
        """Return the list of all droplets."""
        resp = self.send('droplets/')
        return resp['droplets']
    def all_regions(self):
        """Return the list of all regions."""
        resp = self.send('regions/')
        return resp['regions']
    def all_images(self, filter_name='global'):
        """Return the list of images (optionally filtered by *filter_name*)."""
        params = {'filter': filter_name}
        resp = self.send('images/', data=params)
        return resp['images']
    def sizes(self):
        """Return the list of droplet sizes."""
        resp = self.send('sizes/')
        return resp['sizes']
    def all_ssh_keys(self):
        """Return the account's SSH keys."""
        resp = self.send('account/keys')
        return resp['ssh_keys']
    def all_domains(self):
        """Return the list of domains."""
        resp = self.send('domains/')
        return resp['domains']
    def show_droplet(self, droplet_id):
        """Return a single droplet object by its numeric ID."""
        resp = self.send('droplets/%s' % droplet_id)
        return resp['droplet']
    def all_tags(self):
        """Return the list of tags."""
        resp = self.send('tags')
        return resp['tags']
class DigitalOceanInventory(object):
###########################################################################
# Main execution path
###########################################################################
    def __init__(self):
        """Main execution path """
        # DigitalOceanInventory data
        self.data = {}  # All DigitalOcean data
        self.inventory = {}  # Ansible Inventory
        # Define defaults
        self.cache_path = '.'
        self.cache_max_age = 0
        # NOTE(review): use_private_network is read from the ini but never
        # applied when building the inventory -- confirm intent.
        self.use_private_network = False
        self.group_variables = {}
        # Read settings, environment variables, and CLI arguments
        # (later sources win: ini file < environment < command line)
        self.read_settings()
        self.read_environment()
        self.read_cli_args()
        # Verify credentials were set
        if not hasattr(self, 'api_token'):
            msg = 'Could not find values for DigitalOcean api_token. They must be specified via either ini file, ' \
                  'command line argument (--api-token), or environment variables (DO_API_TOKEN)\n'
            sys.stderr.write(msg)
            sys.exit(-1)
        # env command, show DigitalOcean credentials
        if self.args.env:
            print("DO_API_TOKEN=%s" % self.api_token)
            sys.exit(0)
        # Manage cache
        self.cache_filename = self.cache_path + "/ansible-digital_ocean.cache"
        self.cache_refreshed = False
        if self.is_cache_valid():
            self.load_from_cache()
            if len(self.data) == 0:
                if self.args.force_cache:
                    sys.stderr.write('Cache is empty and --force-cache was specified\n')
                    sys.exit(-1)
        self.manager = DoManager(self.api_token)
        # Pick the json_data to print based on the CLI command
        if self.args.droplets:
            self.load_from_digital_ocean('droplets')
            json_data = {'droplets': self.data['droplets']}
        elif self.args.regions:
            self.load_from_digital_ocean('regions')
            json_data = {'regions': self.data['regions']}
        elif self.args.images:
            self.load_from_digital_ocean('images')
            json_data = {'images': self.data['images']}
        elif self.args.sizes:
            self.load_from_digital_ocean('sizes')
            json_data = {'sizes': self.data['sizes']}
        elif self.args.ssh_keys:
            self.load_from_digital_ocean('ssh_keys')
            json_data = {'ssh_keys': self.data['ssh_keys']}
        elif self.args.domains:
            self.load_from_digital_ocean('domains')
            json_data = {'domains': self.data['domains']}
        elif self.args.tags:
            self.load_from_digital_ocean('tags')
            json_data = {'tags': self.data['tags']}
        elif self.args.all:
            self.load_from_digital_ocean()
            json_data = self.data
        elif self.args.host:
            json_data = self.load_droplet_variables_for_host()
        else:  # '--list' this is last to make it default
            self.load_from_digital_ocean('droplets')
            self.build_inventory()
            json_data = self.inventory
        # persist freshly fetched data for the next run
        if self.cache_refreshed:
            self.write_to_cache()
        if self.args.pretty:
            print(json.dumps(json_data, indent=2))
        else:
            print(json.dumps(json_data))
###########################################################################
# Script configuration
###########################################################################
    def read_settings(self):
        """ Reads the settings from the digital_ocean.ini file """
        # values read here may be overridden by environment variables and
        # CLI arguments (see the call order in __init__)
        config = ConfigParser.ConfigParser()
        config_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'digital_ocean.ini')
        config.read(config_path)
        # Credentials
        if config.has_option('digital_ocean', 'api_token'):
            self.api_token = config.get('digital_ocean', 'api_token')
        # Cache related
        if config.has_option('digital_ocean', 'cache_path'):
            self.cache_path = config.get('digital_ocean', 'cache_path')
        if config.has_option('digital_ocean', 'cache_max_age'):
            self.cache_max_age = config.getint('digital_ocean', 'cache_max_age')
        # Private IP Address
        if config.has_option('digital_ocean', 'use_private_network'):
            self.use_private_network = config.getboolean('digital_ocean', 'use_private_network')
        # Group variables
        if config.has_option('digital_ocean', 'group_variables'):
            self.group_variables = ast.literal_eval(config.get('digital_ocean', 'group_variables'))
def read_environment(self):
""" Reads the settings from environment variables """
# Setup credentials
if os.getenv("DO_API_TOKEN"):
self.api_token = os.getenv("DO_API_TOKEN")
if os.getenv("DO_API_KEY"):
self.api_token = os.getenv("DO_API_KEY")
def read_cli_args(self):
""" Command line argument processing """
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on DigitalOcean credentials')
parser.add_argument('--list', action='store_true', help='List all active Droplets as Ansible inventory (default: True)')
parser.add_argument('--host', action='store', help='Get all Ansible inventory variables about a specific Droplet')
parser.add_argument('--all', action='store_true', help='List all DigitalOcean information as JSON')
parser.add_argument('--droplets', '-d', action='store_true', help='List Droplets as JSON')
parser.add_argument('--regions', action='store_true', help='List Regions as JSON')
parser.add_argument('--images', action='store_true', help='List Images as JSON')
parser.add_argument('--sizes', action='store_true', help='List Sizes as JSON')
parser.add_argument('--ssh-keys', action='store_true', help='List SSH keys as JSON')
parser.add_argument('--domains', action='store_true', help='List Domains as JSON')
parser.add_argument('--tags', action='store_true', help='List Tags as JSON')
parser.add_argument('--pretty', '-p', action='store_true', help='Pretty-print results')
parser.add_argument('--cache-path', action='store', help='Path to the cache files (default: .)')
parser.add_argument('--cache-max_age', action='store', help='Maximum age of the cached items (default: 0)')
parser.add_argument('--force-cache', action='store_true', default=False, help='Only use data from the cache')
parser.add_argument('--refresh-cache', '-r', action='store_true', default=False,
help='Force refresh of cache by making API requests to DigitalOcean (default: False - use cache files)')
parser.add_argument('--env', '-e', action='store_true', help='Display DO_API_TOKEN')
parser.add_argument('--api-token', '-a', action='store', help='DigitalOcean API Token')
self.args = parser.parse_args()
if self.args.api_token:
self.api_token = self.args.api_token
# Make --list default if none of the other commands are specified
if (not self.args.droplets and not self.args.regions and
not self.args.images and not self.args.sizes and
not self.args.ssh_keys and not self.args.domains and
not self.args.tags and
not self.args.all and not self.args.host):
self.args.list = True
###########################################################################
# Data Management
###########################################################################
def load_from_digital_ocean(self, resource=None):
"""Get JSON from DigitalOcean API """
if self.args.force_cache and os.path.isfile(self.cache_filename):
return
# We always get fresh droplets
if self.is_cache_valid() and not (resource == 'droplets' or resource is None):
return
if self.args.refresh_cache:
resource = None
if resource == 'droplets' or resource is None:
self.data['droplets'] = self.manager.all_active_droplets()
self.cache_refreshed = True
if resource == 'regions' or resource is None:
self.data['regions'] = self.manager.all_regions()
self.cache_refreshed = True
if resource == 'images' or resource is None:
self.data['images'] = self.manager.all_images()
self.cache_refreshed = True
if resource == 'sizes' or resource is None:
self.data['sizes'] = self.manager.sizes()
self.cache_refreshed = True
if resource == 'ssh_keys' or resource is None:
self.data['ssh_keys'] = self.manager.all_ssh_keys()
self.cache_refreshed = True
if resource == 'domains' or resource is None:
self.data['domains'] = self.manager.all_domains()
self.cache_refreshed = True
if resource == 'tags' or resource is None:
self.data['tags'] = self.manager.all_tags()
self.cache_refreshed = True
def add_inventory_group(self, key):
""" Method to create group dict """
host_dict = {'hosts': [], 'vars': {}}
self.inventory[key] = host_dict
return
def add_host(self, group, host):
""" Helper method to reduce host duplication """
if group not in self.inventory:
self.add_inventory_group(group)
if host not in self.inventory[group]['hosts']:
self.inventory[group]['hosts'].append(host)
return
def build_inventory(self):
""" Build Ansible inventory of droplets """
self.inventory = {
'all': {
'hosts': [],
'vars': self.group_variables
},
'_meta': {'hostvars': {}}
}
# add all droplets by id and name
for droplet in self.data['droplets']:
for net in droplet['networks']['v4']:
if net['type'] == 'public':
dest = net['ip_address']
else:
continue
self.inventory['all']['hosts'].append(dest)
self.add_host(droplet['id'], dest)
self.add_host(droplet['name'], dest)
# groups that are always present
for group in ('digital_ocean',
'region_' + droplet['region']['slug'],
'image_' + str(droplet['image']['id']),
'size_' + droplet['size']['slug'],
'distro_' + DigitalOceanInventory.to_safe(droplet['image']['distribution']),
'status_' + droplet['status']):
self.add_host(group, dest)
# groups that are not always present
for group in (droplet['image']['slug'],
droplet['image']['name']):
if group:
image = 'image_' + DigitalOceanInventory.to_safe(group)
self.add_host(image, dest)
if droplet['tags']:
for tag in droplet['tags']:
self.add_host(tag, dest)
# hostvars
info = self.do_namespace(droplet)
self.inventory['_meta']['hostvars'][dest] = info
def load_droplet_variables_for_host(self):
""" Generate a JSON response to a --host call """
host = int(self.args.host)
droplet = self.manager.show_droplet(host)
info = self.do_namespace(droplet)
return {'droplet': info}
###########################################################################
# Cache Management
###########################################################################
def is_cache_valid(self):
""" Determines if the cache files have expired, or if it is still valid """
if os.path.isfile(self.cache_filename):
mod_time = os.path.getmtime(self.cache_filename)
current_time = time()
if (mod_time + self.cache_max_age) > current_time:
return True
return False
def load_from_cache(self):
""" Reads the data from the cache file and assigns it to member variables as Python Objects """
try:
with open(self.cache_filename, 'r') as cache:
json_data = cache.read()
data = json.loads(json_data)
except IOError:
data = {'data': {}, 'inventory': {}}
self.data = data['data']
self.inventory = data['inventory']
def write_to_cache(self):
""" Writes data in JSON format to a file """
data = {'data': self.data, 'inventory': self.inventory}
json_data = json.dumps(data, indent=2)
with open(self.cache_filename, 'w') as cache:
cache.write(json_data)
###########################################################################
# Utilities
###########################################################################
@staticmethod
def to_safe(word):
""" Converts 'bad' characters in a string to underscores so they can be used as Ansible groups """
return re.sub(r"[^A-Za-z0-9\-.]", "_", word)
@staticmethod
def do_namespace(data):
""" Returns a copy of the dictionary with all the keys put in a 'do_' namespace """
info = {}
for k, v in data.items():
info['do_' + k] = v
return info
###########################################################################
# Run the script
# NOTE(review): this runs on import as well as direct execution; consider
# guarding with `if __name__ == '__main__':` -- confirm nothing imports
# this module expecting the side effect.
DigitalOceanInventory()
| |
#!/usr/bin/python -u
'''
simulate.py - simulation script for the 2012 UCC Programming Competition
NOTE: This is not the manager program for a stratego game
It merely calls the manager program as appropriate, and records results
Plays exactly ONE round, but does not overwrite previously played rounds
eg: run once to generate round1.results, twice to generate round2.results etc
Also generates total.scores based on results from every round.
Now (sortof) generates .html files to display results in a prettiful manner.
THIS FILE IS TERRIBLE
author Sam Moore (matches) [SZM]
website http://matches.ucc.asn.au/stratego
email progcomp@ucc.asn.au or matches@ucc.asn.au
git git.ucc.asn.au/progcomp2012.git
'''
import os
import sys
from time import time
#Global variables/arguments
# Defaults below; each may be overridden by the positional command-line
# arguments parsed immediately after.  (Python 2 script.)
baseDirectory = "../.." #Base directory for results, logs, agents
nGames = 2 #Number of games played by each agent against each opponent. Half will be played as RED, half as BLUE. If nGames <= 1, then no games will be played (useful for dry run?)
nRounds = 1
timeoutValue = 2

# argv: [1]=nRounds, [2]=nGames, [3]=baseDirectory, [4]=managerPath
if len(sys.argv) >= 2:
    nRounds = int(sys.argv[1])
if len(sys.argv) >= 3:
    nGames = int(sys.argv[2])
    # Odd game counts are rounded down so RED/BLUE games stay balanced.
    if nGames % 2 != 0:
        print "Warning: nGames should be even. "+str(nGames)+" specified, but only " + str(int(nGames/2) * 2)+" will be played!"
if len(sys.argv) >= 4:
    baseDirectory = sys.argv[3]
# Too many arguments: show usage and bail out.
if len(sys.argv) >= 6:
    print "Useage: " +sys.argv[0] + " [nRounds=1] [nGames=10] [baseDirectory=\""+baseDirectory+"\"] [managerPath=baseDirectory+\"/judge/manager/stratego\"]"
    sys.exit(1)
# Derived paths -- all relative to baseDirectory.
resultsDirectory = baseDirectory+"/web/results/" #Where results will go (results are in the form of text files of agent names and scores)
logDirectory = baseDirectory+"/web/log/" #Where log files go (direct output of manager program)
agentsDirectory = baseDirectory+"/agents/" #Where agents are found (each agent has its own subdirectory within this directory)
managerPath = baseDirectory+"/judge/manager/stratego" #Path to the executable that plays the games
if len(sys.argv) >= 5:
    # BUG FIX: the optional manager path is the 4th positional argument,
    # i.e. sys.argv[4].  The original read sys.argv[5], which is out of
    # range whenever exactly five argv entries exist (six or more already
    # exit with the usage message above), so it always raised IndexError.
    managerPath = sys.argv[4]
#Score dictionary - Tuple is of the form: (end score, other score, other result) where end is the player on whose turn the result occurs, other is the other player, other result indicates what to record the outcome as for the other player.
scores = {"VICTORY":(0.3,0.1, "DEFEAT"), "DEFEAT":(0.1,0.3, "VICTORY"), "SURRENDER":(0,0.3, "VICTORY"), "DRAW":(0.2,0.2, "DRAW"), "DRAW_DEFAULT":(0.1,0.1, "DRAW_DEFAULT"), "ILLEGAL":(-0.1,0.2, "DEFAULT"), "DEFAULT":(0.2,-0.1, "ILLEGAL"), "BOTH_ILLEGAL":(-0.1,-0.1, "BOTH_ILLEGAL"), "INTERNAL_ERROR":(0,0, "INTERNAL_ERROR"), "BAD_SETUP":(0,0,"BAD_SETUP")}

#Verbose - print lots of useless stuff about what you are doing (kind of like matches talking on irc...)
verbose = True

#Check the manager program exists TODO: And is executable!
if os.path.exists(managerPath) == False:
    print "Manager program at \""+managerPath+"\" doesn't exist!"
    sys.exit(1)

#Make necessary directories
if os.path.exists(resultsDirectory) == False:
    os.mkdir(resultsDirectory) #Make the results directory if it didn't exist

#Identify the round number by reading from the "info" file in the results directory, if it doesn't exist then start at round 1.
if os.path.exists(resultsDirectory+"info") == False:
    totalRounds = 1
else:
    info = open(resultsDirectory+"info", "r")
    totalRounds = int(info.readline().strip())
    info.close()
    os.remove(resultsDirectory+"info")

# Record the next starting round number back to the "info" file so a later
# invocation continues where this one leaves off.
info = open(resultsDirectory+"info", "w")
info.write(str(totalRounds + nRounds) + "\n")
info.close()

if os.path.exists(logDirectory) == False:
    os.mkdir(logDirectory) #Make the log directory if it didn't exist

startTime = time() #Record time at which simulation starts

if verbose:
    if nRounds > 1:
        print "Simulating " + str(nRounds) + " rounds (" + str(totalRounds) + " to " + str(totalRounds + nRounds-1) + ")"
    else:
        print "Simulating one round."
    print ""
print "Identifying possible agents in \""+agentsDirectory+"\""

#Get all agent names from agentsDirectory
agentNames = os.listdir(agentsDirectory)
agents = []
for name in agentNames:
    if verbose:
        sys.stdout.write("Scan \""+name+"\"... ")
    if os.path.isdir(agentsDirectory+name) == False: #Remove non-directories
        if verbose:
            sys.stdout.write(" Invalid! (Not a directory)\n")
        continue
    if os.path.exists(agentsDirectory+name+"/info") == False: #Try and find the special "info" file in each directory; ignore if it doesn't exist
        if verbose:
            sys.stdout.write(" Invalid! (No \"info\" file found)\n")
        continue

    # "info" file layout: line 1 = executable (relative to the agent dir),
    # line 2 = author, line 3 = language, remaining lines = description.
    infoFile = open(agentsDirectory+name+"/info", "r")
    agentExecutable = agentsDirectory+name+"/"+(infoFile.readline().strip())
    author = infoFile.readline().strip()
    language = infoFile.readline().strip()
    description = ""
    while True:
        line = infoFile.readline()
        if len(line) > 0:
            description += line
        else:
            break
    infoFile.close()

    # The executable line may carry arguments; only the first token must
    # exist on disk.
    if os.path.exists(agentExecutable.split(" ")[0]) == False:
        if verbose:
            sys.stdout.write(" Invalid! (Path: \""+agentExecutable+"\" does not exist!)\n")
        continue

    if verbose:
        sys.stdout.write(" Valid! (Path: \""+agentExecutable+"\")\n")
    #Convert array of valid names into array of dictionaries containing information about each agent
    #I'm starting to like python...
    agents.append({"name":name, "path":agentExecutable, "author":author, "language":language, "description":description, "score":[0], "VICTORY":[], "DEFEAT":[], "DRAW":[], "ILLEGAL":[], "DEFAULT":[], "INTERNAL_ERROR":[], "SURRENDER":[], "DRAW_DEFAULT":[], "BOTH_ILLEGAL":[], "BAD_SETUP":[], "ALL":[], "totalScore":0, "Wins":0, "Losses":0, "Draws":0, "Illegal":0, "Errors":0})

if len(agents) == 0:
    print "Couldn't find any agents! Check paths (Edit this script) or generate \"info\" files for agents."
    sys.exit(0)
if verbose:
    print "Total: " + str(len(agents)) + " valid agents found (From "+str(len(agentNames))+" possibilities)"
    print ""
#Prepare the pretty .html files if they don't exist
if verbose:
    print "Preparing .html results files..."

#BACKUP THE RESULTS DIRECTORY GOD DAMMIT
os.system("mkdir .before_round"+str(totalRounds)+"_BACKUP/; cp " + resultsDirectory+"*" + " .before_round"+str(totalRounds)+"_BACKUP/")

# Rewrite index.html as a "round in progress" placeholder page.
if os.path.exists(resultsDirectory + "index.html") == True:
    os.remove(resultsDirectory + "index.html") #Delete the file
totalFile = open(resultsDirectory + "index.html", "w")
totalFile.write("<html>\n<head>\n <title> Round in progress... </title>\n</head>\n<body>\n")
if nRounds > 1:
    totalFile.write("<h1> Rounds " + str(totalRounds) + " to " + str(totalRounds + nRounds-1) + " in progress...</h1>\n")
else:
    totalFile.write("<h1> Round " + str(totalRounds) + " in progress...</h1>\n")
totalFile.write("<p> Please wait for the rounds to finish. You can view the current progress by watching the <a href = \"../log\"/>Log Files</a> </p>")
if totalRounds > 1:
    totalFile.write("<h2> Round Summaries </h2>\n")
    totalFile.write("<table border=\"0\" cellpadding=\"10\">\n")
    for i in range(1, totalRounds):
        totalFile.write("<tr> <td> <a href=round"+str(i)+".html>Round " + str(i) + "</a> </td> </tr>\n")
    totalFile.write("</table>\n")
totalFile.write("</body>\n<!-- Total Results file autogenerated by \"" + sys.argv[0] + "\" at time " + str(time()) + " -->\n</html>\n\n")
totalFile.close()

for agent in agents:
    # Create a fresh per-agent page if this agent has never been seen.
    if os.path.exists(resultsDirectory+agent["name"] + ".html") == False:
        agentFile = open(resultsDirectory+agent["name"] + ".html", "w")
        agentFile.write("<html>\n<head>\n <title> " + agent["name"] + " overview</title>\n</head>\n<body>\n<h1> Overview for " + agent["name"]+" </h1>\n")
        agentFile.write("<table border=\"0\" cellpadding=\"10\">\n")
        agentFile.write("<tr> <th> Name </th> <th> Author </th> <th> Language </th> </tr>\n")
        agentFile.write("<tr> <td> "+agent["name"]+" </td> <td> "+agent["author"]+" </td> <td> "+agent["language"]+" </td> </tr>\n")
        agentFile.write("</table>\n");
        agentFile.write("<p> <b>Description</b> </p>\n")
        agentFile.write("<p> " + agent["description"] + " </p>\n")
        agentFile.close()

    # Rewrite the existing page up to the closing marker, accumulating the
    # totals recorded in previous rounds back into the agent dictionary.
    os.rename(resultsDirectory+agent["name"] + ".html", "tmpfile")
    oldFile = open("tmpfile", "r")
    agentFile = open(resultsDirectory+agent["name"] + ".html", "w")
    line = oldFile.readline()
    while line != "":
        #if verbose:
        #    print "Interpreting line \"" + line.strip() + "\""
        if line.strip() == "</body>" or line.strip() == "<!--end-->":
            break
        elif line == "<h3> Round Overview </h3>\n":
            agentFile.write(line)
            line = oldFile.readline()
            agentFile.write(line)
            line = oldFile.readline()
            if line == "<tr> <th> Score </th> <th> Wins </th> <th> Losses </th> <th> Draws </th> <th> Illegal </th> <th> Errors </th></tr>\n":
                #sys.stdout.write("Adding scores... " + line + "\n")
                agentFile.write(line)
                line = oldFile.readline()
                # Parse the <td> cells of the previously written score row.
                values = line.split(' ')
                agent["totalScore"] += float(values[2].strip())
                agent["Wins"] += int(values[5].strip())
                agent["Losses"] += int(values[8].strip())
                agent["Draws"] += int(values[11].strip())
                agent["Illegal"] += int(values[14].strip())
                agent["Errors"] += int(values[17].strip())
        agentFile.write(line)
        line = oldFile.readline()
    if verbose:
        print "Prepared results file \"" + resultsDirectory+agent["name"] + ".html\"."
    oldFile.close()
    agentFile.close()
    os.remove("tmpfile")

if verbose:
    print ""
#Do each round...
totalGames = nGames/2 * len(agents) * (len(agents)-1)
for roundNumber in range(totalRounds, totalRounds + nRounds):
    if os.path.exists(logDirectory + "round"+str(roundNumber)) == False:
        os.mkdir(logDirectory + "round"+str(roundNumber)) #Check there is a directory for this round's logs

    # Reset per-round tallies; accumulated fields (totalScore, Wins, ...)
    # deliberately persist across rounds.
    for agent in agents:
        agent.update({"name":agent["name"], "path":agent["path"], "score":[0], "VICTORY":[], "DEFEAT":[], "DRAW":[], "ILLEGAL":[], "DEFAULT":[], "INTERNAL_ERROR":[], "SURRENDER":[], "DRAW_DEFAULT":[], "BOTH_ILLEGAL":[], "BAD_SETUP":[], "ALL":[]})

    print "Commencing ROUND " + str(roundNumber) + " combat!"
    print "Total: " + str(totalGames) + " games to be played. This could take a while..."
    managerErrors = 0

    #This double for loop simulates a round robin, with each agent getting the chance to play as both red and blue against every other agent.
    gameNumber = 0
    for red in agents: #for each agent playing as red,
        for blue in agents: #against each other agent, playing as blue
            if red == blue:
                continue #Exclude battles against self
            for i in range(1, nGames/2 + 1):
                gameNumber += 1
                gameID = str(roundNumber) + "." + str(gameNumber)
                #Play a game and read the result. Note the game is logged to a file based on the agent's names
                if verbose:
                    sys.stdout.write("Agents: \""+red["name"]+"\" and \""+blue["name"]+"\" playing game (ID: " + gameID + ") ... ")
                logFile = logDirectory + "round"+str(roundNumber) + "/"+red["name"]+".vs."+blue["name"]+"."+str(gameID)
                errorLog = [logDirectory + "error/" + red["name"] + "."+str(gameID), logDirectory + "error/" + blue["name"] + "."+str(gameID)]
                #Run the game, outputting to logFile; stderr of (both) AI programs is directed to logFile.stderr
                outline = os.popen(managerPath + " -m 1000 -o " + logFile + " -T " + str(timeoutValue) + " \"" + red["path"] + "\" \"" + blue["path"] + "\" 2>> " + logFile+".stderr", "r").read()
                #os.system("mv tmp.mp4 " + logFile + ".mp4")
                #If there were no errors, get rid of the stderr file
                if os.stat(logFile+".stderr").st_size <= 0:
                    os.remove(logFile+".stderr")

                # Manager output format (6 space-separated fields); field 1
                # names the player whose turn ended the game, field 2 the
                # outcome keyword looked up in `scores`.
                results = outline.split(' ')
                if len(results) != 6:
                    # Unparseable manager output: record an INTERNAL_ERROR
                    # for both players.
                    if verbose:
                        sys.stdout.write("Garbage output! \"" + outline + "\"\n")
                    red["INTERNAL_ERROR"].append((blue["name"], gameID, scores["INTERNAL_ERROR"][0]))
                    blue["INTERNAL_ERROR"].append((red["name"], gameID, scores["INTERNAL_ERROR"][0]))
                    red["ALL"].append((blue["name"], gameID, scores["INTERNAL_ERROR"][0], "INTERNAL_ERROR", "RED"))
                    blue["ALL"].append((red["name"], gameID, scores["INTERNAL_ERROR"][0], "INTERNAL_ERROR", "BLUE"))
                    managerErrors += 1
                else:
                    if results[1] == "RED":
                        endColour = red
                        otherColour = blue
                        endStr = "RED"
                        otherStr = "BLUE"
                    elif results[1] == "BLUE":
                        endColour = blue
                        otherColour = red
                        endStr = "BLUE"
                        otherStr = "RED"
                    if results[1] == "BOTH":
                        red["INTERNAL_ERROR"].append((blue["name"], gameID, scores["INTERNAL_ERROR"][0]))
                        blue["INTERNAL_ERROR"].append((red["name"], gameID, scores["INTERNAL_ERROR"][0]))
                        red["ALL"].append((blue["name"], gameID, scores["INTERNAL_ERROR"][0], "INTERNAL_ERROR", "RED"))
                        blue["ALL"].append((red["name"], gameID, scores["INTERNAL_ERROR"][0], "INTERNAL_ERROR", "BLUE"))
                        managerErrors += 1
                    else:
                        # Newest score is kept at the head of the "score"
                        # list, so score[0] is the running round total.
                        endColour["score"].insert(0,endColour["score"][0] + scores[results[2]][0])
                        endColour[results[2]].append((otherColour["name"], gameID, scores[results[2]][0]))
                        endColour["ALL"].append((otherColour["name"], gameID, scores[results[2]][0], results[2], endStr))
                        otherColour["score"].insert(0, otherColour["score"][0] + scores[results[2]][1])
                        otherColour[scores[results[2]][2]].append((endColour["name"], gameID, scores[results[2]][1]))
                        otherColour["ALL"].append((endColour["name"], gameID, scores[results[2]][1], scores[results[2]][2], otherStr))
                        #Write scores to raw text files
                        for agent in [endColour, otherColour]:
                            scoreFile = open(resultsDirectory + agent["name"] + ".scores", "a")
                            scoreFile.write(str(agent["totalScore"] + agent["score"][0]) + "\n")
                            scoreFile.close()
                    if verbose:
                        sys.stdout.write(" Result \"")
                        for ii in range(1, len(results)):
                            sys.stdout.write(results[ii].strip())
                            if ii < (len(results) - 1):
                                sys.stdout.write(" ")
                        sys.stdout.write("\"\n")

    if verbose:
        print "Completed combat. Total of " + str(gameNumber) + " games played. "
    if managerErrors != 0:
        print "WARNING: Registered "+str(managerErrors)+" errors. Check the manager program."
    if verbose:
        print ""

    #We should now have complete score values.
    if verbose:
        print "RESULTS FOR ROUND " + str(roundNumber)
    #totalFile = open(resultsDirectory+"total.scores", "w") #Recreate the file
    #for agent in agents:
    #    totalFile.write(agent["name"] + " " + str(agent["totalScore"]) +"\n") #Write the total scores in descending order
    #    #if verbose:
    #    #    print "Agent: " + str(agent)

    if verbose:
        print "Updating pretty .html files... "

    # Append this round's section to every agent's page.
    for agent in agents:
        agentFile = open(resultsDirectory + agent["name"]+".html", "a")
        agentFile.write("<h2> Round " + str(roundNumber) + "</h2>\n")
        agentFile.write("<h3> Round Overview </h3>\n")
        agentFile.write("<table border=\"0\" cellpadding=\"10\">\n")
        agentFile.write("<tr> <th> Score </th> <th> Wins </th> <th> Losses </th> <th> Draws </th> <th> Illegal </th> <th> Errors </th></tr>\n")
        agentFile.write("<tr> <td> "+str(agent["score"][0])+" </td> <td> "+str(len(agent["VICTORY"]) + len(agent["DEFAULT"]))+" </td> <td> "+str(len(agent["DEFEAT"]) + len(agent["SURRENDER"]))+" </td> <td> "+str(len(agent["DRAW"]) + len(agent["DRAW_DEFAULT"]))+" </td> <td> "+str(len(agent["ILLEGAL"]) + len(agent["BOTH_ILLEGAL"]) + len(agent["BAD_SETUP"]))+" </td> <td> " +str(len(agent["INTERNAL_ERROR"]))+" </td> </tr>\n")
        agentFile.write("</table>\n")
        agentFile.write("<p> <a href=round"+str(roundNumber)+".html>Round "+str(roundNumber) + " Scoreboard</a></p>\n")
        agentFile.write("<h3> Detailed </h3>\n")
        agentFile.write("<table border=\"0\" cellpadding=\"10\">\n")
        agentFile.write("<tr> <th> Game ID </th> <th> Opponent </th> <th> Played as </th> <th> Outcome </th> <th> Score </th> <th> Accumulated Score </th> </tr> </th>\n")
        for index in range(0, len(agent["ALL"])):
            # Reconstruct the log-file name from who played RED.
            if agent["ALL"][index][4] == "RED":
                logFile = "../log/round"+str(roundNumber) + "/"+agent["name"]+".vs."+agent["ALL"][index][0]+"."+str(agent["ALL"][index][1])
            else:
                logFile = "../log/round"+str(roundNumber) + "/"+agent["ALL"][index][0]+".vs."+agent["name"]+"."+str(agent["ALL"][index][1])
            agentFile.write("<tr> <td> <a href="+logFile+">" + str(agent["ALL"][index][1]) + " </a> </td> <td> <a href="+agent["ALL"][index][0]+".html>"+agent["ALL"][index][0] + " </a> </td> <td> " + agent["ALL"][index][4] + " </td> <td> " + agent["ALL"][index][3] + " </td> <td> " + str(agent["ALL"][index][2]) + "</td> <td> " + str(agent["score"][len(agent["score"])-index -2]) + " </td> </tr> </th>\n")
        agentFile.write("</table>\n")

        # Fold the round tallies into the accumulated totals.
        agent["totalScore"] += agent["score"][0]
        agent["Wins"] += len(agent["VICTORY"]) + len(agent["DEFAULT"])
        agent["Losses"] += len(agent["DEFEAT"]) + len(agent["SURRENDER"])
        agent["Draws"] += len(agent["DRAW"]) + len(agent["DRAW_DEFAULT"])
        agent["Illegal"] += len(agent["ILLEGAL"]) + len(agent["BOTH_ILLEGAL"]) + len(agent["BAD_SETUP"])
        agent["Errors"] += len(agent["INTERNAL_ERROR"])
        agentFile.write("<h3> Accumulated Results </h3>\n")
        agentFile.write("<table border=\"0\" cellpadding=\"10\">\n")
        agentFile.write("<tr> <th> Score </th> <th> Wins </th> <th> Losses </th> <th> Draws </th> <th> Illegal </th> <th> Errors </th></tr>\n")
        agentFile.write("<tr> <td> "+str(agent["totalScore"])+" </td> <td> "+str(agent["Wins"])+" </td> <td> "+str(agent["Losses"])+" </td> <td> "+str(agent["Draws"])+" </td> <td> "+str(agent["Illegal"])+" </td> <td> " +str(agent["Errors"])+" </td> </tr>\n")
        agentFile.write("</table>\n")
        agentFile.close()

    #Update round file
    roundFile = open(resultsDirectory + "round"+str(roundNumber)+".html", "w")
    roundFile.write("<html>\n<head>\n <title> Round " +str(roundNumber)+ " Overview </title>\n</head>\n<body>\n")
    roundFile.write("<h1> Round " +str(roundNumber)+ " Overview </h1>\n")
    roundFile.write("<table border=\"0\" cellpadding=\"10\">\n")
    roundFile.write("<tr> <th> Name </th> <th> Score </th> <th> Total Score </th> </tr>\n")
    agents.sort(key = lambda e : e["score"][0], reverse=True)
    for agent in agents:
        roundFile.write("<tr> <td> <a href="+agent["name"]+".html>"+agent["name"] + " </a> </td> <td> " + str(agent["score"][0]) + " </td> <td> " + str(agent["totalScore"]) + " </td> </tr>\n")
    roundFile.write("</table>\n")

    # Build a gnuplot script plotting every agent's .scores file, render it
    # to round<N>.png, then clean up.  Requires gnuplot on the PATH.
    command = "cp scores.plt " + resultsDirectory + "scores.plt;"
    os.system(command)
    scorePlot = open(resultsDirectory + "scores.plt", "a")
    scorePlot.write("plot ")
    for i in range(0, len(agents)):
        if i > 0:
            scorePlot.write(", ")
        scorePlot.write("\""+agents[i]["name"]+".scores\" using ($0+1):1 with linespoints title \""+agents[i]["name"]+"\"")
    scorePlot.write("\nexit\n")
    scorePlot.close()
    command = "d=$(pwd); cd " + resultsDirectory + ";"
    command += "gnuplot scores.plt;"
    command += "rm -f scores.plt;"
    command += "mv scores.png round"+str(roundNumber)+".png;"
    command += "cd $d;"
    os.system(command)
    roundFile.write("<h2> Accumulated Scores - up to Round " + str(roundNumber)+" </h2>\n")
    roundFile.write("<img src=\"round"+str(roundNumber)+".png\" alt = \"round"+str(roundNumber)+".png\" title = \"round"+str(roundNumber)+".png\" width = \"640\" height = \"480\"/>\n")
    roundFile.write("<p> <a href=index.html>Current Scoreboard</a></p>\n")
    roundFile.write("</body>\n<!-- Results file for Round " + str(roundNumber) + " autogenerated by \"" + sys.argv[0] + "\" at time " + str(time()) + " -->\n</html>\n\n")
    roundFile.close()
# All rounds played: close off the per-agent pages, rebuild the total
# scoreboard, and dump plain-text totals.  (roundNumber intentionally
# retains its value from the last loop iteration above.)
if verbose:
    print "Finalising .html files... "
for agent in agents:
    agentFile = open(resultsDirectory + agent["name"]+".html", "a")
    agentFile.write("<!--end-->\n")

    #Write a graph
    #Comment out if you don't have gnuplot
    command = "rm -f " + agent["name"] + ".png;"
    command += "cp template.plt " + resultsDirectory + agent["name"] + ".plt;"
    command += "d=$(pwd); cd " + resultsDirectory + ";"
    command += "sed -i \"s:\[NAME\]:"+agent["name"]+":g\" " +resultsDirectory + agent["name"]+".plt;"
    command += "gnuplot " + resultsDirectory + agent["name"]+".plt;"
    command += "rm -f " + resultsDirectory + agent["name"] + ".plt;"
    command += "cd $d;"
    os.system(command)

    agentFile.write("<!--end-->\n")
    agentFile.write("<h3> Score Graph </h3>\n")
    agentFile.write("<img src=\""+agent["name"]+".png\" alt=\""+agent["name"]+".png\" title=\""+agent["name"]+".png\" width=\"640\" height=\"480\"/>\n")
    #Link to main file
    agentFile.write("<p> <a href=\"index.html\"/>Total Statistics</a> </p>\n")
    agentFile.write("</body>\n<!-- Results file for \"" + agent["name"] + "\" autogenerated by \"" + sys.argv[0] + "\" at time " + str(time()) + " -->\n</html>\n\n")
    agentFile.close()

# Replace the "in progress" index.html with the final scoreboard.
if os.path.exists(resultsDirectory + "index.html") == True:
    os.remove(resultsDirectory + "index.html") #Delete the file
totalFile = open(resultsDirectory + "index.html", "w")
totalFile.write("<html>\n<head>\n <title> Total Overview </title>\n</head>\n<body>\n")
totalFile.write("<h1> Total Overview </h1>\n")
totalFile.write("<table border=\"0\" cellpadding=\"10\">\n")
totalFile.write("<tr> <th> Name </th> <th> Total Score </th> </tr>\n")
agents.sort(key = lambda e : e["totalScore"], reverse=True)
for agent in agents:
    totalFile.write("<tr> <td> <a href="+agent["name"]+".html>"+agent["name"] + " </a> </td> <td> " + str(agent["totalScore"]) + " </td> </tr>\n")
totalFile.write("</table>\n")
totalFile.write("<h2> Score Graph </h2>\n")
# Reuse the last round's rendered graph as the overall score graph.
command = "d=$(pwd);"
command += "cd " + resultsDirectory + ";"
command += "rm -f scores.png;"
command += "cp round"+str(roundNumber)+".png scores.png;"
command += "cd $d;"
os.system(command)
totalFile.write("<img src=\"scores.png\" alt=\"scores.png\" title=\"scores.png\" width=\"640\" height=\"480\"/>\n")
totalFile.write("<h2> Round Summaries </h2>\n")
totalFile.write("<table border=\"0\" cellpadding=\"10\">\n")
for i in range(1, totalRounds+1):
    totalFile.write("<tr> <td> <a href=round"+str(i)+".html>Round " + str(i) + "</a> </td> </tr>\n")
totalFile.write("</table>\n")
totalFile.write("</body>\n<!-- Total Results file autogenerated by \"" + sys.argv[0] + "\" at time " + str(time()) + " -->\n</html>\n\n")
totalFile.close()

#Write results to a raw text file as well
textResults = open(resultsDirectory + "total.txt", "w")
for agent in agents:
    textResults.write(agent["name"] + " " + str(agent["totalScore"]) + "\n")
textResults.close()

if verbose:
    print "Done!"

endTime = time()
print "Completed simulating " + str(nRounds) + " rounds in " + str(endTime - startTime) + " seconds."
sys.exit(0)
| |
# Copyright 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django import http
from django.utils.html import escape
from horizon.workflows import views
from mox import IsA # noqa
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.networks.subnets import tables\
as subnets_tables
from openstack_dashboard.dashboards.project.networks import tables\
as networks_tables
from openstack_dashboard.dashboards.project.networks import workflows
from openstack_dashboard.test import helpers as test
from openstack_dashboard.usage import quotas
# URL of the project networks index page, resolved once at import time.
INDEX_URL = reverse('horizon:project:networks:index')
def form_data_subnet(subnet,
                     name=None, cidr=None, ip_version=None,
                     gateway_ip='', enable_dhcp=None,
                     allocation_pools=None,
                     dns_nameservers=None,
                     host_routes=None):
    """Build a subnet-form POST dict, defaulting each field from *subnet*.

    Keyword arguments left at their defaults fall back to the attribute of
    the given subnet object; ``gateway_ip=None`` selects "no gateway".
    """
    def pick(override, fallback):
        # None means "use the subnet's own value"; anything else wins.
        return fallback if override is None else override

    data = {
        'subnet_name': pick(name, subnet.name),
        'cidr': pick(cidr, subnet.cidr),
        'ip_version': pick(ip_version, subnet.ip_version),
    }

    # '' (the default) means "inherit"; an explicit None means no gateway.
    if gateway_ip == '':
        gateway_ip = subnet.gateway_ip
    data['gateway_ip'] = gateway_ip or ''
    data['no_gateway'] = gateway_ip is None

    data['enable_dhcp'] = pick(enable_dhcp, subnet.enable_dhcp)
    if data['ip_version'] == 6:
        data['ipv6_modes'] = subnet.ipv6_modes

    data['allocation_pools'] = _str_allocation_pools(
        pick(allocation_pools, subnet.allocation_pools))
    data['dns_nameservers'] = _str_dns_nameservers(
        pick(dns_nameservers, subnet.dns_nameservers))
    data['host_routes'] = _str_host_routes(
        pick(host_routes, subnet.host_routes))
    return data
def form_data_no_subnet():
    """Return form data for creating a network with no subnet at all."""
    blank = dict.fromkeys(('subnet_name', 'cidr', 'gateway_ip',
                           'allocation_pools', 'dns_nameservers',
                           'host_routes'), '')
    blank.update(ip_version=4, no_gateway=False, enable_dhcp=True)
    return blank
def _str_allocation_pools(allocation_pools):
if isinstance(allocation_pools, str):
return allocation_pools
return '\n'.join(['%s,%s' % (pool['start'], pool['end'])
for pool in allocation_pools])
def _str_dns_nameservers(dns_nameservers):
if isinstance(dns_nameservers, str):
return dns_nameservers
return '\n'.join(dns_nameservers)
def _str_host_routes(host_routes):
if isinstance(host_routes, str):
return host_routes
return '\n'.join(['%s,%s' % (route['destination'], route['nexthop'])
for route in host_routes])
class NetworkTests(test.TestCase):
    @test.create_stubs({api.neutron: ('network_list',),
                        quotas: ('tenant_quota_usages',)})
    def test_index(self):
        """Index page lists tenant networks plus shared networks."""
        quota_data = self.quota_usages.first()
        quota_data['networks']['available'] = 5
        quota_data['subnets']['available'] = 5
        # The view lists tenant-owned networks and shared networks separately.
        api.neutron.network_list(
            IsA(http.HttpRequest),
            tenant_id=self.tenant.id,
            shared=False).AndReturn(self.networks.list())
        api.neutron.network_list(
            IsA(http.HttpRequest),
            shared=True).AndReturn([])
        quotas.tenant_quota_usages(
            IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(quota_data)
        self.mox.ReplayAll()

        res = self.client.get(INDEX_URL)

        self.assertTemplateUsed(res, 'project/networks/index.html')
        networks = res.context['networks_table'].data
        self.assertItemsEqual(networks, self.networks.list())
    @test.create_stubs({api.neutron: ('network_list',),
                        quotas: ('tenant_quota_usages',)})
    def test_index_network_list_exception(self):
        """A neutron failure yields an empty table plus one error message."""
        quota_data = self.quota_usages.first()
        quota_data['networks']['available'] = 5
        quota_data['subnets']['available'] = 5
        api.neutron.network_list(
            IsA(http.HttpRequest),
            tenant_id=self.tenant.id,
            shared=False).MultipleTimes().AndRaise(self.exceptions.neutron)
        quotas.tenant_quota_usages(
            IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(quota_data)
        self.mox.ReplayAll()

        res = self.client.get(INDEX_URL)

        self.assertTemplateUsed(res, 'project/networks/index.html')
        self.assertEqual(len(res.context['networks_table'].data), 0)
        self.assertMessageCount(res, error=1)
    @test.create_stubs({api.neutron: ('network_get',
                                      'subnet_list',
                                      'port_list',
                                      'is_extension_supported',),
                        quotas: ('tenant_quota_usages',)})
    def test_network_detail(self):
        """Detail page renders when mac-learning is unsupported."""
        self._test_network_detail()
    @test.create_stubs({api.neutron: ('network_get',
                                      'subnet_list',
                                      'port_list',
                                      'is_extension_supported',),
                        quotas: ('tenant_quota_usages',)})
    def test_network_detail_with_mac_learning(self):
        """Detail page renders when the mac-learning extension is enabled."""
        self._test_network_detail(mac_learning=True)
    def _test_network_detail(self, mac_learning=False):
        """Happy-path detail view: subnet and port tables are populated."""
        quota_data = self.quota_usages.first()
        quota_data['subnets']['available'] = 5
        network_id = self.networks.first().id
        # network_get is stubbed twice: the view fetches the network once
        # for the page and once more for the tables.
        api.neutron.network_get(IsA(http.HttpRequest), network_id)\
            .AndReturn(self.networks.first())
        api.neutron.subnet_list(IsA(http.HttpRequest), network_id=network_id)\
            .AndReturn([self.subnets.first()])
        api.neutron.port_list(IsA(http.HttpRequest), network_id=network_id)\
            .AndReturn([self.ports.first()])
        api.neutron.network_get(IsA(http.HttpRequest), network_id)\
            .AndReturn(self.networks.first())
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           'mac-learning')\
            .AndReturn(mac_learning)
        quotas.tenant_quota_usages(
            IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(quota_data)
        self.mox.ReplayAll()

        res = self.client.get(reverse('horizon:project:networks:detail',
                                      args=[network_id]))

        self.assertTemplateUsed(res, 'project/networks/detail.html')
        subnets = res.context['subnets_table'].data
        ports = res.context['ports_table'].data
        self.assertItemsEqual(subnets, [self.subnets.first()])
        self.assertItemsEqual(ports, [self.ports.first()])
    @test.create_stubs({api.neutron: ('network_get',
                                      'subnet_list',
                                      'port_list',
                                      'is_extension_supported',)})
    def test_network_detail_network_exception(self):
        """network_get failure redirects to index (mac-learning off)."""
        self._test_network_detail_network_exception()
    @test.create_stubs({api.neutron: ('network_get',
                                      'subnet_list',
                                      'port_list',
                                      'is_extension_supported',)})
    def test_network_detail_network_exception_with_mac_learning(self):
        """network_get failure redirects to index (mac-learning on)."""
        self._test_network_detail_network_exception(mac_learning=True)
    def _test_network_detail_network_exception(self, mac_learning=False):
        """When fetching the network fails the view redirects to the index."""
        network_id = self.networks.first().id
        api.neutron.network_get(IsA(http.HttpRequest), network_id)\
            .AndRaise(self.exceptions.neutron)
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           'mac-learning')\
            .AndReturn(mac_learning)
        self.mox.ReplayAll()

        url = reverse('horizon:project:networks:detail', args=[network_id])
        res = self.client.get(url)

        redir_url = INDEX_URL
        self.assertRedirectsNoFollow(res, redir_url)
    @test.create_stubs({api.neutron: ('network_get',
                                      'subnet_list',
                                      'port_list',
                                      'is_extension_supported',),
                        quotas: ('tenant_quota_usages',)})
    def test_network_detail_subnet_exception(self):
        """subnet_list failure still renders the page (mac-learning off)."""
        self._test_network_detail_subnet_exception()
    @test.create_stubs({api.neutron: ('network_get',
                                      'subnet_list',
                                      'port_list',
                                      'is_extension_supported',),
                        quotas: ('tenant_quota_usages',)})
    def test_network_detail_subnet_exception_with_mac_learning(self):
        """subnet_list failure still renders the page (mac-learning on)."""
        self._test_network_detail_subnet_exception(mac_learning=True)
    def _test_network_detail_subnet_exception(self, mac_learning=False):
        """Detail view renders with an empty subnet table when subnet_list
        fails while ports are still listed normally.
        """
        network_id = self.networks.first().id
        quota_data = self.quota_usages.first()
        quota_data['networks']['available'] = 5
        quota_data['subnets']['available'] = 5
        api.neutron.network_get(IsA(http.HttpRequest), network_id).\
            AndReturn(self.networks.first())
        api.neutron.subnet_list(IsA(http.HttpRequest), network_id=network_id).\
            AndRaise(self.exceptions.neutron)
        api.neutron.port_list(IsA(http.HttpRequest), network_id=network_id).\
            AndReturn([self.ports.first()])
        # Called from SubnetTable
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           'mac-learning')\
            .AndReturn(mac_learning)
        quotas.tenant_quota_usages(
            IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(quota_data)
        self.mox.ReplayAll()
        res = self.client.get(reverse('horizon:project:networks:detail',
                                      args=[network_id]))
        self.assertTemplateUsed(res, 'project/networks/detail.html')
        subnets = res.context['subnets_table'].data
        ports = res.context['ports_table'].data
        # subnet_list failed, so the subnet table must be empty.
        self.assertEqual(len(subnets), 0)
        self.assertItemsEqual(ports, [self.ports.first()])
@test.create_stubs({api.neutron: ('network_get',
'subnet_list',
'port_list',
'is_extension_supported',),
quotas: ('tenant_quota_usages',)})
def test_network_detail_port_exception(self):
self._test_network_detail_port_exception()
@test.create_stubs({api.neutron: ('network_get',
'subnet_list',
'port_list',
'is_extension_supported',),
quotas: ('tenant_quota_usages',)})
def test_network_detail_port_exception_with_mac_learning(self):
self._test_network_detail_port_exception(mac_learning=True)
    def _test_network_detail_port_exception(self, mac_learning=False):
        """Detail view renders with an empty port table when port_list fails
        while subnets are still listed normally.
        """
        network_id = self.networks.first().id
        quota_data = self.quota_usages.first()
        quota_data['subnets']['available'] = 5
        api.neutron.network_get(IsA(http.HttpRequest), network_id).\
            AndReturn(self.networks.first())
        api.neutron.subnet_list(IsA(http.HttpRequest), network_id=network_id).\
            AndReturn([self.subnets.first()])
        api.neutron.port_list(IsA(http.HttpRequest), network_id=network_id).\
            AndRaise(self.exceptions.neutron)
        # Called from SubnetTable
        api.neutron.network_get(IsA(http.HttpRequest), network_id).\
            AndReturn(self.networks.first())
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           'mac-learning')\
            .AndReturn(mac_learning)
        quotas.tenant_quota_usages(
            IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(quota_data)
        self.mox.ReplayAll()
        res = self.client.get(reverse('horizon:project:networks:detail',
                                      args=[network_id]))
        self.assertTemplateUsed(res, 'project/networks/detail.html')
        subnets = res.context['subnets_table'].data
        ports = res.context['ports_table'].data
        self.assertItemsEqual(subnets, [self.subnets.first()])
        # port_list failed, so the port table must be empty.
        self.assertEqual(len(ports), 0)
    @test.create_stubs({api.neutron: ('profile_list',)})
    def test_network_create_get(self,
                                test_with_profile=False):
        """GET of the create-network workflow shows the expected steps.

        With test_with_profile=True a profile_list call is also expected
        (Cisco N1K profile support).
        """
        if test_with_profile:
            net_profiles = self.net_profiles.list()
            api.neutron.profile_list(IsA(http.HttpRequest),
                                     'network').AndReturn(net_profiles)
        self.mox.ReplayAll()
        url = reverse('horizon:project:networks:create')
        res = self.client.get(url)
        workflow = res.context['workflow']
        self.assertTemplateUsed(res, views.WorkflowView.template_name)
        self.assertEqual(workflow.name, workflows.CreateNetwork.name)
        expected_objs = ['<CreateNetworkInfo: createnetworkinfoaction>',
                         '<CreateSubnetInfo: createsubnetinfoaction>',
                         '<CreateSubnetDetail: createsubnetdetailaction>']
        self.assertQuerysetEqual(workflow.steps, expected_objs)
@test.update_settings(
OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
def test_network_create_get_with_profile(self):
self.test_network_create_get(test_with_profile=True)
    @test.create_stubs({api.neutron: ('network_create',
                                      'profile_list',)})
    def test_network_create_post(self,
                                 test_with_profile=False):
        """POST of the create form (no subnet) calls network_create and
        redirects to the index.
        """
        network = self.networks.first()
        params = {'name': network.name,
                  'admin_state_up': network.admin_state_up}
        if test_with_profile:
            net_profiles = self.net_profiles.list()
            net_profile_id = self.net_profiles.first().id
            api.neutron.profile_list(IsA(http.HttpRequest),
                                     'network').AndReturn(net_profiles)
            params['net_profile_id'] = net_profile_id
        api.neutron.network_create(IsA(http.HttpRequest),
                                   **params).AndReturn(network)
        self.mox.ReplayAll()
        form_data = {'net_name': network.name,
                     'admin_state': network.admin_state_up,
                     # subnet
                     'with_subnet': False}
        if test_with_profile:
            form_data['net_profile_id'] = net_profile_id
        form_data.update(form_data_no_subnet())
        url = reverse('horizon:project:networks:create')
        res = self.client.post(url, form_data)
        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, INDEX_URL)
@test.update_settings(
OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
def test_network_create_post_with_profile(self):
self.test_network_create_post(test_with_profile=True)
    @test.create_stubs({api.neutron: ('network_create',
                                      'subnet_create',
                                      'profile_list',)})
    def test_network_create_post_with_subnet(self,
                                             test_with_profile=False,
                                             test_with_ipv6=True):
        """POST of the create form with a subnet calls network_create then
        subnet_create and redirects to the index.

        test_with_ipv6=False forces the subnet to IPv4 (used by the
        enable_ipv6=False setting test).
        """
        network = self.networks.first()
        subnet = self.subnets.first()
        params = {'name': network.name,
                  'admin_state_up': network.admin_state_up}
        subnet_params = {'network_id': network.id,
                         'name': subnet.name,
                         'cidr': subnet.cidr,
                         'ip_version': subnet.ip_version,
                         'gateway_ip': subnet.gateway_ip,
                         'enable_dhcp': subnet.enable_dhcp}
        if test_with_profile:
            net_profiles = self.net_profiles.list()
            net_profile_id = self.net_profiles.first().id
            api.neutron.profile_list(IsA(http.HttpRequest),
                                     'network').AndReturn(net_profiles)
            params['net_profile_id'] = net_profile_id
        if not test_with_ipv6:
            subnet.ip_version = 4
            subnet_params['ip_version'] = subnet.ip_version
        api.neutron.network_create(IsA(http.HttpRequest),
                                   **params).AndReturn(network)
        api.neutron.subnet_create(IsA(http.HttpRequest),
                                  **subnet_params).AndReturn(subnet)
        self.mox.ReplayAll()
        form_data = {'net_name': network.name,
                     'admin_state': network.admin_state_up,
                     'with_subnet': True}
        if test_with_profile:
            form_data['net_profile_id'] = net_profile_id
        form_data.update(form_data_subnet(subnet, allocation_pools=[]))
        url = reverse('horizon:project:networks:create')
        res = self.client.post(url, form_data)
        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, INDEX_URL)
@test.update_settings(
OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
def test_network_create_post_with_subnet_w_profile(self):
self.test_network_create_post_with_subnet(test_with_profile=True)
@test.update_settings(OPENSTACK_NEUTRON_NETWORK={'enable_ipv6': False})
def test_create_network_with_ipv6_disabled(self):
self.test_network_create_post_with_subnet(test_with_ipv6=False)
    @test.create_stubs({api.neutron: ('network_create',
                                      'profile_list',)})
    def test_network_create_post_network_exception(self,
                                                   test_with_profile=False):
        """network_create failure on a no-subnet POST still redirects to
        the index (error is reported via messages, not a form error).
        """
        network = self.networks.first()
        params = {'name': network.name,
                  'admin_state_up': network.admin_state_up}
        if test_with_profile:
            net_profiles = self.net_profiles.list()
            net_profile_id = self.net_profiles.first().id
            api.neutron.profile_list(IsA(http.HttpRequest),
                                     'network').AndReturn(net_profiles)
            params['net_profile_id'] = net_profile_id
        api.neutron.network_create(IsA(http.HttpRequest),
                                   **params).AndRaise(self.exceptions.neutron)
        self.mox.ReplayAll()
        form_data = {'net_name': network.name,
                     'admin_state': network.admin_state_up,
                     # subnet
                     'with_subnet': False}
        if test_with_profile:
            form_data['net_profile_id'] = net_profile_id
        form_data.update(form_data_no_subnet())
        url = reverse('horizon:project:networks:create')
        res = self.client.post(url, form_data)
        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, INDEX_URL)
@test.update_settings(
OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
def test_network_create_post_nw_exception_w_profile(self):
self.test_network_create_post_network_exception(
test_with_profile=True)
    @test.create_stubs({api.neutron: ('network_create',
                                      'profile_list')})
    def test_network_create_post_with_subnet_network_exception(
        self,
        test_with_profile=False,
    ):
        """network_create failure on a with-subnet POST: no subnet_create
        is attempted and the view redirects to the index.
        """
        network = self.networks.first()
        subnet = self.subnets.first()
        params = {'name': network.name,
                  'admin_state_up': network.admin_state_up}
        if test_with_profile:
            net_profiles = self.net_profiles.list()
            net_profile_id = self.net_profiles.first().id
            api.neutron.profile_list(IsA(http.HttpRequest),
                                     'network').AndReturn(net_profiles)
            params['net_profile_id'] = net_profile_id
        api.neutron.network_create(IsA(http.HttpRequest),
                                   **params).AndRaise(self.exceptions.neutron)
        self.mox.ReplayAll()
        form_data = {'net_name': network.name,
                     'admin_state': network.admin_state_up,
                     'with_subnet': True}
        if test_with_profile:
            form_data['net_profile_id'] = net_profile_id
        form_data.update(form_data_subnet(subnet, allocation_pools=[]))
        url = reverse('horizon:project:networks:create')
        res = self.client.post(url, form_data)
        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, INDEX_URL)
@test.update_settings(
OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
def test_nw_create_post_w_subnet_nw_exception_w_profile(self):
self.test_network_create_post_with_subnet_network_exception(
test_with_profile=True)
    @test.create_stubs({api.neutron: ('network_create',
                                      'network_delete',
                                      'subnet_create',
                                      'profile_list')})
    def test_network_create_post_with_subnet_subnet_exception(
        self,
        test_with_profile=False,
    ):
        """subnet_create failure after a successful network_create rolls the
        network back via network_delete, then redirects to the index.
        """
        network = self.networks.first()
        subnet = self.subnets.first()
        params = {'name': network.name,
                  'admin_state_up': network.admin_state_up}
        if test_with_profile:
            net_profiles = self.net_profiles.list()
            net_profile_id = self.net_profiles.first().id
            api.neutron.profile_list(IsA(http.HttpRequest),
                                     'network').AndReturn(net_profiles)
            params['net_profile_id'] = net_profile_id
        api.neutron.network_create(IsA(http.HttpRequest),
                                   **params).AndReturn(network)
        api.neutron.subnet_create(IsA(http.HttpRequest),
                                  network_id=network.id,
                                  name=subnet.name,
                                  cidr=subnet.cidr,
                                  ip_version=subnet.ip_version,
                                  gateway_ip=subnet.gateway_ip,
                                  enable_dhcp=subnet.enable_dhcp)\
            .AndRaise(self.exceptions.neutron)
        # The freshly-created network must be deleted on subnet failure.
        api.neutron.network_delete(IsA(http.HttpRequest),
                                   network.id)
        self.mox.ReplayAll()
        form_data = {'net_name': network.name,
                     'admin_state': network.admin_state_up,
                     'with_subnet': True}
        if test_with_profile:
            form_data['net_profile_id'] = net_profile_id
        form_data.update(form_data_subnet(subnet, allocation_pools=[]))
        url = reverse('horizon:project:networks:create')
        res = self.client.post(url, form_data)
        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, INDEX_URL)
@test.update_settings(
OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
def test_nw_create_post_w_subnet_subnet_exception_w_profile(self):
self.test_network_create_post_with_subnet_subnet_exception(
test_with_profile=True)
    @test.create_stubs({api.neutron: ('profile_list',)})
    def test_network_create_post_with_subnet_nocidr(self,
                                                    test_with_profile=False):
        """'Create Subnet' checked but CIDR left empty is a form error;
        no neutron create calls are recorded.
        """
        network = self.networks.first()
        subnet = self.subnets.first()
        if test_with_profile:
            net_profiles = self.net_profiles.list()
            net_profile_id = self.net_profiles.first().id
            api.neutron.profile_list(IsA(http.HttpRequest),
                                     'network').AndReturn(net_profiles)
        self.mox.ReplayAll()
        form_data = {'net_name': network.name,
                     'admin_state': network.admin_state_up,
                     'with_subnet': True}
        if test_with_profile:
            form_data['net_profile_id'] = net_profile_id
        form_data.update(form_data_subnet(subnet, cidr='',
                                          allocation_pools=[]))
        url = reverse('horizon:project:networks:create')
        res = self.client.post(url, form_data)
        self.assertContains(res, escape('Specify "Network Address" or '
                                        'clear "Create Subnet" checkbox.'))
@test.update_settings(
OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
def test_nw_create_post_w_subnet_no_cidr_w_profile(self):
self.test_network_create_post_with_subnet_nocidr(
test_with_profile=True)
    @test.create_stubs({api.neutron: ('profile_list',)})
    def test_network_create_post_with_subnet_cidr_without_mask(
        self,
        test_with_profile=False,
    ):
        """A CIDR with no prefix length ('10.0.0.0') is rejected as a /32."""
        network = self.networks.first()
        subnet = self.subnets.first()
        if test_with_profile:
            net_profiles = self.net_profiles.list()
            net_profile_id = self.net_profiles.first().id
            api.neutron.profile_list(IsA(http.HttpRequest),
                                     'network').AndReturn(net_profiles)
        self.mox.ReplayAll()
        form_data = {'net_name': network.name,
                     'admin_state': network.admin_state_up,
                     'with_subnet': True}
        if test_with_profile:
            form_data['net_profile_id'] = net_profile_id
        form_data.update(form_data_subnet(subnet, cidr='10.0.0.0',
                                          allocation_pools=[]))
        url = reverse('horizon:project:networks:create')
        res = self.client.post(url, form_data)
        expected_msg = "The subnet in the Network Address is too small (/32)."
        self.assertContains(res, expected_msg)
@test.update_settings(
OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
def test_nw_create_post_w_subnet_cidr_without_mask_w_profile(self):
self.test_network_create_post_with_subnet_cidr_without_mask(
test_with_profile=True)
    @test.create_stubs({api.neutron: ('profile_list',)})
    def test_network_create_post_with_subnet_cidr_inconsistent(
        self,
        test_with_profile=False,
    ):
        """An IPv6 CIDR with an IPv4 ip_version selection is rejected."""
        network = self.networks.first()
        subnet = self.subnets.first()
        if test_with_profile:
            net_profiles = self.net_profiles.list()
            net_profile_id = self.net_profiles.first().id
            api.neutron.profile_list(IsA(http.HttpRequest),
                                     'network').AndReturn(net_profiles)
        self.mox.ReplayAll()
        # dummy IPv6 address
        cidr = '2001:0DB8:0:CD30:123:4567:89AB:CDEF/60'
        form_data = {'net_name': network.name,
                     'admin_state': network.admin_state_up,
                     'with_subnet': True}
        if test_with_profile:
            form_data['net_profile_id'] = net_profile_id
        form_data.update(form_data_subnet(subnet, cidr=cidr,
                                          allocation_pools=[]))
        url = reverse('horizon:project:networks:create')
        res = self.client.post(url, form_data)
        expected_msg = 'Network Address and IP version are inconsistent.'
        self.assertContains(res, expected_msg)
@test.update_settings(
OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
def test_network_create_post_with_subnet_cidr_inconsistent_w_profile(self):
self.test_network_create_post_with_subnet_cidr_inconsistent(
test_with_profile=True)
    @test.create_stubs({api.neutron: ('profile_list',)})
    def test_network_create_post_with_subnet_gw_inconsistent(
        self,
        test_with_profile=False,
    ):
        """An IPv6 gateway with an IPv4 subnet selection is rejected."""
        network = self.networks.first()
        subnet = self.subnets.first()
        if test_with_profile:
            net_profiles = self.net_profiles.list()
            net_profile_id = self.net_profiles.first().id
            api.neutron.profile_list(IsA(http.HttpRequest),
                                     'network').AndReturn(net_profiles)
        self.mox.ReplayAll()
        # dummy IPv6 address
        gateway_ip = '2001:0DB8:0:CD30:123:4567:89AB:CDEF'
        form_data = {'net_name': network.name,
                     'admin_state': network.admin_state_up,
                     'with_subnet': True}
        if test_with_profile:
            form_data['net_profile_id'] = net_profile_id
        form_data.update(form_data_subnet(subnet, gateway_ip=gateway_ip,
                                          allocation_pools=[]))
        url = reverse('horizon:project:networks:create')
        res = self.client.post(url, form_data)
        self.assertContains(res, 'Gateway IP and IP version are inconsistent.')
@test.update_settings(
OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
def test_network_create_post_with_subnet_gw_inconsistent_w_profile(self):
self.test_network_create_post_with_subnet_gw_inconsistent(
test_with_profile=True)
    @test.create_stubs({api.neutron: ('network_get',)})
    def test_network_update_get(self):
        """GET of the update form renders the update template."""
        network = self.networks.first()
        api.neutron.network_get(IsA(http.HttpRequest), network.id)\
            .AndReturn(network)
        self.mox.ReplayAll()
        url = reverse('horizon:project:networks:update', args=[network.id])
        res = self.client.get(url)
        self.assertTemplateUsed(res, 'project/networks/update.html')
    @test.create_stubs({api.neutron: ('network_get',)})
    def test_network_update_get_exception(self):
        """network_get failure on the update form redirects to the index."""
        network = self.networks.first()
        api.neutron.network_get(IsA(http.HttpRequest), network.id)\
            .AndRaise(self.exceptions.neutron)
        self.mox.ReplayAll()
        url = reverse('horizon:project:networks:update', args=[network.id])
        res = self.client.get(url)
        redir_url = INDEX_URL
        self.assertRedirectsNoFollow(res, redir_url)
    @test.create_stubs({api.neutron: ('network_update',
                                      'network_get',)})
    def test_network_update_post(self):
        """POST of the update form calls network_update and redirects."""
        network = self.networks.first()
        api.neutron.network_update(IsA(http.HttpRequest), network.id,
                                   name=network.name,
                                   admin_state_up=network.admin_state_up)\
            .AndReturn(network)
        api.neutron.network_get(IsA(http.HttpRequest), network.id)\
            .AndReturn(network)
        self.mox.ReplayAll()
        form_data = {'network_id': network.id,
                     'name': network.name,
                     'admin_state': network.admin_state_up,
                     'tenant_id': network.tenant_id}
        url = reverse('horizon:project:networks:update', args=[network.id])
        res = self.client.post(url, form_data)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.neutron: ('network_update',
                                      'network_get',)})
    def test_network_update_post_exception(self):
        """network_update failure still redirects back to the index."""
        network = self.networks.first()
        api.neutron.network_update(IsA(http.HttpRequest), network.id,
                                   name=network.name,
                                   admin_state_up=network.admin_state_up)\
            .AndRaise(self.exceptions.neutron)
        api.neutron.network_get(IsA(http.HttpRequest), network.id)\
            .AndReturn(network)
        self.mox.ReplayAll()
        form_data = {'network_id': network.id,
                     'name': network.name,
                     'admin_state': network.admin_state_up,
                     'tenant_id': network.tenant_id}
        url = reverse('horizon:project:networks:update', args=[network.id])
        res = self.client.post(url, form_data)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.neutron: ('network_get',
                                      'network_list',
                                      'network_delete')})
    def test_delete_network_no_subnet(self):
        """Deleting a network with no subnets calls only network_delete."""
        network = self.networks.first()
        network.subnets = []
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id,
                                expand_subnet=False)\
            .AndReturn(network)
        # The index table redraw lists tenant and shared networks.
        api.neutron.network_list(IsA(http.HttpRequest),
                                 tenant_id=network.tenant_id,
                                 shared=False)\
            .AndReturn([network])
        api.neutron.network_list(IsA(http.HttpRequest),
                                 shared=True)\
            .AndReturn([])
        api.neutron.network_delete(IsA(http.HttpRequest), network.id)
        self.mox.ReplayAll()
        form_data = {'action': 'networks__delete__%s' % network.id}
        res = self.client.post(INDEX_URL, form_data)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.neutron: ('network_get',
                                      'network_list',
                                      'network_delete',
                                      'subnet_delete')})
    def test_delete_network_with_subnet(self):
        """Deleting a network deletes its subnet first, then the network."""
        network = self.networks.first()
        network.subnets = [subnet.id for subnet in network.subnets]
        subnet_id = network.subnets[0]
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id,
                                expand_subnet=False)\
            .AndReturn(network)
        api.neutron.network_list(IsA(http.HttpRequest),
                                 tenant_id=network.tenant_id,
                                 shared=False)\
            .AndReturn([network])
        api.neutron.network_list(IsA(http.HttpRequest), shared=True)\
            .AndReturn([])
        api.neutron.subnet_delete(IsA(http.HttpRequest), subnet_id)
        api.neutron.network_delete(IsA(http.HttpRequest), network.id)
        self.mox.ReplayAll()
        form_data = {'action': 'networks__delete__%s' % network.id}
        res = self.client.post(INDEX_URL, form_data)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.neutron: ('network_get',
                                      'network_list',
                                      'network_delete',
                                      'subnet_delete')})
    def test_delete_network_exception(self):
        """network_delete failure during delete still redirects to index."""
        network = self.networks.first()
        network.subnets = [subnet.id for subnet in network.subnets]
        subnet_id = network.subnets[0]
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id,
                                expand_subnet=False)\
            .AndReturn(network)
        api.neutron.network_list(IsA(http.HttpRequest),
                                 tenant_id=network.tenant_id,
                                 shared=False)\
            .AndReturn([network])
        api.neutron.network_list(IsA(http.HttpRequest),
                                 shared=True)\
            .AndReturn([])
        api.neutron.subnet_delete(IsA(http.HttpRequest), subnet_id)
        api.neutron.network_delete(IsA(http.HttpRequest), network.id)\
            .AndRaise(self.exceptions.neutron)
        self.mox.ReplayAll()
        form_data = {'action': 'networks__delete__%s' % network.id}
        res = self.client.post(INDEX_URL, form_data)
        self.assertRedirectsNoFollow(res, INDEX_URL)
class NetworkSubnetTests(test.TestCase):
    @test.create_stubs({api.neutron: ('network_get', 'subnet_get',)})
    def test_subnet_detail(self):
        """Subnet detail page renders with the requested subnet in context."""
        network = self.networks.first()
        subnet = self.subnets.first()
        api.neutron.network_get(IsA(http.HttpRequest), network.id)\
            .AndReturn(network)
        api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
            .AndReturn(subnet)
        self.mox.ReplayAll()
        url = reverse('horizon:project:networks:subnets:detail',
                      args=[subnet.id])
        res = self.client.get(url)
        self.assertTemplateUsed(res, 'project/networks/subnets/detail.html')
        self.assertEqual(res.context['subnet'].id, subnet.id)
    @test.create_stubs({api.neutron: ('subnet_get',)})
    def test_subnet_detail_exception(self):
        """subnet_get failure on the detail page redirects to the index."""
        subnet = self.subnets.first()
        api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
            .AndRaise(self.exceptions.neutron)
        self.mox.ReplayAll()
        url = reverse('horizon:project:networks:subnets:detail',
                      args=[subnet.id])
        res = self.client.get(url)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.neutron: ('network_get',)})
    def test_subnet_create_get(self):
        """GET of the add-subnet workflow renders the workflow template."""
        network = self.networks.first()
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id)\
            .AndReturn(self.networks.first())
        self.mox.ReplayAll()
        url = reverse('horizon:project:networks:addsubnet',
                      args=[network.id])
        res = self.client.get(url)
        self.assertTemplateUsed(res, views.WorkflowView.template_name)
    @test.create_stubs({api.neutron: ('network_get',
                                      'subnet_create',)})
    def test_subnet_create_post(self):
        """POST of the add-subnet form calls subnet_create and redirects
        to the parent network's detail page.
        """
        network = self.networks.first()
        subnet = self.subnets.first()
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id)\
            .AndReturn(self.networks.first())
        api.neutron.subnet_create(IsA(http.HttpRequest),
                                  network_id=network.id,
                                  name=subnet.name,
                                  cidr=subnet.cidr,
                                  ip_version=subnet.ip_version,
                                  gateway_ip=subnet.gateway_ip,
                                  enable_dhcp=subnet.enable_dhcp,
                                  allocation_pools=subnet.allocation_pools)\
            .AndReturn(subnet)
        self.mox.ReplayAll()
        form_data = form_data_subnet(subnet)
        url = reverse('horizon:project:networks:addsubnet',
                      args=[subnet.network_id])
        res = self.client.post(url, form_data)
        self.assertNoFormErrors(res)
        redir_url = reverse('horizon:project:networks:detail',
                            args=[subnet.network_id])
        self.assertRedirectsNoFollow(res, redir_url)
    @test.create_stubs({api.neutron: ('network_get',
                                      'subnet_create',)})
    def test_subnet_create_post_with_additional_attributes(self):
        """Add-subnet POST passes dns_nameservers and host_routes through
        to subnet_create when the fixture subnet defines them.
        """
        network = self.networks.list()[1]
        subnet = self.subnets.list()[1]
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id)\
            .AndReturn(self.networks.first())
        api.neutron.subnet_create(IsA(http.HttpRequest),
                                  network_id=network.id,
                                  name=subnet.name,
                                  cidr=subnet.cidr,
                                  ip_version=subnet.ip_version,
                                  gateway_ip=subnet.gateway_ip,
                                  enable_dhcp=subnet.enable_dhcp,
                                  allocation_pools=subnet.allocation_pools,
                                  dns_nameservers=subnet.dns_nameservers,
                                  host_routes=subnet.host_routes)\
            .AndReturn(subnet)
        self.mox.ReplayAll()
        form_data = form_data_subnet(subnet)
        url = reverse('horizon:project:networks:addsubnet',
                      args=[subnet.network_id])
        res = self.client.post(url, form_data)
        self.assertNoFormErrors(res)
        redir_url = reverse('horizon:project:networks:detail',
                            args=[subnet.network_id])
        self.assertRedirectsNoFollow(res, redir_url)
    @test.create_stubs({api.neutron: ('network_get',
                                      'subnet_create',)})
    def test_subnet_create_post_with_additional_attributes_no_gateway(self):
        """Add-subnet POST with gateway disabled sends gateway_ip=None."""
        network = self.networks.first()
        subnet = self.subnets.first()
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id)\
            .AndReturn(self.networks.first())
        api.neutron.subnet_create(IsA(http.HttpRequest),
                                  network_id=network.id,
                                  name=subnet.name,
                                  cidr=subnet.cidr,
                                  ip_version=subnet.ip_version,
                                  gateway_ip=None,
                                  enable_dhcp=subnet.enable_dhcp,
                                  allocation_pools=subnet.allocation_pools)\
            .AndReturn(subnet)
        self.mox.ReplayAll()
        form_data = form_data_subnet(subnet, gateway_ip=None)
        url = reverse('horizon:project:networks:addsubnet',
                      args=[subnet.network_id])
        res = self.client.post(url, form_data)
        self.assertNoFormErrors(res)
        redir_url = reverse('horizon:project:networks:detail',
                            args=[subnet.network_id])
        self.assertRedirectsNoFollow(res, redir_url)
    @test.create_stubs({api.neutron: ('network_get',
                                      'subnet_create',)})
    def test_subnet_create_post_network_exception(self):
        """network_get failure during add-subnet POST redirects to the
        index; subnet_create is never reached.
        """
        network = self.networks.first()
        subnet = self.subnets.first()
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id)\
            .AndRaise(self.exceptions.neutron)
        self.mox.ReplayAll()
        form_data = form_data_subnet(subnet,
                                     allocation_pools=[])
        url = reverse('horizon:project:networks:addsubnet',
                      args=[subnet.network_id])
        res = self.client.post(url, form_data)
        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.neutron: ('network_get',
                                      'subnet_create',)})
    def test_subnet_create_post_subnet_exception(self):
        """subnet_create failure redirects to the network detail page."""
        network = self.networks.first()
        subnet = self.subnets.first()
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id)\
            .AndReturn(self.networks.first())
        api.neutron.subnet_create(IsA(http.HttpRequest),
                                  network_id=network.id,
                                  name=subnet.name,
                                  cidr=subnet.cidr,
                                  ip_version=subnet.ip_version,
                                  gateway_ip=subnet.gateway_ip,
                                  enable_dhcp=subnet.enable_dhcp)\
            .AndRaise(self.exceptions.neutron)
        self.mox.ReplayAll()
        form_data = form_data_subnet(subnet,
                                     allocation_pools=[])
        url = reverse('horizon:project:networks:addsubnet',
                      args=[subnet.network_id])
        res = self.client.post(url, form_data)
        redir_url = reverse('horizon:project:networks:detail',
                            args=[subnet.network_id])
        self.assertRedirectsNoFollow(res, redir_url)
    @test.create_stubs({api.neutron: ('network_get',)})
    def test_subnet_create_post_cidr_inconsistent(self):
        """IPv6 CIDR on an IPv4 subnet form produces one form error."""
        network = self.networks.first()
        subnet = self.subnets.first()
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id)\
            .AndReturn(self.networks.first())
        self.mox.ReplayAll()
        # dummy IPv6 address
        cidr = '2001:0DB8:0:CD30:123:4567:89AB:CDEF/60'
        form_data = form_data_subnet(subnet, cidr=cidr,
                                     allocation_pools=[])
        url = reverse('horizon:project:networks:addsubnet',
                      args=[subnet.network_id])
        res = self.client.post(url, form_data)
        expected_msg = 'Network Address and IP version are inconsistent.'
        self.assertFormErrors(res, 1, expected_msg)
        self.assertTemplateUsed(res, views.WorkflowView.template_name)
    @test.create_stubs({api.neutron: ('network_get',)})
    def test_subnet_create_post_gw_inconsistent(self):
        """IPv6 gateway on an IPv4 subnet form is rejected."""
        network = self.networks.first()
        subnet = self.subnets.first()
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id)\
            .AndReturn(self.networks.first())
        self.mox.ReplayAll()
        # dummy IPv6 address
        gateway_ip = '2001:0DB8:0:CD30:123:4567:89AB:CDEF'
        form_data = form_data_subnet(subnet, gateway_ip=gateway_ip,
                                     allocation_pools=[])
        url = reverse('horizon:project:networks:addsubnet',
                      args=[subnet.network_id])
        res = self.client.post(url, form_data)
        self.assertContains(res, 'Gateway IP and IP version are inconsistent.')
    @test.create_stubs({api.neutron: ('network_get',)})
    def test_subnet_create_post_invalid_pools_start_only(self):
        """An allocation pool with only a start address is rejected."""
        network = self.networks.first()
        subnet = self.subnets.first()
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id).AndReturn(network)
        self.mox.ReplayAll()
        # Start only allocation_pools
        allocation_pools = '10.0.0.2'
        form_data = form_data_subnet(subnet,
                                     allocation_pools=allocation_pools)
        url = reverse('horizon:project:networks:addsubnet',
                      args=[subnet.network_id])
        res = self.client.post(url, form_data)
        self.assertContains(res,
                            'Start and end addresses must be specified '
                            '(value=%s)' % allocation_pools)
    @test.create_stubs({api.neutron: ('network_get',)})
    def test_subnet_create_post_invalid_pools_three_entries(self):
        """An allocation pool line with three addresses is rejected."""
        network = self.networks.first()
        subnet = self.subnets.first()
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id).AndReturn(network)
        self.mox.ReplayAll()
        # pool with three entries
        allocation_pools = '10.0.0.2,10.0.0.3,10.0.0.4'
        form_data = form_data_subnet(subnet,
                                     allocation_pools=allocation_pools)
        url = reverse('horizon:project:networks:addsubnet',
                      args=[subnet.network_id])
        res = self.client.post(url, form_data)
        self.assertContains(res,
                            'Start and end addresses must be specified '
                            '(value=%s)' % allocation_pools)
    @test.create_stubs({api.neutron: ('network_get',)})
    def test_subnet_create_post_invalid_pools_invalid_address(self):
        """An allocation pool whose end is not an IP address is rejected."""
        network = self.networks.first()
        subnet = self.subnets.first()
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id).AndReturn(network)
        self.mox.ReplayAll()
        # end address is not a valid IP address
        allocation_pools = '10.0.0.2,invalid_address'
        form_data = form_data_subnet(subnet,
                                     allocation_pools=allocation_pools)
        url = reverse('horizon:project:networks:addsubnet',
                      args=[subnet.network_id])
        res = self.client.post(url, form_data)
        self.assertContains(res,
                            'allocation_pools: Invalid IP address '
                            '(value=%s)' % allocation_pools.split(',')[1])
    @test.create_stubs({api.neutron: ('network_get',)})
    def test_subnet_create_post_invalid_pools_ip_network(self):
        """An allocation pool whose start is a CIDR (not a host IP) is
        rejected.
        """
        network = self.networks.first()
        subnet = self.subnets.first()
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id).AndReturn(network)
        self.mox.ReplayAll()
        # start address is CIDR
        allocation_pools = '10.0.0.2/24,10.0.0.5'
        form_data = form_data_subnet(subnet,
                                     allocation_pools=allocation_pools)
        url = reverse('horizon:project:networks:addsubnet',
                      args=[subnet.network_id])
        res = self.client.post(url, form_data)
        self.assertContains(res,
                            'allocation_pools: Invalid IP address '
                            '(value=%s)' % allocation_pools.split(',')[0])
    @test.create_stubs({api.neutron: ('network_get',)})
    def test_subnet_create_post_invalid_pools_start_larger_than_end(self):
        """An allocation pool whose start exceeds its end is rejected."""
        network = self.networks.first()
        subnet = self.subnets.first()
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id).AndReturn(network)
        self.mox.ReplayAll()
        # start address is larger than end address
        allocation_pools = '10.0.0.254,10.0.0.2'
        form_data = form_data_subnet(subnet,
                                     allocation_pools=allocation_pools)
        url = reverse('horizon:project:networks:addsubnet',
                      args=[subnet.network_id])
        res = self.client.post(url, form_data)
        self.assertContains(res,
                            'Start address is larger than end address '
                            '(value=%s)' % allocation_pools)
    @test.create_stubs({api.neutron: ('network_get',)})
    def test_subnet_create_post_invalid_nameservers(self):
        """A non-IP DNS nameserver entry is rejected with a form error."""
        network = self.networks.first()
        subnet = self.subnets.first()
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id).AndReturn(network)
        self.mox.ReplayAll()
        # invalid DNS server address
        dns_nameservers = ['192.168.0.2', 'invalid_address']
        form_data = form_data_subnet(subnet, dns_nameservers=dns_nameservers,
                                     allocation_pools=[])
        url = reverse('horizon:project:networks:addsubnet',
                      args=[subnet.network_id])
        res = self.client.post(url, form_data)
        self.assertContains(res,
                            'dns_nameservers: Invalid IP address '
                            '(value=%s)' % dns_nameservers[1])
    @test.create_stubs({api.neutron: ('network_get',)})
    def test_subnet_create_post_invalid_routes_destination_only(self):
        """A host route with a destination but no nexthop is rejected."""
        network = self.networks.first()
        subnet = self.subnets.first()
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id).AndReturn(network)
        self.mox.ReplayAll()
        # Start only host_route
        host_routes = '192.168.0.0/24'
        form_data = form_data_subnet(subnet,
                                     allocation_pools=[],
                                     host_routes=host_routes)
        url = reverse('horizon:project:networks:addsubnet',
                      args=[subnet.network_id])
        res = self.client.post(url, form_data)
        self.assertContains(res,
                            'Host Routes format error: '
                            'Destination CIDR and nexthop must be specified '
                            '(value=%s)' % host_routes)
    @test.create_stubs({api.neutron: ('network_get',)})
    def test_subnet_create_post_invalid_routes_three_entries(self):
        """A host route line with three comma-separated fields is rejected."""
        network = self.networks.first()
        subnet = self.subnets.first()
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id).AndReturn(network)
        self.mox.ReplayAll()
        # host_route with three entries
        host_routes = 'aaaa,bbbb,cccc'
        form_data = form_data_subnet(subnet,
                                     allocation_pools=[],
                                     host_routes=host_routes)
        url = reverse('horizon:project:networks:addsubnet',
                      args=[subnet.network_id])
        res = self.client.post(url, form_data)
        self.assertContains(res,
                            'Host Routes format error: '
                            'Destination CIDR and nexthop must be specified '
                            '(value=%s)' % host_routes)
    @test.create_stubs({api.neutron: ('network_get',)})
    def test_subnet_create_post_invalid_routes_invalid_destination(self):
        """A host route with an invalid destination CIDR (/64 on IPv4) is
        rejected.
        """
        network = self.networks.first()
        subnet = self.subnets.first()
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id).AndReturn(network)
        self.mox.ReplayAll()
        # invalid destination network
        host_routes = '172.16.0.0/64,10.0.0.253'
        form_data = form_data_subnet(subnet,
                                     host_routes=host_routes,
                                     allocation_pools=[])
        url = reverse('horizon:project:networks:addsubnet',
                      args=[subnet.network_id])
        res = self.client.post(url, form_data)
        self.assertContains(res,
                            'host_routes: Invalid IP address '
                            '(value=%s)' % host_routes.split(',')[0])
@test.create_stubs({api.neutron: ('network_get',)})
def test_subnet_create_post_invalid_routes_nexthop_ip_network(self):
network = self.networks.first()
subnet = self.subnets.first()
api.neutron.network_get(IsA(http.HttpRequest),
network.id).AndReturn(network)
self.mox.ReplayAll()
# nexthop is not an IP address
host_routes = '172.16.0.0/24,10.0.0.253/24'
form_data = form_data_subnet(subnet,
host_routes=host_routes,
allocation_pools=[])
url = reverse('horizon:project:networks:addsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
self.assertContains(res,
'host_routes: Invalid IP address '
'(value=%s)' % host_routes.split(',')[1])
    @test.create_stubs({api.neutron: ('network_get',
                                      'subnet_create',)})
    def test_v6subnet_create_post(self):
        """POSTing the add-subnet form for an IPv6 subnet succeeds.

        subnet_create must be invoked with exactly the attributes taken
        from the v6 fixture; the view then redirects to the network detail.
        """
        network = self.networks.get(name="v6_net1")
        subnet = self.subnets.get(name="v6_subnet1")
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id)\
            .AndReturn(network)
        # Record the expected create call with the fixture's attributes.
        api.neutron.subnet_create(IsA(http.HttpRequest),
                                  network_id=network.id,
                                  name=subnet.name,
                                  cidr=subnet.cidr,
                                  ip_version=subnet.ip_version,
                                  gateway_ip=subnet.gateway_ip,
                                  enable_dhcp=subnet.enable_dhcp,
                                  allocation_pools=subnet.allocation_pools)\
            .AndReturn(subnet)
        self.mox.ReplayAll()
        form_data = form_data_subnet(subnet)
        url = reverse('horizon:project:networks:addsubnet',
                      args=[subnet.network_id])
        res = self.client.post(url, form_data)
        self.assertNoFormErrors(res)
        redir_url = reverse('horizon:project:networks:detail',
                            args=[subnet.network_id])
        self.assertRedirectsNoFollow(res, redir_url)
    @test.create_stubs({api.neutron: ('network_get',
                                      'subnet_create',)})
    def test_v6subnet_create_post_with_slaac_attributes(self):
        """Creating an IPv6 subnet forwards the SLAAC address/RA modes.

        The v6_subnet2 fixture carries ipv6 modes, so subnet_create must
        receive ipv6_address_mode and ipv6_ra_mode both set to 'slaac'.
        """
        network = self.networks.get(name="v6_net2")
        subnet = self.subnets.get(name="v6_subnet2")
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id)\
            .AndReturn(network)
        api.neutron.subnet_create(IsA(http.HttpRequest),
                                  network_id=network.id,
                                  name=subnet.name,
                                  cidr=subnet.cidr,
                                  ip_version=subnet.ip_version,
                                  gateway_ip=subnet.gateway_ip,
                                  enable_dhcp=subnet.enable_dhcp,
                                  allocation_pools=subnet.allocation_pools,
                                  ipv6_address_mode='slaac',
                                  ipv6_ra_mode='slaac')\
            .AndReturn(subnet)
        self.mox.ReplayAll()
        form_data = form_data_subnet(subnet)
        url = reverse('horizon:project:networks:addsubnet',
                      args=[subnet.network_id])
        res = self.client.post(url, form_data)
        self.assertNoFormErrors(res)
        redir_url = reverse('horizon:project:networks:detail',
                            args=[subnet.network_id])
        self.assertRedirectsNoFollow(res, redir_url)
    @test.create_stubs({api.neutron: ('subnet_update',
                                      'subnet_get',)})
    def test_subnet_update_post(self):
        """Submitting the edit-subnet form issues the expected update call."""
        subnet = self.subnets.first()
        # Two subnet_get calls are recorded to match the edit workflow's
        # repeated lookups of the subnet being edited.
        api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
            .AndReturn(subnet)
        api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
            .AndReturn(subnet)
        api.neutron.subnet_update(IsA(http.HttpRequest), subnet.id,
                                  name=subnet.name,
                                  enable_dhcp=subnet.enable_dhcp,
                                  dns_nameservers=[],
                                  host_routes=[])\
            .AndReturn(subnet)
        self.mox.ReplayAll()
        form_data = form_data_subnet(subnet,
                                     allocation_pools=[])
        url = reverse('horizon:project:networks:editsubnet',
                      args=[subnet.network_id, subnet.id])
        res = self.client.post(url, form_data)
        redir_url = reverse('horizon:project:networks:detail',
                            args=[subnet.network_id])
        self.assertRedirectsNoFollow(res, redir_url)
    @test.create_stubs({api.neutron: ('subnet_update',
                                      'subnet_get',)})
    def test_subnet_update_post_with_gateway_ip(self):
        """Editing a subnet with a new gateway IP passes it to subnet_update."""
        subnet = self.subnets.first()
        # Two subnet_get calls are recorded to match the edit workflow's
        # repeated lookups of the subnet being edited.
        api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
            .AndReturn(subnet)
        api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
            .AndReturn(subnet)
        gateway_ip = '10.0.0.100'
        api.neutron.subnet_update(IsA(http.HttpRequest), subnet.id,
                                  name=subnet.name,
                                  gateway_ip=gateway_ip,
                                  enable_dhcp=subnet.enable_dhcp,
                                  dns_nameservers=[],
                                  host_routes=[])\
            .AndReturn(subnet)
        self.mox.ReplayAll()
        form_data = form_data_subnet(subnet,
                                     gateway_ip=gateway_ip,
                                     allocation_pools=[])
        url = reverse('horizon:project:networks:editsubnet',
                      args=[subnet.network_id, subnet.id])
        res = self.client.post(url, form_data)
        redir_url = reverse('horizon:project:networks:detail',
                            args=[subnet.network_id])
        self.assertRedirectsNoFollow(res, redir_url)
    @test.create_stubs({api.neutron: ('subnet_update',
                                      'subnet_get',)})
    def test_subnet_update_post_no_gateway(self):
        """Clearing the gateway sends gateway_ip=None to subnet_update."""
        subnet = self.subnets.first()
        # Two subnet_get calls are recorded to match the edit workflow's
        # repeated lookups of the subnet being edited.
        api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
            .AndReturn(subnet)
        api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
            .AndReturn(subnet)
        api.neutron.subnet_update(IsA(http.HttpRequest), subnet.id,
                                  name=subnet.name,
                                  gateway_ip=None,
                                  enable_dhcp=subnet.enable_dhcp,
                                  dns_nameservers=[],
                                  host_routes=[])\
            .AndReturn(subnet)
        self.mox.ReplayAll()
        form_data = form_data_subnet(subnet,
                                     gateway_ip=None,
                                     allocation_pools=[])
        url = reverse('horizon:project:networks:editsubnet',
                      args=[subnet.network_id, subnet.id])
        res = self.client.post(url, form_data)
        redir_url = reverse('horizon:project:networks:detail',
                            args=[subnet.network_id])
        self.assertRedirectsNoFollow(res, redir_url)
    @test.create_stubs({api.neutron: ('subnet_update',
                                      'subnet_get',)})
    def test_subnet_update_post_with_additional_attributes(self):
        """Editing forwards DNS servers/host routes and a DHCP toggle."""
        # The second fixture subnet carries non-empty dns_nameservers
        # and host_routes, unlike self.subnets.first().
        subnet = self.subnets.list()[1]
        api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
            .AndReturn(subnet)
        api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
            .AndReturn(subnet)
        api.neutron.subnet_update(IsA(http.HttpRequest), subnet.id,
                                  name=subnet.name,
                                  enable_dhcp=False,
                                  dns_nameservers=subnet.dns_nameservers,
                                  host_routes=subnet.host_routes)\
            .AndReturn(subnet)
        self.mox.ReplayAll()
        form_data = form_data_subnet(subnet,
                                     enable_dhcp=False)
        url = reverse('horizon:project:networks:editsubnet',
                      args=[subnet.network_id, subnet.id])
        res = self.client.post(url, form_data)
        redir_url = reverse('horizon:project:networks:detail',
                            args=[subnet.network_id])
        self.assertRedirectsNoFollow(res, redir_url)
@test.create_stubs({api.neutron: ('subnet_update',
'subnet_get',)})
def test_subnet_update_post_gw_inconsistent(self):
subnet = self.subnets.first()
api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
.AndReturn(subnet)
self.mox.ReplayAll()
# dummy IPv6 address
gateway_ip = '2001:0DB8:0:CD30:123:4567:89AB:CDEF'
form_data = form_data_subnet(subnet, gateway_ip=gateway_ip,
allocation_pools=[])
url = reverse('horizon:project:networks:editsubnet',
args=[subnet.network_id, subnet.id])
res = self.client.post(url, form_data)
self.assertContains(res, 'Gateway IP and IP version are inconsistent.')
@test.create_stubs({api.neutron: ('subnet_update',
'subnet_get',)})
def test_subnet_update_post_invalid_nameservers(self):
subnet = self.subnets.first()
api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
.AndReturn(subnet)
self.mox.ReplayAll()
# invalid DNS server address
dns_nameservers = ['192.168.0.2', 'invalid_address']
form_data = form_data_subnet(subnet, dns_nameservers=dns_nameservers,
allocation_pools=[])
url = reverse('horizon:project:networks:editsubnet',
args=[subnet.network_id, subnet.id])
res = self.client.post(url, form_data)
self.assertContains(res,
'dns_nameservers: Invalid IP address '
'(value=%s)' % dns_nameservers[1])
@test.create_stubs({api.neutron: ('subnet_update',
'subnet_get',)})
def test_subnet_update_post_invalid_routes_destination_only(self):
subnet = self.subnets.first()
api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
.AndReturn(subnet)
self.mox.ReplayAll()
# Start only host_route
host_routes = '192.168.0.0/24'
form_data = form_data_subnet(subnet,
allocation_pools=[],
host_routes=host_routes)
url = reverse('horizon:project:networks:editsubnet',
args=[subnet.network_id, subnet.id])
res = self.client.post(url, form_data)
self.assertContains(res,
'Host Routes format error: '
'Destination CIDR and nexthop must be specified '
'(value=%s)' % host_routes)
@test.create_stubs({api.neutron: ('subnet_update',
'subnet_get',)})
def test_subnet_update_post_invalid_routes_three_entries(self):
subnet = self.subnets.first()
api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
.AndReturn(subnet)
self.mox.ReplayAll()
# host_route with three entries
host_routes = 'aaaa,bbbb,cccc'
form_data = form_data_subnet(subnet,
allocation_pools=[],
host_routes=host_routes)
url = reverse('horizon:project:networks:editsubnet',
args=[subnet.network_id, subnet.id])
res = self.client.post(url, form_data)
self.assertContains(res,
'Host Routes format error: '
'Destination CIDR and nexthop must be specified '
'(value=%s)' % host_routes)
@test.create_stubs({api.neutron: ('subnet_update',
'subnet_get',)})
def test_subnet_update_post_invalid_routes_invalid_destination(self):
subnet = self.subnets.first()
api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
.AndReturn(subnet)
self.mox.ReplayAll()
# invalid destination network
host_routes = '172.16.0.0/64,10.0.0.253'
form_data = form_data_subnet(subnet,
host_routes=host_routes,
allocation_pools=[])
url = reverse('horizon:project:networks:editsubnet',
args=[subnet.network_id, subnet.id])
res = self.client.post(url, form_data)
self.assertContains(res,
'host_routes: Invalid IP address '
'(value=%s)' % host_routes.split(',')[0])
@test.create_stubs({api.neutron: ('subnet_update',
'subnet_get',)})
def test_subnet_update_post_invalid_routes_nexthop_ip_network(self):
subnet = self.subnets.first()
api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
.AndReturn(subnet)
self.mox.ReplayAll()
# nexthop is not an IP address
host_routes = '172.16.0.0/24,10.0.0.253/24'
form_data = form_data_subnet(subnet,
host_routes=host_routes,
allocation_pools=[])
url = reverse('horizon:project:networks:editsubnet',
args=[subnet.network_id, subnet.id])
res = self.client.post(url, form_data)
self.assertContains(res,
'host_routes: Invalid IP address '
'(value=%s)' % host_routes.split(',')[1])
    @test.create_stubs({api.neutron: ('subnet_delete',
                                      'subnet_list',
                                      'network_get',
                                      'port_list',
                                      'is_extension_supported',)})
    def test_subnet_delete(self):
        """Delete a subnet with the mac-learning extension unsupported."""
        self._test_subnet_delete()
    @test.create_stubs({api.neutron: ('subnet_delete',
                                      'subnet_list',
                                      'network_get',
                                      'port_list',
                                      'is_extension_supported',)})
    def test_subnet_delete_with_mac_learning(self):
        """Delete a subnet with the mac-learning extension supported."""
        self._test_subnet_delete(mac_learning=True)
    def _test_subnet_delete(self, mac_learning=False):
        """Shared body: delete a subnet via the detail page's table action.

        :param mac_learning: value is_extension_supported reports for the
            'mac-learning' extension during the detail-page re-render.
        """
        subnet = self.subnets.first()
        network_id = subnet.network_id
        api.neutron.subnet_delete(IsA(http.HttpRequest), subnet.id)
        # After the delete, the detail view reloads its subnet/port tables.
        api.neutron.subnet_list(IsA(http.HttpRequest), network_id=network_id)\
            .AndReturn([self.subnets.first()])
        api.neutron.network_get(IsA(http.HttpRequest), network_id)\
            .AndReturn(self.networks.first())
        api.neutron.port_list(IsA(http.HttpRequest), network_id=network_id)\
            .AndReturn([self.ports.first()])
        # Called from SubnetTable
        api.neutron.network_get(IsA(http.HttpRequest), network_id)\
            .AndReturn(self.networks.first())
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           'mac-learning')\
            .AndReturn(mac_learning)
        self.mox.ReplayAll()
        form_data = {'action': 'subnets__delete__%s' % subnet.id}
        url = reverse('horizon:project:networks:detail',
                      args=[network_id])
        res = self.client.post(url, form_data)
        # The table action redirects back to the same detail page.
        self.assertRedirectsNoFollow(res, url)
    @test.create_stubs({api.neutron: ('subnet_delete',
                                      'subnet_list',
                                      'network_get',
                                      'port_list',
                                      'is_extension_supported',)})
    def test_subnet_delete_exception(self):
        """A failing subnet_delete is handled (mac-learning unsupported)."""
        self._test_subnet_delete_exception()
    @test.create_stubs({api.neutron: ('subnet_delete',
                                      'subnet_list',
                                      'network_get',
                                      'port_list',
                                      'is_extension_supported',)})
    def test_subnet_delete_exception_with_mac_learning(self):
        """A failing subnet_delete is handled (mac-learning supported)."""
        self._test_subnet_delete_exception(mac_learning=True)
    def _test_subnet_delete_exception(self, mac_learning=False):
        """Shared body: a neutron error on delete still redirects to detail.

        :param mac_learning: value is_extension_supported reports for the
            'mac-learning' extension during the detail-page re-render.
        """
        subnet = self.subnets.first()
        network_id = subnet.network_id
        # The delete call raises; the view should swallow it and re-render.
        api.neutron.subnet_delete(IsA(http.HttpRequest), subnet.id)\
            .AndRaise(self.exceptions.neutron)
        api.neutron.subnet_list(IsA(http.HttpRequest), network_id=network_id)\
            .AndReturn([self.subnets.first()])
        api.neutron.network_get(IsA(http.HttpRequest), network_id)\
            .AndReturn(self.networks.first())
        api.neutron.port_list(IsA(http.HttpRequest), network_id=network_id)\
            .AndReturn([self.ports.first()])
        # Called from SubnetTable
        api.neutron.network_get(IsA(http.HttpRequest), network_id)\
            .AndReturn(self.networks.first())
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           'mac-learning')\
            .AndReturn(mac_learning)
        self.mox.ReplayAll()
        form_data = {'action': 'subnets__delete__%s' % subnet.id}
        url = reverse('horizon:project:networks:detail',
                      args=[network_id])
        res = self.client.post(url, form_data)
        self.assertRedirectsNoFollow(res, url)
class NetworkPortTests(test.TestCase):
    """Tests for the port detail and port update views of a network."""
    @test.create_stubs({api.neutron: ('port_get',
                                      'is_extension_supported',)})
    def test_port_detail(self):
        """Port detail renders with the mac-learning extension unsupported."""
        self._test_port_detail()
    @test.create_stubs({api.neutron: ('port_get',
                                      'is_extension_supported',)})
    def test_port_detail_with_mac_learning(self):
        """Port detail renders with the mac-learning extension supported."""
        self._test_port_detail(mac_learning=True)
    def _test_port_detail(self, mac_learning=False):
        """Shared body: GET the port detail page and check the context."""
        port = self.ports.first()
        api.neutron.port_get(IsA(http.HttpRequest), port.id)\
            .AndReturn(self.ports.first())
        # is_extension_supported is recorded twice to match the view's
        # repeated capability checks while rendering.
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           'mac-learning')\
            .AndReturn(mac_learning)
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           'mac-learning')\
            .AndReturn(mac_learning)
        self.mox.ReplayAll()
        res = self.client.get(reverse('horizon:project:networks:ports:detail',
                                      args=[port.id]))
        self.assertTemplateUsed(res, 'project/networks/ports/detail.html')
        self.assertEqual(res.context['port'].id, port.id)
    @test.create_stubs({api.neutron: ('port_get',)})
    def test_port_detail_exception(self):
        """A failing port_get redirects the detail page to the index."""
        port = self.ports.first()
        api.neutron.port_get(IsA(http.HttpRequest), port.id)\
            .AndRaise(self.exceptions.neutron)
        self.mox.ReplayAll()
        res = self.client.get(reverse('horizon:project:networks:ports:detail',
                                      args=[port.id]))
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.neutron: ('port_get',
                                      'is_extension_supported',)})
    def test_port_update_get(self):
        """The edit-port form renders (mac-learning unsupported)."""
        self._test_port_update_get()
    @test.create_stubs({api.neutron: ('port_get',
                                      'is_extension_supported',)})
    def test_port_update_get_with_mac_learning(self):
        """The edit-port form renders (mac-learning supported)."""
        self._test_port_update_get(mac_learning=True)
    def _test_port_update_get(self, mac_learning=False):
        """Shared body: GET the edit-port form and check the template."""
        port = self.ports.first()
        api.neutron.port_get(IsA(http.HttpRequest),
                             port.id)\
            .AndReturn(port)
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           'mac-learning')\
            .AndReturn(mac_learning)
        self.mox.ReplayAll()
        url = reverse('horizon:project:networks:editport',
                      args=[port.network_id, port.id])
        res = self.client.get(url)
        self.assertTemplateUsed(res, 'project/networks/ports/update.html')
    @test.create_stubs({api.neutron: ('port_get',
                                      'is_extension_supported',
                                      'port_update')})
    def test_port_update_post(self):
        """Submitting the edit-port form succeeds (mac-learning off)."""
        self._test_port_update_post()
    @test.create_stubs({api.neutron: ('port_get',
                                      'is_extension_supported',
                                      'port_update')})
    def test_port_update_post_with_mac_learning(self):
        """Submitting the edit-port form succeeds (mac-learning on)."""
        self._test_port_update_post(mac_learning=True)
    def _test_port_update_post(self, mac_learning=False):
        """Shared body: POST the edit-port form and expect a redirect.

        When mac_learning is True, the extra mac_learning_enabled kwarg
        must be forwarded to port_update.
        """
        port = self.ports.first()
        api.neutron.port_get(IsA(http.HttpRequest), port.id)\
            .AndReturn(port)
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           'mac-learning')\
            .AndReturn(mac_learning)
        extension_kwargs = {}
        if mac_learning:
            extension_kwargs['mac_learning_enabled'] = True
        api.neutron.port_update(IsA(http.HttpRequest), port.id,
                                name=port.name,
                                admin_state_up=port.admin_state_up,
                                **extension_kwargs)\
            .AndReturn(port)
        self.mox.ReplayAll()
        form_data = {'network_id': port.network_id,
                     'port_id': port.id,
                     'name': port.name,
                     'admin_state': port.admin_state_up}
        if mac_learning:
            form_data['mac_state'] = True
        url = reverse('horizon:project:networks:editport',
                      args=[port.network_id, port.id])
        res = self.client.post(url, form_data)
        redir_url = reverse('horizon:project:networks:detail',
                            args=[port.network_id])
        self.assertRedirectsNoFollow(res, redir_url)
    @test.create_stubs({api.neutron: ('port_get',
                                      'is_extension_supported',
                                      'port_update')})
    def test_port_update_post_exception(self):
        """A failing port_update is handled (mac-learning off)."""
        self._test_port_update_post_exception()
    @test.create_stubs({api.neutron: ('port_get',
                                      'is_extension_supported',
                                      'port_update')})
    def test_port_update_post_exception_with_mac_learning(self):
        """A failing port_update is handled (mac-learning on)."""
        self._test_port_update_post_exception(mac_learning=True)
    def _test_port_update_post_exception(self, mac_learning=False):
        """Shared body: port_update raises; the view still redirects."""
        port = self.ports.first()
        api.neutron.port_get(IsA(http.HttpRequest), port.id)\
            .AndReturn(port)
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           'mac-learning')\
            .AndReturn(mac_learning)
        extension_kwargs = {}
        if mac_learning:
            extension_kwargs['mac_learning_enabled'] = True
        api.neutron.port_update(IsA(http.HttpRequest), port.id,
                                name=port.name,
                                admin_state_up=port.admin_state_up,
                                **extension_kwargs)\
            .AndRaise(self.exceptions.neutron)
        self.mox.ReplayAll()
        form_data = {'network_id': port.network_id,
                     'port_id': port.id,
                     'name': port.name,
                     'admin_state': port.admin_state_up}
        if mac_learning:
            form_data['mac_state'] = True
        url = reverse('horizon:project:networks:editport',
                      args=[port.network_id, port.id])
        res = self.client.post(url, form_data)
        redir_url = reverse('horizon:project:networks:detail',
                            args=[port.network_id])
        self.assertRedirectsNoFollow(res, redir_url)
class NetworkViewTests(test.TestCase):
    """Tests that create buttons are disabled when quotas are exhausted."""
    def _test_create_button_disabled_when_quota_exceeded(
            self, expected_string, network_quota=5, subnet_quota=5):
        """Shared body: render the index and look for the disabled button.

        :param expected_string: exact anchor-tag HTML expected in the page
        :param network_quota: remaining network quota to report
        :param subnet_quota: remaining subnet quota to report
        """
        quota_data = self.quota_usages.first()
        quota_data['networks']['available'] = network_quota
        quota_data['subnets']['available'] = subnet_quota
        api.neutron.network_list(
            IsA(http.HttpRequest),
            tenant_id=self.tenant.id,
            shared=False).AndReturn(self.networks.list())
        api.neutron.network_list(
            IsA(http.HttpRequest),
            shared=True).AndReturn([])
        quotas.tenant_quota_usages(
            IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(quota_data)
        self.mox.ReplayAll()
        res = self.client.get(INDEX_URL)
        self.assertTemplateUsed(res, 'project/networks/index.html')
        networks = res.context['networks_table'].data
        self.assertItemsEqual(networks, self.networks.list())
        self.assertContains(res, expected_string, html=True,
                            msg_prefix="The create button is not disabled")
    @test.create_stubs({api.neutron: ('network_list',),
                        quotas: ('tenant_quota_usages',)})
    def test_network_create_button_disabled_when_quota_exceeded_index(self):
        """Zero network quota disables 'Create Network' on the index."""
        create_link = networks_tables.CreateNetwork()
        url = create_link.get_link_url()
        classes = (list(create_link.get_default_classes())
                   + list(create_link.classes))
        link_name = "%s (%s)" % (unicode(create_link.verbose_name),
                                 "Quota exceeded")
        # The exact markup the table renders for the disabled action.
        expected_string = "<a href='%s' title='%s' class='%s disabled' "\
                          "id='networks__action_create'>" \
                          "<span class='fa fa-plus'></span>%s</a>" \
                          % (url, link_name, " ".join(classes), link_name)
        self._test_create_button_disabled_when_quota_exceeded(expected_string,
                                                              network_quota=0)
    @test.create_stubs({api.neutron: ('network_list',),
                        quotas: ('tenant_quota_usages',)})
    def test_subnet_create_button_disabled_when_quota_exceeded_index(self):
        """Zero subnet quota disables the row's 'Add Subnet' on the index."""
        network_id = self.networks.first().id
        create_link = networks_tables.CreateSubnet()
        url = reverse(create_link.get_link_url(), args=[network_id])
        classes = (list(create_link.get_default_classes())
                   + list(create_link.classes))
        link_name = "%s (%s)" % (unicode(create_link.verbose_name),
                                 "Quota exceeded")
        expected_string = "<a href='%s' class='%s disabled' "\
                          "id='networks__row_%s__action_subnet'>%s</a>" \
                          % (url, " ".join(classes), network_id, link_name)
        self._test_create_button_disabled_when_quota_exceeded(expected_string,
                                                              subnet_quota=0)
    @test.create_stubs({api.neutron: ('network_get',
                                      'subnet_list',
                                      'port_list',
                                      'is_extension_supported',),
                        quotas: ('tenant_quota_usages',)})
    def test_subnet_create_button_disabled_when_quota_exceeded_detail(self):
        """Zero subnet quota disables 'Create Subnet' on the detail page."""
        network_id = self.networks.first().id
        quota_data = self.quota_usages.first()
        quota_data['subnets']['available'] = 0
        api.neutron.network_get(
            IsA(http.HttpRequest), network_id)\
            .MultipleTimes().AndReturn(self.networks.first())
        api.neutron.subnet_list(
            IsA(http.HttpRequest), network_id=network_id)\
            .AndReturn(self.subnets.list())
        api.neutron.port_list(
            IsA(http.HttpRequest), network_id=network_id)\
            .AndReturn([self.ports.first()])
        api.neutron.is_extension_supported(
            IsA(http.HttpRequest), 'mac-learning')\
            .AndReturn(False)
        quotas.tenant_quota_usages(
            IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(quota_data)
        self.mox.ReplayAll()
        res = self.client.get(reverse('horizon:project:networks:detail',
                                      args=[network_id]))
        self.assertTemplateUsed(res, 'project/networks/detail.html')
        subnets = res.context['subnets_table'].data
        self.assertItemsEqual(subnets, self.subnets.list())
        # Minimal stand-in for the table object the link action expects;
        # it only needs to expose kwargs['network_id'].
        class FakeTable():
            kwargs = {'network_id': network_id}
        create_link = subnets_tables.CreateSubnet()
        create_link.table = FakeTable()
        url = create_link.get_link_url()
        classes = (list(create_link.get_default_classes())
                   + list(create_link.classes))
        link_name = "%s (%s)" % (unicode(create_link.verbose_name),
                                 "Quota exceeded")
        expected_string = "<a href='%s' title='%s' class='%s disabled' "\
                          "id='subnets__action_create'>" \
                          "<span class='fa fa-plus'></span>%s</a>" \
                          % (url, link_name, " ".join(classes), link_name)
        self.assertContains(res, expected_string, html=True,
                            msg_prefix="The create button is not disabled")
| |
from pycket.values import W_Symbol
from pycket.prims.expose import define_nyi, prim_env, expose
DEBUG = False
def make_primitive_table(ls_str):
    """Intern every name in *ls_str* as a W_Symbol, preserving order.

    Returns a fresh list of symbol objects so later lookups reuse the
    interned symbols instead of allocating new ones each time.
    """
    return [W_Symbol.make(name) for name in ls_str]
# Names exported by Racket's #%place primitive module.
place_str = ["place-break", "place-channel-get", "place-channel-put",
             "place?", "place-enabled?", "place-channel", "place-dead-evt",
             "place-kill", "place-message-allowed?", "place-channel?", "dynamic-place",
             "place-wait", "place-pumper-threads", "place-shared?"]
# Names exported by #%paramz (parameterization internals).
paramz_str = ["check-for-break", "break-enabled-key", "parameterization-key", "cache-configuration",
              "exception-handler-key", "extend-parameterization", "reparameterize", "security-guard-check-file",
              "security-guard-check-file-link", "security-guard-check-network"]
# Exports that are not exposed to Racket, but
# can be used in a linklet:
internal_str = ["call/cm", "extract-procedure", "set-ctl-c-handler!", "register-linklet-instantiate-continuation!",
                "impersonator-val", "impersonate-ref", "impersonate-set!", "struct-type-install-properties!",
                "structure-type-lookup-prefab-uid", "struct-type-constructor-add-guards", "register-struct-constructor!", "register-struct-predicate!",
                "register-struct-field-accessor!", "register-struct-field-mutator!", "struct-property-set!"]
# Names exported by #%futures.
futures_str = ["futures-enabled?", "processor-count", "future", "future?",
               "touch", "would-be-future", "current-future", "make-fsemaphore",
               "fsemaphore?", "fsemaphore-post", "fsemaphore-wait", "fsemaphore-try-wait?",
               "fsemaphore-count", "reset-future-logs-for-tracing!", "mark-future-trace-end!"]
# Names exported by #%flfxnum (fixnum/flonum operations and vectors).
flfxnum_str = ["fx->fl", "fl->fx", "fxabs", "fx+",
               "fx-", "fx*", "fxquotient", "fxremainder",
               "fx+/wraparound", "fx-/wraparound", "fx*/wraparound", "fxlshift/wraparound",
               "fxmodulo", "fxand", "fxior", "fxxor",
               "fxnot", "fxrshift", "fxlshift", "fx>=",
               "fx>", "fx=", "fx<", "fx<=",
               "fxmin", "fxmax", "fxvector?", "fxvector",
               "make-fxvector", "shared-fxvector", "make-shared-fxvector", "fxvector-length",
               "fxvector-ref", "fxvector-set!", "fl+",
               "fl-", "fl*", "fl/", "flabs",
               "flsqrt", "flexp", "fllog", "flsin", "flsingle",
               "flcos", "fltan", "flasin", "flacos",
               "flatan", "flfloor", "flceiling", "flround",
               "fltruncate", "flexpt", "fl=", "fl<",
               "fl<=", "fl>", "fl>=", "flmin",
               "flmax", "->fl", "fl->exact-integer", "flvector?",
               "flvector", "make-flvector", "shared-flvector", "make-shared-flvector",
               "flvector-length", "flvector-ref", "flvector-set!",
               "flreal-part", "flimag-part", "make-flrectangular"]
# Names exported by #%extfl (extended-precision flonums).
extfl_str = ["extflmin", "extflatan", "extfl+", "extflmax",
             "extflfloor", "extfl<=", "extflvector", "extfl->floating-point-bytes",
             "extfltruncate", "->extfl", "extflsin", "extflonum?",
             "extflacos", "extflvector-ref", "extflexp", "extflabs",
             "extflonum-available?", "extfl<", "extfl->exact", "extfl->fx",
             "extfl->inexact", "extflvector-set!", "make-extflvector", "extflcos",
             "extflvector-length", "extfl/", "extflceiling", "floating-point-bytes->extfl",
             "extfl>=", "make-shared-extflvector", "extflround", "extfl->exact-integer",
             "real->extfl", "extflexpt", "fx->extfl", "shared-extflvector",
             "extfl>", "extfllog", "extflvector?", "extfl=",
             "extflsqrt", "extfl*", "extfl-", "extfltan",
             "extflasin"]
# Names exported by #%network (TCP/UDP primitives).
network_str = ["tcp-abandon-port", "tcp-accept", "tcp-accept-evt", "tcp-accept-ready?",
               "tcp-accept/enable-break", "tcp-addresses", "tcp-close", "tcp-connect",
               "tcp-connect/enable-break", "tcp-listen", "tcp-listener?", "tcp-port?",
               "udp?", "udp-bind!", "udp-bound?", "udp-close",
               "udp-connect!", "udp-connected?", "udp-multicast-interface", "udp-multicast-join-group!",
               "udp-multicast-leave-group!", "udp-multicast-loopback?", "udp-multicast-set-interface!", "udp-multicast-set-loopback!",
               "udp-multicast-set-ttl!", "udp-multicast-ttl", "udp-open-socket", "udp-receive!",
               "udp-receive!*", "udp-receive!-evt", "udp-receive!/enable-break", "udp-receive-ready-evt",
               "udp-set-receive-buffer-size!",
               "udp-send", "udp-send*", "udp-send-evt", "udp-send-ready-evt",
               "udp-send-to", "udp-send-to*", "udp-send-to-evt", "udp-send-to/enable-break",
               "udp-send/enable-break"]
# Names exported by #%foreign (FFI types and operations).
foreign_str = ["_bool", "_bytes", "_double", "_double*",
               "_fixint", "_fixnum", "_float", "_fpointer",
               "_gcpointer", "_int16", "_int32", "_int64",
               "_int8", "_longdouble", "_path", "_pointer",
               "_scheme", "_stdbool", "_string/ucs-4", "_string/utf-16",
               "_symbol", "_ufixint", "_ufixnum", "_uint16",
               "_uint32", "_uint64", "_uint8", "_void",
               "compiler-sizeof", "cpointer-gcable?", "cpointer-tag", "cpointer?",
               "ctype-alignof", "ctype-basetype", "ctype-c->scheme", "ctype-scheme->c",
               "ctype-sizeof", "ctype?", "end-stubborn-change", "extflvector->cpointer",
               "ffi-call", "ffi-callback", "ffi-callback?", "ffi-lib",
               "ffi-lib-name", "ffi-lib?", "ffi-obj", "ffi-obj-lib",
               "ffi-obj-name", "ffi-obj?", "flvector->cpointer", "free",
               "ffi-lib-unload",
               "free-immobile-cell", "lookup-errno", "make-array-type", "make-cstruct-type",
               "make-ctype", "make-late-weak-box", "make-late-weak-hasheq", "make-sized-byte-string",
               "make-stubborn-will-executor", "make-union-type", "malloc", "malloc-immobile-cell",
               "memcpy", "memmove", "memset", "offset-ptr?",
               "prop:cpointer", "ptr-add", "ptr-add!", "ptr-equal?",
               "ptr-offset", "ptr-ref", "ptr-set!", "saved-errno",
               "set-cpointer-tag!", "set-ptr-offset!", "vector->cpointer",
               "ffi-callback-maker", "ffi-call-maker", "make-late-will-executor"]
# Names exported by #%linklet (linklet compilation/instantiation API).
linklet_str = ["linklet?", "compile-linklet", "recompile-linklet", "eval-linklet", "read-compiled-linklet", "instantiate-linklet",
               "linklet-import-variables", "linklet-export-variables", "instance?", "make-instance", "instance-name", "instance-data",
               "instance-variable-names", "instance-variable-value", "instance-set-variable-value!", "instance-unset-variable!",
               "linklet-directory?", "hash->linklet-directory", "linklet-directory->hash", "linklet-bundle?", "hash->linklet-bundle",
               "linklet-bundle->hash", "variable-reference?", "variable-reference->instance", "variable-reference-constant?",
               "primitive-table", "variable-reference-from-unsafe?",
               "compiled-position->primitive", "linklet-virtual-machine-bytes",
               "read-linklet-bundle-hash", "write-linklet-bundle-hash", "instance-describe-variable!",
               "primitive-lookup"]
# Names exported by #%unsafe (unchecked variants and runtime internals).
unsafe_str = ["unsafe-car", "unsafe-cdr", "unsafe-list-tail",
              "unsafe-list-ref", "unsafe-cons-list", "unsafe-fx+",
              "unsafe-fx-", "unsafe-fx*", "unsafe-fxquotient",
              "unsafe-fxremainder", "unsafe-fxmodulo", "unsafe-fxabs",
              "unsafe-fxand", "unsafe-fxior", "unsafe-fxxor",
              "unsafe-fxnot", "unsafe-fxrshift", "unsafe-fxlshift",
              "unsafe-fx=", "unsafe-fx<", "unsafe-fx>",
              "unsafe-fx>=", "unsafe-fx<=", "unsafe-fxmin",
              "unsafe-fxmax", "unsafe-fl+", "unsafe-fl-",
              "unsafe-fl*", "unsafe-fl/", "unsafe-flabs",
              "unsafe-fl=", "unsafe-fl<", "unsafe-fl>",
              "unsafe-fl>=", "unsafe-fl<=", "unsafe-flmin",
              "unsafe-flmax", "unsafe-fx->fl", "unsafe-fl->fx",
              "unsafe-flrandom", "unsafe-flsqrt", "unsafe-make-flrectangular",
              "unsafe-flreal-part", "unsafe-flimag-part", "unsafe-extfl*",
              "unsafe-extfl+", "unsafe-extfl-", "unsafe-extfl/",
              "unsafe-extfl<", "unsafe-extfl<=", "unsafe-extfl=",
              "unsafe-extfl>", "unsafe-extfl>=", "unsafe-extflabs",
              "unsafe-extflmax", "unsafe-extflmin", "unsafe-extflsqrt",
              "unsafe-extfl->fx", "unsafe-fx->extfl", "unsafe-extflvector-length",
              "unsafe-extflvector-ref", "unsafe-extflvector-set!", "unsafe-unbox*",
              "unsafe-set-box*!", "unsafe-set-box!", "unsafe-unbox",
              "unsafe-box*-cas!", "unsafe-mcar", "unsafe-mcdr",
              "unsafe-set-mcar!", "unsafe-set-mcdr!", "unsafe-vector-ref",
              "unsafe-vector-set!", "unsafe-vector*-ref", "unsafe-vector*-set!",
              "unsafe-vector*-cas!", "unsafe-vector-length", "unsafe-vector*-length",
              "unsafe-fxvector-length", "unsafe-fxvector-ref", "unsafe-fxvector-set!",
              "unsafe-flvector-length", "unsafe-flvector-ref", "unsafe-flvector-set!",
              "unsafe-s16vector-ref", "unsafe-s16vector-set!", "unsafe-u16vector-ref",
              "unsafe-u16vector-set!", "unsafe-f64vector-ref", "unsafe-f64vector-set!",
              "unsafe-f80vector-set!", "unsafe-f80vector-ref", "unsafe-bytes-length",
              "unsafe-bytes-ref", "unsafe-bytes-set!", "unsafe-string-length",
              "unsafe-string-set!", "unsafe-string-ref", "unsafe-struct-ref",
              "unsafe-struct-set!", "unsafe-struct*-ref", "unsafe-struct*-set!",
              "unsafe-immutable-hash-iterate-key+value", "unsafe-immutable-hash-iterate-pair",
              "unsafe-immutable-hash-iterate-value", "unsafe-immutable-hash-iterate-key",
              "unsafe-immutable-hash-iterate-first", "unsafe-immutable-hash-iterate-next",
              "unsafe-mutable-hash-iterate-key+value", "unsafe-mutable-hash-iterate-pair",
              "unsafe-mutable-hash-iterate-value", "unsafe-mutable-hash-iterate-key",
              "unsafe-mutable-hash-iterate-first", "unsafe-mutable-hash-iterate-next",
              "unsafe-weak-hash-iterate-key+value", "unsafe-weak-hash-iterate-pair",
              "unsafe-weak-hash-iterate-value", "unsafe-weak-hash-iterate-key",
              "unsafe-weak-hash-iterate-first", "unsafe-weak-hash-iterate-next",
              "unsafe-ephemeron-hash-iterate-key+value", "unsafe-ephemeron-hash-iterate-pair",
              "unsafe-ephemeron-hash-iterate-value", "unsafe-ephemeron-hash-iterate-key",
              "unsafe-ephemeron-hash-iterate-first", "unsafe-ephemeron-hash-iterate-next",
              "unsafe-chaperone-procedure", "unsafe-impersonate-procedure", "unsafe-impersonate-vector",
              "unsafe-chaperone-vector", "unsafe-undefined", "check-not-unsafe-undefined",
              "check-not-unsafe-undefined/assign", "prop:chaperone-unsafe-undefined",
              "chaperone-struct-unsafe-undefined",
              "unsafe-start-atomic", "unsafe-end-atomic", "unsafe-start-breakable-atomic",
              "unsafe-end-breakable-atomic", "unsafe-in-atomic?", "unsafe-set-on-atomic-timeout!",
              "unsafe-thread-at-root", "unsafe-make-custodian-at-root", "unsafe-custodian-register",
              "unsafe-custodian-unregister", "unsafe-register-process-global", "unsafe-make-security-guard-at-root",
              "unsafe-abort-current-continuation/no-wind", "unsafe-call-with-composable-continuation/no-wind",
              "unsafe-poller", "unsafe-poll-fd",
              "unsafe-poll-ctx-fd-wakeup", "unsafe-poll-ctx-eventmask-wakeup", "unsafe-poll-ctx-milliseconds-wakeup",
              "unsafe-signal-received", "unsafe-set-sleep-in-thread!",
              "unsafe-file-descriptor->port", "unsafe-socket->port",
              "unsafe-file-descriptor->semaphore", "unsafe-socket->semaphore",
              "unsafe-port->file-descriptor", "unsafe-port->socket",
              "unsafe-get-place-table", "unsafe-call-in-os-thread",
              "unsafe-make-os-semaphore", "unsafe-os-semaphore-post", "unsafe-os-semaphore-wait",
              "unsafe-os-thread-enabled?", "unsafe-struct*-cas!",
              "unsafe-add-post-custodian-shutdown",
              "unsafe-root-continuation-prompt-tag",
              "unsafe-make-place-local",
              "unsafe-place-local-ref",
              "unsafe-place-local-set!",
              "unsafe-char<?", "unsafe-char<=?", "unsafe-char=?", "unsafe-char>=?", "unsafe-char>?",
              "unsafe-char->integer",
              "unsafe-add-global-finalizer", "unsafe-add-collect-callbacks",
              "unsafe-remove-collect-callbacks"]
# This table omits anything that the expander implements itself,
# since the expander will export its own variant instead of the
# `kernel-table` variant.
# NOTE(review): a few names appear more than once (e.g. "caadr",
# "thread-receive", the "exn?" accessors); kept as-is since the tables are
# only used for symbol interning and NYI priming -- confirm before deduping.
kernel_str = ["*", "+", "-",
              "/", "<", "<=",
              "=", ">", ">=",
              "quotient", "quotient/remainder", "remainder",
              "abort-current-continuation", "abs", "absolute-path?",
              "add1", "acos", "alarm-evt",
              "always-evt", "andmap", "angle",
              "append", "apply", "arithmetic-shift",
              "asin", "assoc", "assq",
              "assv", "atan", "banner",
              "bitwise-and", "bitwise-bit-set?", "bitwise-bit-field",
              "bitwise-ior", "bitwise-not", "bitwise-xor",
              "boolean?", "box",
              "box-cas!", "box-immutable", "box?",
              "break-enabled", "break-thread", "build-path",
              "build-path/convention-type", "byte-ready?", "byte-pregexp",
              "byte-pregexp?", "byte-regexp", "byte-regexp?",
              "byte?", "bytes", "bytes->immutable-bytes",
              "bytes->list", "bytes->path", "bytes->path-element",
              "bytes->string/latin-1", "bytes->string/locale", "bytes->string/utf-8",
              "bytes-append", "bytes-close-converter", "bytes-convert",
              "bytes-convert-end", "bytes-converter?", "bytes-copy",
              "bytes-copy!", "bytes-fill!", "bytes-length",
              "bytes-open-converter", "bytes-ref", "bytes-set!",
              "bytes-utf-8-index", "bytes-utf-8-length", "bytes-utf-8-ref",
              "bytes>?", "bytes<?", "bytes=?",
              "bytes?",
              "caadr", "call-in-nested-thread", "call-with-composable-continuation",
              "call-with-continuation-barrier", "call-with-continuation-prompt", "call-with-current-continuation",
              "call-with-escape-continuation", "call-with-immediate-continuation-mark", "call-with-input-file",
              "call-with-output-file", "call-with-semaphore", "call-with-semaphore/enable-break",
              "call-with-values", "ceiling", "channel?",
              "channel-put-evt", "channel-put-evt?", "chaperone?",
              "chaperone-of?", "chaperone-box", "chaperone-continuation-mark-key",
              "chaperone-channel", "chaperone-evt", "chaperone-hash",
              "chaperone-procedure", "chaperone-procedure*", "chaperone-prompt-tag",
              "chaperone-struct", "chaperone-struct-type", "chaperone-vector",
              "chaperone-vector*", "char->integer", "char-alphabetic?",
              "char-downcase", "char-foldcase", "char-general-category",
              "char-graphic?", "char-blank?", "char-iso-control?",
              "char-numeric?", "char-ready?", "char-lower-case?",
              "char-punctuation?", "char-symbolic?", "char-title-case?",
              "char-upper-case?", "char-upcase", "char-titlecase",
              "char-whitespace?", "char-utf-8-length", "char<=?",
              "char<?", "char=?", "char>=?",
              "char>?", "char?", "char-ci<=?",
              "char-ci<?", "char-ci=?", "char-ci>=?",
              "char-ci>?", "checked-procedure-check-and-extract", "choice-evt",
              "cleanse-path", "close-input-port", "close-output-port",
              "collect-garbage", "complex?",
              "compile-allow-set!-undefined", "compile-enforce-module-constants", "compile-context-preservation-enabled",
              "complete-path?", "continuation-marks", "continuation-mark-key?",
              "continuation-mark-set?", "continuation-mark-set-first", "continuation-mark-set->list",
              "continuation-mark-set->list*", "continuation-mark-set->context", "continuation-prompt-available?",
              "continuation-prompt-tag?", "continuation?", "copy-file",
              "cos", "current-code-inspector", "current-command-line-arguments",
              "current-continuation-marks", "current-custodian", "current-directory",
              "current-directory-for-user", "current-drive", "current-environment-variables",
              "current-error-port", "current-evt-pseudo-random-generator", "current-force-delete-permissions",
              "current-gc-milliseconds", "current-get-interaction-input-port", "current-inexact-milliseconds",
              "current-input-port", "current-inspector", "current-load-extension",
              "current-load-relative-directory", "current-locale", "current-logger",
              "current-memory-use", "current-milliseconds", "current-output-port",
              "current-plumber", "current-preserved-thread-cell-values", "current-print",
              "current-process-milliseconds", "current-prompt-read", "current-pseudo-random-generator",
              "current-read-interaction", "current-seconds", "current-security-guard",
              "current-subprocess-custodian-mode", "current-thread", "current-thread-group",
              "current-thread-initial-stack-size", "current-write-relative-directory", "custodian?",
              "custodian-box?", "custodian-box-value", "custodian-limit-memory",
              "custodian-managed-list", "custodian-memory-accounting-available?", "custodian-require-memory",
              "custodian-shutdown-all", "custodian-shut-down?", "custom-print-quotable?", "custom-print-quotable-accessor",
              "custom-write?", "custom-write-accessor", "datum-intern-literal",
              "default-continuation-prompt-tag", "delete-directory", "delete-file",
              "denominator", "directory-exists?", "directory-list",
              "display", "dump-memory-stats",
              "dynamic-wind", "environment-variables-ref", "environment-variables-set!",
              "environment-variables-copy", "environment-variables-names", "environment-variables?",
              "eof", "eof-object?", "ephemeron?",
              "ephemeron-value", "eprintf", "eq-hash-code",
              "eq?", "equal-hash-code", "equal-secondary-hash-code",
              "equal?", "equal?/recur", "eqv?",
              "eqv-hash-code", "error", "error-display-handler",
              "error-escape-handler", "error-print-context-length", "error-print-source-location",
              "error-print-width", "error-value->string-handler", "eval-jit-enabled",
              "even?", "evt?", "exact-integer?",
              "exact-nonnegative-integer?", "exact-positive-integer?", "exact?",
              "exact->inexact", "executable-yield-handler", "exit",
              "exit-handler", "exn-continuation-marks", "exn-message",
              "exn?", "expand-user-path", "exp",
              "explode-path", "expt", "file-exists?",
              "file-or-directory-modify-seconds", "file-or-directory-identity",
              "file-or-directory-permissions", "file-or-directory-type",
              "file-position", "file-position*", "file-size",
              "file-stream-buffer-mode", "file-stream-port?", "file-truncate",
              "filesystem-change-evt", "filesystem-change-evt?", "filesystem-change-evt-cancel",
              "filesystem-root-list", "find-system-path", "fixnum?",
              "flonum?", "floor", "floating-point-bytes->real",
              "flush-output", "for-each", "format",
              "fprintf", "gcd", "gensym",
              "get-output-bytes", "get-output-string", "global-port-print-handler",
              "handle-evt", "handle-evt?", "hash",
              "hash-clear", "hash-clear!", "hash-copy",
              "hash-count", "hash-eq?", "hash-eqv?",
              "hash-equal?", "hash-for-each", "hash-iterate-first",
              "hash-iterate-key", "hash-iterate-key+value", "hash-iterate-next",
              "hash-iterate-pair", "hash-iterate-value", "hash-keys-subset?",
              "hash-map", "hash-placeholder?", "hash-ref",
              "hash-remove", "hash-remove!", "hash-set",
              "hash-set!", "hash-weak?", "hash-strong?", "hash-ephemeron?", "hash?",
              "hasheq", "hasheqv", "imag-part",
              "immutable?", "impersonate-box", "impersonate-channel",
              "impersonate-continuation-mark-key", "impersonate-hash", "impersonate-procedure",
              "impersonate-procedure*", "impersonate-prompt-tag", "impersonate-struct",
              "impersonate-vector", "impersonate-vector*", "impersonator?",
              "impersonator-ephemeron", "impersonator-of?", "impersonator-property?",
              "impersonator-prop:application-mark", "impersonator-property-accessor-procedure?", "inexact?",
              "inexact-real?", "inexact->exact", "input-port?",
              "inspector-superior?", "inspector?", "integer->char",
              "integer->integer-bytes", "integer-bytes->integer", "integer-length",
              "integer-sqrt", "integer-sqrt/remainder", "integer?",
              "interned-char?", "kill-thread", "lcm",
              "length", "link-exists?", "list",
              "list*", "list->bytes", "list->string",
              "list->vector", "list-ref", "list-tail",
              "list?", "list-pair?", "load-on-demand-enabled",
              "locale-string-encoding", "log", "logger?",
              "logger-name", "log-all-levels", "log-level?",
              "log-level-evt", "log-max-level", "log-message",
              "log-receiver?", "magnitude", "make-bytes",
              "make-channel", "make-continuation-mark-key", "make-continuation-prompt-tag",
              "make-custodian", "make-custodian-box", "make-derived-parameter",
              "make-directory", "make-environment-variables", "make-ephemeron",
              "make-file-or-directory-link", "make-hash", "make-hash-placeholder",
              "make-hasheq", "make-hasheq-placeholder", "make-hasheqv",
              "make-hasheqv-placeholder", "make-input-port", "make-immutable-hash",
              "make-immutable-hasheq", "make-immutable-hasheqv", "make-impersonator-property",
              "make-inspector", "make-known-char-range-list", "make-logger",
              "make-log-receiver", "make-output-port", "make-parameter",
              "make-phantom-bytes", "make-pipe", "make-placeholder",
              "make-plumber", "make-polar", "make-prefab-struct",
              "make-pseudo-random-generator", "make-reader-graph", "make-rectangular",
              "make-security-guard", "make-semaphore", "make-shared-bytes",
              "make-sibling-inspector", "make-string", "make-struct-field-accessor",
              "make-struct-field-mutator", "make-struct-type", "make-struct-type-property",
              "make-thread-cell", "make-thread-group", "make-vector",
              "make-weak-box", "make-weak-hash", "make-weak-hasheq",
              "make-weak-hasheqv", "make-will-executor", "map",
              "max", "memq", "memv", "min", "modulo",
              "most-positive-fixnum", "most-negative-fixnum",
              "nack-guard-evt", "negative?", "never-evt",
              "newline", "not", "null",
              "null?", "number->string", "number?",
              "numerator", "object-name", "odd?",
              "open-input-bytes", "open-input-file", "open-input-output-file",
              "open-input-string", "open-output-bytes", "open-output-file",
              "open-output-string", "ormap", "output-port?",
              "parameter?", "parameter-procedure=?", "parameterization?",
              "path->bytes", "path->complete-path", "path->directory-path",
              "path->string", "path-convention-type", "path-element->bytes",
              "path-element->string", "path-for-some-system?", "path?",
              "path<?", "peek-byte", "peek-byte-or-special",
              "peek-bytes", "peek-bytes!", "peek-bytes-avail!",
              "peek-bytes-avail!*", "peek-bytes-avail!/enable-break", "peek-char-or-special",
              "peek-char", "peek-string", "peek-string!",
              "phantom-bytes?", "pipe-content-length", "placeholder?",
              "placeholder-get", "placeholder-set!", "plumber-add-flush!",
              "plumber-flush-all", "plumber-flush-handle-remove!", "plumber-flush-handle?",
              "plumber?", "poll-guard-evt", "port-closed?",
              "port-closed-evt", "port-commit-peeked", "port-count-lines!",
              "port-count-lines-enabled", "port-counts-lines?", "port-file-identity",
              "port-file-unlock", "port-next-location", "port-display-handler",
              "port-print-handler", "port-progress-evt", "port-provides-progress-evts?",
              "port-read-handler", "set-port-next-location!", "port-try-file-lock?",
              "port-write-handler", "port-writes-atomic?", "port-writes-special?",
              "positive?", "prefab-key->struct-type", "prefab-key?",
              "prefab-struct-key", "pregexp", "pregexp?",
              "primitive?", "primitive-closure?",
              "primitive-result-arity", "printf", "print",
              "print-as-expression", "print-boolean-long-form", "print-box",
              "print-graph", "print-hash-table", "print-mpair-curly-braces",
              "print-pair-curly-braces", "print-reader-abbreviations", "print-struct",
              "print-syntax-width", "print-vector-length", "print-unreadable",
              "procedure-arity", "procedure-arity-mask",
              "procedure-arity?", "procedure-arity-includes?",
              "procedure-extract-target", "procedure-impersonator*?",
              "procedure-reduce-arity", "procedure-reduce-arity-mask",
              "procedure-rename", "procedure-result-arity", "procedure->method",
              "procedure?", "procedure-specialize", "procedure-struct-type?",
              "procedure-closure-contents-eq?", "progress-evt?", "prop:arity-string",
              "prop:authentic", "prop:checked-procedure", "prop:custom-print-quotable",
              "prop:custom-write", "prop:equal+hash", "prop:evt",
              "prop:impersonator-of", "prop:incomplete-arity", "prop:method-arity-error",
              "prop:procedure", "prop:object-name", "prop:output-port",
              "prop:input-port", "prop:sealed", "pseudo-random-generator?", "pseudo-random-generator->vector",
              "pseudo-random-generator-vector?", "random", "random-seed",
              "raise", "raise-user-error", "rational?",
              "read-accept-bar-quote", "read-byte", "read-byte-or-special",
              "read-bytes", "read-bytes!", "read-bytes-avail!",
              "read-bytes-avail!*", "read-bytes-avail!/enable-break", "read-bytes-line",
              "read-case-sensitive", "read-char", "read-char-or-special",
              "read-line", "read-on-demand-source", "read-string",
              "read-string!", "real?", "real-part",
              "real->double-flonum", "real->floating-point-bytes", "real->single-flonum", "single-flonum-available?",
              "regexp", "regexp-match", "regexp-match/end",
              "regexp-match-positions", "regexp-match-positions/end", "regexp-match-peek",
              "regexp-match-peek-immediate", "regexp-match-peek-positions", "regexp-match-peek-positions/end",
              "regexp-match-peek-positions-immediate", "regexp-match-peek-positions-immediate/end", "regexp-match?",
              "regexp-max-lookbehind", "regexp-replace", "regexp-replace*",
              "regexp?", "relative-path?", "rename-file-or-directory",
              "replace-evt", "resolve-path", "reverse",
              "round", "seconds->date", "security-guard?",
              "semaphore?", "semaphore-peek-evt", "semaphore-peek-evt?",
              "semaphore-post", "semaphore-try-wait?", "semaphore-wait",
              "semaphore-wait/enable-break", "set-box!", "set-box*!", "set-phantom-bytes!",
              "shared-bytes", "shell-execute", "simplify-path",
              "sin", "single-flonum?", "sleep",
              "split-path", "sqrt", "string",
              "string->bytes/latin-1", "string->bytes/locale", "string->bytes/utf-8",
              "string->immutable-string", "string->list", "string->number",
              "string->path", "string->path-element", "string->symbol",
              "string->uninterned-symbol", "string->unreadable-symbol",
              # BUG FIX: a trailing comma was missing here, so Python's
              # implicit string-literal concatenation fused
              # "string-append-immutable" with "string-ci=?" into one bogus
              # name, dropping both real primitives from the table.
              "string-append", "string-append-immutable",
              "string-ci=?", "string-ci<=?", "string-ci<?",
              "string-ci>=?", "string-ci>?", "string-copy",
              "string-copy!", "string-downcase", "string-fill!",
              "string-foldcase", "string-length", "string-locale-downcase",
              "string-locale-ci<?", "string-locale-ci=?", "string-locale-ci>?",
              "string-locale-upcase", "string-locale<?", "string-locale=?",
              "string-locale>?", "string-normalize-nfc", "string-normalize-nfd",
              "string-normalize-nfkc", "string-normalize-nfkd", "string-port?",
              "string-ref", "string-set!", "string-titlecase",
              "string-upcase", "string-utf-8-length", "string<=?",
              "string<?", "string=?", "string>=?",
              "string>?", "string?", "struct->vector",
              "struct-type?", "struct?", "struct-accessor-procedure?",
              "struct-mutator-procedure?", "struct-constructor-procedure?", "struct-info",
              "struct-predicate-procedure?", "struct-type-info", "struct-type-make-constructor",
              "struct-type-make-predicate", "struct-type-property-accessor-procedure?", "struct-type-property?",
              "sub1", "subbytes", "subprocess?",
              "subprocess", "subprocess-group-enabled", "subprocess-kill",
              "subprocess-pid", "subprocess-status", "subprocess-wait",
              "substring", "symbol->string", "symbol->immutable-string", "symbol-interned?",
              "symbol-unreadable?", "symbol<?", "symbol?",
              "sync", "sync/timeout", "sync/enable-break",
              "sync/timeout/enable-break", "system-big-endian?", "system-idle-evt",
              "system-language+country", "system-library-subpath", "system-path-convention-type",
              "system-type", "tan", "terminal-port?",
              "time-apply", "thread", "thread/suspend-to-kill",
              "thread?", "thread-cell?", "thread-cell-ref",
              "thread-cell-set!", "thread-cell-values?", "thread-dead?",
              "thread-dead-evt", "thread-group?",
              "thread-receive", "thread-receive-evt", "thread-resume",
              "thread-resume-evt", "thread-rewind-receive", "thread-running?",
              "thread-send", "thread-receive", "thread-suspend",
              "thread-suspend-evt", "thread-try-receive", "thread-wait",
              "true-object?", "truncate", "unbox", "unbox*",
              "uncaught-exception-handler", "unquoted-printing-string", "unquoted-printing-string?",
              "unquoted-printing-string-value", "values", "vector",
              "vector->immutable-vector", "vector->list", "vector->pseudo-random-generator",
              "vector->pseudo-random-generator!", "vector->values", "vector-cas!",
              "vector-copy!", "vector-fill!", "vector-immutable",
              "vector-length", "vector-ref", "vector-set!",
              "vector*-length", "vector*-ref", "vector*-set!",
              "vector-set-performance-stats!", "vector?", "version",
              "void", "void?", "weak-box?",
              "weak-box-value", "will-execute", "will-executor?",
              "will-register", "will-try-execute", "with-input-from-file",
              "with-output-to-file", "wrap-evt", "write",
              "write-byte", "write-bytes", "write-bytes-avail",
              "write-bytes-avail*", "write-bytes-avail/enable-break", "write-bytes-avail-evt",
              "write-char", "write-special", "write-special-avail*",
              "write-special-evt", "write-string", "zero?",
              "keyword<?", "string->keyword", "keyword->string", "keyword->immutable-string",
              "keyword?", "cons", "pair?",
              "car", "cdr", "caar",
              "cadr", "cdar", "cddr",
              "caaar", "caadr", "cadar",
              "caddr", "cdaar", "cdadr",
              "cddar", "cdddr", "caaaar",
              "caaadr", "caadar", "caaddr",
              "cadaar", "cadadr", "caddar",
              "cadddr", "cdaaar", "cdaadr",
              "cdadar", "cdaddr", "cddaar",
              "cddadr", "cdddar", "cddddr",
              "mpair?", "mcons", "mcar",
              "mcdr", "set-mcar!", "set-mcdr!",
              "raise-argument-error", "raise-arguments-error", "raise-result-error",
              "raise-mismatch-error", "raise-range-error", "raise-arity-error",
              "raise-result-arity-error", "raise-arity-mask-error",
              "raise-type-error", "struct:exn", "exn",
              "exn?", "exn-message", "exn-continuation-marks",
              "struct:exn:break", "exn:break", "exn:break?",
              "exn:break-continuation", "struct:exn:break:hang-up", "exn:break:hang-up",
              "exn:break:hang-up?", "struct:exn:break:terminate", "exn:break:terminate",
              "exn:break:terminate?", "struct:exn:fail", "exn:fail",
              "exn:fail?", "struct:exn:fail:contract", "exn:fail:contract",
              "exn:fail:contract?", "struct:exn:fail:contract:arity", "exn:fail:contract:arity",
              "exn:fail:contract:arity?", "struct:exn:fail:contract:divide-by-zero", "exn:fail:contract:divide-by-zero",
              "exn:fail:contract:divide-by-zero?", "struct:exn:fail:contract:non-fixnum-result", "exn:fail:contract:non-fixnum-result",
              "exn:fail:contract:non-fixnum-result?", "struct:exn:fail:contract:continuation", "exn:fail:contract:continuation",
              "exn:fail:contract:continuation?", "struct:exn:fail:contract:variable", "exn:fail:contract:variable",
              "exn:fail:contract:variable?", "exn:fail:contract:variable-id", "struct:exn:fail:read",
              "exn:fail:read", "exn:fail:read?", "exn:fail:read-srclocs",
              "struct:exn:fail:read:eof", "exn:fail:read:eof", "exn:fail:read:eof?",
              "struct:exn:fail:read:non-char", "exn:fail:read:non-char", "exn:fail:read:non-char?",
              "struct:exn:fail:filesystem", "exn:fail:filesystem", "exn:fail:filesystem?",
              "struct:exn:fail:filesystem:exists", "exn:fail:filesystem:exists", "exn:fail:filesystem:exists?",
              "struct:exn:fail:filesystem:version", "exn:fail:filesystem:version", "exn:fail:filesystem:version?",
              "struct:exn:fail:filesystem:errno", "exn:fail:filesystem:errno", "exn:fail:filesystem:errno?",
              "exn:fail:filesystem:errno-errno", "struct:exn:fail:network", "exn:fail:network",
              "exn:fail:network?", "struct:exn:fail:network:errno", "exn:fail:network:errno",
              "exn:fail:network:errno?", "exn:fail:network:errno-errno", "struct:exn:fail:out-of-memory",
              "exn:fail:out-of-memory", "exn:fail:out-of-memory?", "struct:exn:fail:unsupported",
              "exn:fail:unsupported", "exn:fail:unsupported?", "struct:exn:fail:user",
              "exn:fail:user", "exn:fail:user?", "prop:exn:srclocs",
              "exn:srclocs?", "exn:srclocs-accessor", "struct:srcloc",
              "srcloc", "srcloc?", "srcloc-source",
              "srcloc-line", "srcloc-column", "srcloc-position",
              "srcloc-span", "srcloc->string", "struct:date",
              "date?", "date",
              "date-second", "date-minute", "date-hour",
              "date-day", "date-month", "date-year",
              "date-week-day", "date-year-day", "date-dst?",
              "date-time-zone-offset", "struct:date*", "date*?",
              "date*", "date*-nanosecond",
              "date*-time-zone-name", "struct:arity-at-least", "arity-at-least",
              "arity-at-least?", "arity-at-least-value", "syntax?",
              "syntax-source", "syntax-line", "syntax-column",
              "syntax-position", "syntax-span", "syntax-e",
              "syntax->datum", "datum->syntax", "syntax-property",
              "syntax-property-symbol-keys",
              "current-compile-target-machine",
              "compile-target-machine?",
              "sha1-bytes", "sha224-bytes", "sha256-bytes"]
# Pycket-specific debugging/introspection hooks (see #%pycket below).
pycket_extra_str = ["pycket:activate-debug", "pycket:deactivate-debug",
                    "pycket:get-verbosity", "pycket:set-verbosity",
                    "pycket:is-debug-active", "pycket:print",
                    "pycket:activate-keyword", "pycket:deactivate-keyword",
                    "pycket:eq?", "pycket:report-undefined-prims"]
# Hooks that schemify-generated code calls directly.
schemify_hooks = ["variable-ref", "variable-ref/no-check",
                  "variable-set!/check-undefined", "variable-set!"]
# The reason for make_primitive_table is for turning these into lists
# of symbols (to avoid making new objects every time we look things up)
place = make_primitive_table(place_str)
paramz = make_primitive_table(paramz_str)
internal = make_primitive_table(internal_str)
futures = make_primitive_table(futures_str)
flfxnum = make_primitive_table(flfxnum_str)
extfl = make_primitive_table(extfl_str)
network = make_primitive_table(network_str)
foreign = make_primitive_table(foreign_str)
linklet = make_primitive_table(linklet_str)
unsafe = make_primitive_table(unsafe_str)
# FIXME : make it a #%pycket-extra, instead of piggybacking on the #%kernel
kernel = make_primitive_table(kernel_str)
pycket = make_primitive_table(pycket_extra_str + schemify_hooks)
# Maps a primitive-module symbol (e.g. #%kernel) to its symbol table.
select_prim_table = {W_Symbol.make("#%linklet"): linklet,
                     W_Symbol.make("#%kernel"): kernel,
                     W_Symbol.make("#%paramz"): paramz,
                     W_Symbol.make("#%unsafe"): unsafe,
                     W_Symbol.make("#%foreign"): foreign,
                     W_Symbol.make("#%futures"): futures,
                     W_Symbol.make("#%place"): place,
                     W_Symbol.make("#%flfxnum"): flfxnum,
                     W_Symbol.make("#%extfl"): extfl,
                     W_Symbol.make("#%pycket"): pycket,
                     W_Symbol.make("#%network"): network}
# Lists of actual functions indexed by the names above
prim_table_cache = {}
# Every name to be pre-registered as a primitive.
# NOTE(review): internal_str is deliberately not included here --
# presumably those internal exports are always implemented; confirm.
all_prims = linklet_str + \
            kernel_str + \
            paramz_str + \
            unsafe_str + \
            foreign_str + \
            futures_str + \
            place_str + \
            flfxnum_str + \
            extfl_str + \
            pycket_extra_str + \
            schemify_hooks + \
            network_str
if DEBUG:
    print("\n\nPriming all primitives in : linklet + kernel + paramz + unsafe + foreign + futures + place + flfxnum + extfl + network\n")
# Prime every listed name with a not-yet-implemented stub; actual
# implementations registered elsewhere take precedence.
for prim_name_str in all_prims:
    define_nyi(prim_name_str)
def report_undefined_prims():
    """Print a per-table breakdown (count and names) of primitives that are
    registered but still unimplemented, then return 0. Exposed to Scheme as
    pycket:report-undefined-prims."""
    linklets = get_undef_prims_in(linklet_str)
    kernel = get_undef_prims_in(kernel_str)
    paramz = get_undef_prims_in(paramz_str)
    unsafe = get_undef_prims_in(unsafe_str)
    foreign = get_undef_prims_in(foreign_str)
    futures = get_undef_prims_in(futures_str)
    places = get_undef_prims_in(place_str)
    flfxnum = get_undef_prims_in(flfxnum_str)
    extfl = get_undef_prims_in(extfl_str)
    network = get_undef_prims_in(network_str)
    # total is a concatenated list; only its length is reported below.
    total = linklets + kernel + paramz + unsafe + foreign + futures + places + flfxnum + extfl + network
    report = """
    linklets : %s -- %s
    kernel : %s -- %s
    paramz : %s -- %s
    unsafe : %s -- %s
    foreign : %s -- %s
    futures : %s -- %s
    places : %s -- %s
    flfxnum : %s -- %s
    extfl : %s -- %s
    network : %s -- %s
    TOTAL : %s
    """ % (len(linklets), linklets,
           len(kernel), kernel,
           len(paramz), paramz,
           len(unsafe), unsafe,
           len(foreign), foreign,
           len(futures), futures,
           len(places), places,
           len(flfxnum), flfxnum,
           len(extfl), extfl,
           len(network), network, len(total))
    print(report)
    return 0
def get_undef_prims_in(table):
    """Return the names from *table* whose prim_env entry is a W_Prim that
    reports itself as not implemented (i.e. still a define_nyi stub)."""
    from pycket.prims.expose import prim_env
    from pycket.values import W_Symbol, W_Prim
    undefined = []
    for prim_name in table:
        entry = prim_env[W_Symbol.make(prim_name)]
        if isinstance(entry, W_Prim) and not entry.is_implemented():
            undefined.append(prim_name)
    return undefined
| |
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import binascii
import os, sys, re, json, time
from collections import defaultdict
from datetime import datetime
from decimal import Decimal as PyDecimal # Qt 5.12 also exports Decimal
from functools import lru_cache
import traceback
import threading
import hmac
import stat
import inspect, weakref
import itertools
import subprocess
from locale import localeconv
from abc import ABC, abstractmethod
from traceback import format_exception
import queue
def inv_dict(d):
    """Return a new dict mapping each value of *d* back to its key."""
    return dict((value, key) for key, value in d.items())
# Denomination configuration: decimal places per BCH unit label.
DEFAULT_BASE_UNIT = "BCH"
base_units = {'BCH':8, 'mBCH':5, 'bits':2}
inv_base_units = inv_dict(base_units)
base_unit_labels = tuple(inv_base_units[dp] for dp in sorted(inv_base_units.keys(), reverse=True)) # ('BCH', 'mBCH', 'bits')
# Temporary no-op gettext so the fee_levels strings are marked for
# translation extraction; the real _ is imported immediately after.
def _(message): return message
fee_levels = [_('Within 25 blocks'), _('Within 10 blocks'), _('Within 5 blocks'), _('Within 2 blocks'), _('In the next block')]
del _
from .i18n import _, ngettext
class NotEnoughFunds(Exception):
    """Raised when the wallet cannot fund the requested amount."""
    pass
class ExcessiveFee(Exception):
    """Raised when a transaction's fee exceeds the allowed maximum."""
    pass
class InvalidPassword(Exception):
    """Raised when a wallet/seed password fails to decrypt."""
    def __str__(self):
        return _("Incorrect password")
class FileImportFailed(Exception):
    """Raised when importing a user-supplied file fails."""
    def __str__(self):
        return _("Failed to import file.")
class FileImportFailedEncrypted(FileImportFailed):
    """Raised when an import fails because the file appears encrypted."""
    def __str__(self):
        return (_('Failed to import file.') + ' ' +
                _('Perhaps it is encrypted...') + '\n' +
                _('Importing encrypted files is not supported.'))
# Throw this exception to unwind the stack like when an error occurs.
# However unlike other exceptions the user won't be informed.
class UserCancelled(Exception):
    '''An exception that is suppressed from the user (no error dialog)'''
    pass
class MyEncoder(json.JSONEncoder):
    """JSON encoder that additionally serializes Transaction objects
    (via their as_dict() representation)."""
    def default(self, obj):
        # Local import to avoid a circular import at module load time.
        from .transaction import Transaction
        if isinstance(obj, Transaction):
            return obj.as_dict()
        return super(MyEncoder, self).default(obj)
class PrintError:
    '''A handy base class for printing formatted log messages'''

    def diagnostic_name(self):
        # Subclasses may override to customize the "[Tag]" log prefix.
        return self.__class__.__name__

    def print_error(self, *msg):
        # only prints with --verbose flag
        print_error("[%s]" % self.diagnostic_name(), *msg)

    def print_stderr(self, *msg):
        # Unconditional print to stderr, tagged with diagnostic_name().
        print_stderr("[%s]" % self.diagnostic_name(), *msg)

    def print_msg(self, *msg):
        # Unconditional print to stdout, tagged with diagnostic_name().
        print_msg("[%s]" % self.diagnostic_name(), *msg)

    def print_exception(self, *msg):
        # Append the current exception's traceback to the message.
        text = ' '.join(str(item) for item in msg)
        text += ': '
        text += ''.join(format_exception(*sys.exc_info()))
        self.print_error(text)

    SPAM_MSG_RATE_LIMIT = 1.0  # Once every second
    _print_error_last_spam_msg = 0.0  # class-level default; shadowed per-instance on first spam

    def _spam_common(self, method, *args):
        '''Used internally to control spam messages. *All* messages called with
        spam_* are suppressed to max once every SPAM_MSG_RATE_LIMIT seconds'''
        now = time.time()
        if now - self._print_error_last_spam_msg >= self.SPAM_MSG_RATE_LIMIT:
            method(*args)
            self._print_error_last_spam_msg = now

    def spam_error(self, *args):
        ''' Like self.print_error except it only prints the supplied args
        once every self.SPAM_MSG_RATE_LIMIT seconds. '''
        self._spam_common(self.print_error, *args)

    def spam_msg(self, *args): self._spam_common(self.print_msg, *args)

    def spam_stderr(self, *args): self._spam_common(self.print_stderr, *args)
class ThreadJob(ABC, PrintError):
    """A job that is run periodically from a thread's main loop. run() is
    called from that thread's context.
    """
    @abstractmethod
    def run(self):
        """Called periodically from the thread"""
class DebugMem(ThreadJob):
    '''A handy class for debugging GC memory leaks'''

    def __init__(self, classes, interval=30):
        # classes: iterable of types to count; interval: seconds between scans
        self.next_time = 0
        self.classes = classes
        self.interval = interval

    def mem_stats(self):
        # Count live instances of each watched class after a full collection.
        import gc
        self.print_error("Start memscan")
        gc.collect()
        objmap = defaultdict(list)
        for obj in gc.get_objects():
            for class_ in self.classes:
                if isinstance(obj, class_):
                    objmap[class_].append(obj)
        for class_, objs in objmap.items():
            self.print_error("%s: %d" % (class_.__name__, len(objs)))
        self.print_error("Finish memscan")

    def run(self):
        # ThreadJob hook: rescan at most once per self.interval seconds.
        if time.time() > self.next_time:
            self.mem_stats()
            self.next_time = time.time() + self.interval
class DaemonThread(threading.Thread, PrintError):
    """ daemon thread that terminates cleanly.

    Runs a set of ThreadJobs (see run_jobs); jobs may be added/removed both
    from other threads (lock-protected) and from within a job's own run()
    (queued and applied after the iteration). """

    def __init__(self):
        threading.Thread.__init__(self)
        # Fix: threading.currentThread() is a deprecated alias (removed in
        # Python 3.12); current_thread() is the same function.
        self.parent_thread = threading.current_thread()
        self.running = False
        self.running_lock = threading.Lock()
        self.job_lock = threading.Lock()
        self.jobs = []  # could use a set here but order is important, so we enforce uniqueness in this list in the add/remove methods
        self._jobs2add = list()  # adding jobs needs to preserve order, so we use a list.
        self._jobs2rm = set()  # removing jobs does not need to preserve order so we can benefit from the uniqueness property of using a set.

    def add_jobs(self, jobs):
        """Register jobs to be run by run_jobs(); each job added once."""
        if threading.current_thread() is not self:
            with self.job_lock:
                for job in jobs:
                    if job not in self.jobs:  # ensure unique
                        self.jobs.append(job)
                        self.print_error("Job added", job)
                    else:
                        self.print_error("add_jobs: FIXME job already added", job)
        else:
            # support for adding/removing jobs from within the ThreadJob's .run
            self._jobs2rm.difference_update(jobs)
            self._jobs2add.extend(jobs)

    def remove_jobs(self, jobs):
        """Unregister jobs; a warning is printed for unknown jobs."""
        if threading.current_thread() is not self:
            with self.job_lock:
                for job in jobs:
                    ct = 0
                    while job in self.jobs:  # enforce unique jobs
                        self.jobs.remove(job)
                        ct += 1
                        self.print_error("Job removed", job)
                    if not ct:
                        self.print_error("remove_jobs: FIXME job not found", job)
        else:
            # support for adding/removing jobs from within the ThreadJob's .run
            for job in jobs:
                while job in self._jobs2add:  # enforce uniqueness of jobs
                    self._jobs2add.remove(job)
            self._jobs2rm.update(jobs)

    def run_jobs(self):
        """Run every registered job once; to be called from this thread's
        main loop. Job exceptions are printed and suppressed."""
        with self.job_lock:
            for job in self.jobs:
                try:
                    job.run()
                except Exception as e:
                    # Don't let a throwing job disrupt the thread, future runs of
                    # itself, or other jobs. This is useful protection against
                    # malformed or malicious server responses
                    traceback.print_exc(file=sys.stderr)
            # below is support for jobs adding/removing themselves
            # during their run implementation.
            for addjob in self._jobs2add:
                if addjob not in self.jobs:
                    self.jobs.append(addjob)
                    self.print_error("Job added", addjob)
            self._jobs2add.clear()
            for rmjob in self._jobs2rm:
                while rmjob in self.jobs:
                    self.jobs.remove(rmjob)
                    self.print_error("Job removed", rmjob)
            self._jobs2rm.clear()

    def start(self):
        with self.running_lock:
            self.running = True
        return threading.Thread.start(self)

    def is_running(self):
        # Also reports False once the spawning thread has died.
        with self.running_lock:
            return self.running and self.parent_thread.is_alive()

    def stop(self):
        with self.running_lock:
            self.running = False

    def on_stop(self):
        # Hook called by subclasses when their main loop exits.
        self.print_error("stopped")
# Module-level verbosity flags consulted by print_error() below.
# TODO: disable
is_verbose = True
verbose_timestamps = True
verbose_thread_id = True

def set_verbosity(b, *, timestamps=True, thread_id=True):
    """Set the global verbose flag and the timestamp/thread-id prefixes
    used by print_error()."""
    global is_verbose, verbose_timestamps, verbose_thread_id
    is_verbose = b
    verbose_timestamps = timestamps
    verbose_thread_id = thread_id
# Method decorator. To be used for calculations that will always
# deliver the same result. The method cannot take any arguments
# and should be accessed as an attribute.
class cachedproperty:
    """Non-data descriptor: computes the value once, then stores it on the
    instance under the same name so later accesses are plain attribute
    lookups. The wrapped method must take no arguments."""

    def __init__(self, f):
        self.f = f

    def __get__(self, obj, owner):
        # Fix: the original used `obj = obj or owner`, which wrongly fell
        # back to the *class* whenever the instance was falsy (e.g. defines
        # __bool__/__len__ returning 0). Test for None (class access) only.
        obj = obj if obj is not None else owner
        value = self.f(obj)
        setattr(obj, self.f.__name__, value)
        return value
class Monotonic:
    ''' Returns a monotonically increasing int each time an instance is called
    as a function. Optionally thread-safe.'''
    # The counter closure is stored in a slot named __call__; the slot
    # descriptor on the class makes instances directly callable.
    __slots__ = ('__call__',)

    def __init__(self, locking=False):
        counter = itertools.count()

        def advance():
            return next(counter)

        if not locking:
            self.__call__ = advance
        else:
            guard = threading.Lock()

            def advance_locked():
                with guard:
                    return advance()

            self.__call__ = advance_locked
# Maps raw thread idents to small sequential ints for compact log prefixes.
_human_readable_thread_ids = defaultdict(Monotonic(locking=False))  # locking not needed on Monotonic instance as we lock the dict anyway
_human_readable_thread_ids_lock = threading.Lock()
_t0 = time.time()  # process start reference for relative log timestamps
def print_error(*args):
    """Print *args* to stderr, only when verbose mode is enabled; prefixes
    a short thread id and/or relative timestamp per the module flags."""
    if not is_verbose: return
    if verbose_thread_id:
        with _human_readable_thread_ids_lock:
            args = ("|%02d|"%_human_readable_thread_ids[threading.get_ident()], *args)
    if verbose_timestamps:
        args = ("|%7.3f|"%(time.time() - _t0), *args)
    print_stderr(*args)
_print_lock = threading.RLock()  # use a recursive lock in extremely rare case a signal handler does a print_error while lock held by same thread as sighandler invocation's thread

def _print_common(file, *args):
    """Join *args* with spaces and write them as one line to *file*,
    serialized by _print_lock; IO errors are tolerated."""
    s_args = " ".join(str(item) for item in args) + "\n"  # newline at end *should* implicitly .flush() underlying stream, but not always if redirecting to file
    with _print_lock:
        # locking is required here as TextIOWrapper subclasses are not thread-safe;
        # see: https://docs.python.org/3.6/library/io.html#multi-threading
        try:
            file.write(s_args)
            file.flush()  # necessary if redirecting to file
        except OSError:
            '''In very rare cases IO errors can occur here. We tolerate them. See #1595.'''
def print_stderr(*args):
    """Unconditionally print *args* to stderr (thread-safe, flushed)."""
    _print_common(sys.stderr, *args)
def print_msg(*args):
    """Unconditionally print *args* to stdout (thread-safe, flushed)."""
    _print_common(sys.stdout, *args)
def json_encode(obj):
    """Serialize *obj* to pretty-printed JSON (Transaction-aware via
    MyEncoder); falls back to repr() for unserializable objects."""
    try:
        return json.dumps(obj, sort_keys = True, indent = 4, cls=MyEncoder)
    except TypeError:
        return repr(obj)
def json_decode(x):
    """Parse JSON text, decoding floats as Decimal for precision.
    Returns *x* unchanged if it cannot be parsed."""
    try:
        return json.loads(x, parse_float=PyDecimal)
    except Exception:
        # Fix: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; narrowed so those propagate.
        return x
# taken from Django Source Code
def constant_time_compare(val1, val2):
    """Return True if the two strings are equal, False otherwise.
    Uses hmac.compare_digest to avoid timing side-channels; accepts
    str or bytes (str is utf8-encoded first)."""
    return hmac.compare_digest(to_bytes(val1, 'utf8'), to_bytes(val2, 'utf8'))
# decorator that prints execution time
def profiler(func):
    """Decorator: print the wall-clock duration of each call to *func*
    via print_error, then return func's result."""
    from functools import wraps

    # Fix: the original returned a bare lambda, discarding the wrapped
    # function's __name__/__qualname__/__doc__; wraps() preserves them.
    @wraps(func)
    def do_profile(*args, **kw_args):
        t0 = time.time()
        o = func(*args, **kw_args)
        t = time.time() - t0
        print_error("[profiler]", func.__qualname__, "%.4f"%t)
        return o
    return do_profile
@lru_cache()
def android_data_dir():
    """Return the app-private data directory when running under Android
    (Chaquopy); cached after the first call."""
    from com.chaquo.python import Python
    context = Python.getPlatform().getApplication()
    return context.getFilesDir().getPath() + '/data'
def ensure_sparse_file(filename):
    """On Windows, mark *filename* as a sparse file using fsutil
    (best-effort); a no-op on other platforms."""
    if os.name == "nt":
        try:
            # Fix: the original built a shell command string with the raw
            # filename interpolated inside quotes (shell=True), which broke
            # -- and was injectable -- for filenames containing quotes.
            # Passing an argument list avoids the shell entirely.
            subprocess.call(["fsutil", "sparse", "setFlag", filename, "1"])
        except Exception:
            # Best-effort: sparse support is an optimization only.
            pass
def get_headers_dir(config):
    """Directory for blockchain headers: Android app data dir when running
    under Android, otherwise the config's path."""
    return android_data_dir() if 'ANDROID_DATA' in os.environ else config.path
def assert_datadir_available(config_path):
    """Raise FileNotFoundError if the data directory no longer exists."""
    if not os.path.exists(config_path):
        raise FileNotFoundError(
            'Electron Cash datadir does not exist. Was it deleted while running?' + '\n' +
            'Should be at {}'.format(config_path))
def assert_file_in_datadir_available(path, config_path):
    """Raise FileNotFoundError if *path* is missing, distinguishing
    whether the whole datadir vanished or just the file."""
    if os.path.exists(path):
        return
    # File is missing: check the datadir first so the error message says
    # which of the two disappeared.
    assert_datadir_available(config_path)
    raise FileNotFoundError(
        'Cannot find file but datadir is there.' + '\n' +
        'Should be at {}'.format(path))
def standardize_path(path):
    """Return a canonical form of *path* (absolute, symlinks resolved,
    case-normalized); None passes through unchanged."""
    if path is None:
        return None
    return os.path.normcase(os.path.realpath(os.path.abspath(path)))
def get_new_wallet_name(wallet_folder: str) -> str:
    """Return the first unused "wallet_<i>" filename in *wallet_folder*,
    counting up from wallet_1."""
    i = 1
    while os.path.exists(os.path.join(wallet_folder, "wallet_%d" % i)):
        i += 1
    return "wallet_%d" % i
def assert_bytes(*args):
    """
    porting helper, assert args type

    Asserts every argument is bytes-like (bytes or bytearray); on failure
    prints the offending types before re-raising.
    """
    try:
        for arg in args:
            assert isinstance(arg, (bytes, bytearray))
    except Exception:
        print('assert bytes failed', list(map(type, args)))
        raise
def assert_str(*args):
    """
    porting helper, assert args type

    Asserts every argument is a str.
    """
    for arg in args:
        assert isinstance(arg, str)
def to_string(x, enc='utf8'):
    """Decode a bytes-like *x* to str using *enc*; str passes through."""
    if isinstance(x, str):
        return x
    if isinstance(x, (bytes, bytearray)):
        return x.decode(enc)
    raise TypeError("Not a string or bytes like object")
def to_bytes(something, encoding='utf8'):
    """
    cast string to bytes() like object, but for python2 support it's bytearray copy

    Accepts bytes (returned as-is), str (encoded with *encoding*) and
    bytearray (copied into an immutable bytes object).
    """
    if isinstance(something, bytes):
        return something
    if isinstance(something, str):
        return something.encode(encoding)
    if isinstance(something, bytearray):
        return bytes(something)
    raise TypeError("Not a string or bytes like object")
# Hex conversion shorthands used throughout the codebase.
bfh = bytes.fromhex          # hex str -> bytes
hfu = binascii.hexlify       # bytes -> hex bytes

def bh2u(x):
    """Return the lowercase hex str for a bytes-like object.

    >>> bh2u(bytes((1, 2, 10)))
    '01020a'

    :param x: bytes
    :rtype: str
    """
    hexed = hfu(x)
    return hexed.decode('ascii')
def user_dir(prefer_local=False):
    """Return the per-user Electron Cash data directory, or None if no
    suitable environment variable is set.

    prefer_local: on Windows, prefer LOCALAPPDATA over APPDATA."""
    if 'ANDROID_DATA' in os.environ:
        return android_data_dir()
    elif os.name == 'posix' and "HOME" in os.environ:
        return os.path.join(os.environ["HOME"], ".electron-cash" )
    elif "APPDATA" in os.environ or "LOCALAPPDATA" in os.environ:
        app_dir = os.environ.get("APPDATA")
        localapp_dir = os.environ.get("LOCALAPPDATA")
        # Prefer APPDATA, but may get LOCALAPPDATA if present and req'd.
        if localapp_dir is not None and prefer_local or app_dir is None:
            app_dir = localapp_dir
        return os.path.join(app_dir, "ElectronCash")
    else:
        #raise Exception("No home directory found in environment variables.")
        return
def make_dir(path):
    """Create *path* (non-recursively) with 0700 permissions if it does
    not already exist; raises on a dangling symlink at *path*."""
    # Make directory if it does not yet exist.
    if not os.path.exists(path):
        if os.path.islink(path):
            # exists() False + islink() True => broken symlink at path.
            # Fix: was `raise BaseException(...)`; raising BaseException
            # directly escapes ordinary `except Exception` handlers.
            raise Exception('Dangling link: ' + path)
        os.mkdir(path)
        os.chmod(path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
def format_satoshis_plain(x, decimal_point=8):
    """Display a satoshi amount scaled by 10**decimal_point. Always uses
    a '.' as a decimal point and has no thousands separator."""
    if x is None:
        return _('Unknown')
    divisor = pow(10, decimal_point)
    text = "{:.8f}".format(PyDecimal(x) / divisor)
    return text.rstrip('0').rstrip('.')
# Lazily-initialized locale decimal point (see format_satoshis).
_cached_dp = None

from .caches import ExpiringCache
# This cache will eat about ~6MB of memory per 20,000 items, but it does make
# format_satoshis() run over 3x faster.
_fmt_sats_cache = ExpiringCache(maxlen=20000, name='format_satoshis cache')
def format_satoshis(x, num_zeros=0, decimal_point=8, precision=None, is_diff=False, whitespaces=False):
    """Render satoshi amount *x* in units of 10**decimal_point.

    num_zeros: minimum number of fractional digits to display.
    precision: fractional digits to compute (defaults to decimal_point).
    is_diff: include an explicit leading sign.
    whitespaces: pad to fixed-width columns for tabular display.
    Results are memoized in _fmt_sats_cache, keyed on all arguments."""
    global _cached_dp
    if x is None:
        return _('Unknown')
    if precision is None:
        precision = decimal_point
    cache_key = (x,num_zeros,decimal_point,precision,is_diff,whitespaces)
    result = _fmt_sats_cache.get(cache_key)
    if result is not None:
        return result
    decimal_format = ".0" + str(precision) if precision > 0 else ""
    if is_diff:
        decimal_format = '+' + decimal_format
    try:
        result = ("{:" + decimal_format + "f}").format(x / pow(10, decimal_point)).rstrip('0')
    except ArithmeticError:
        # Normally doesn't happen but if x is a huge int, we may get
        # OverflowError or other ArithmeticError subclass exception. See #1024.
        return 'unknown'
    integer_part, fract_part = result.split(".")
    if not _cached_dp:
        # We lazy init this here rather than at module level because iOS sets
        # locale at startup -- so we should initialize this variable on
        # first run through this function rather than at module load time.
        _cached_dp = localeconv().get('decimal_point') or '.'
    dp = _cached_dp
    if len(fract_part) < num_zeros:
        fract_part += "0" * (num_zeros - len(fract_part))
    result = integer_part + dp + fract_part
    if whitespaces:
        result += " " * (decimal_point - len(fract_part))
        result = " " * (15 - len(result)) + result
    _fmt_sats_cache.put(cache_key, result)
    return result
def format_fee_satoshis(fee, num_zeros=0):
    """Format a fee in whole satoshis (decimal_point=0)."""
    return format_satoshis(fee, num_zeros, 0, precision=num_zeros)
def timestamp_to_datetime(timestamp):
    """Convert a POSIX timestamp to a local datetime, or None on any
    conversion failure (bad type, out-of-range value, ...)."""
    try:
        return datetime.fromtimestamp(timestamp)
    except Exception:
        # Fix: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; narrowed so those propagate.
        return None
def format_time(timestamp):
    """Human-readable local time for *timestamp* ("YYYY-MM-DD HH:MM"),
    or the translated "Unknown" when unavailable."""
    if timestamp:
        dt = timestamp_to_datetime(timestamp)
        if dt:
            # isoformat gives "...HH:MM:SS"; drop the ":SS" suffix.
            return dt.isoformat(' ')[:-3]
    return _("Unknown")
# Takes a timestamp and returns a string with the approximation of the age
def age(from_date, since_date = None, target_tz=None, include_seconds=False):
    """Return a human-friendly approximate age, e.g. "2 hours ago" or
    "in 3 days".

    from_date: POSIX timestamp (None -> "Unknown").
    since_date: datetime or timestamp to compare against (default: now).
    """
    if from_date is None:
        return _("Unknown")
    try:
        from_date = datetime.fromtimestamp(from_date)
        if since_date is None:
            # NOTE(review): with a non-None target_tz this produces an
            # aware datetime while from_date is naive -- the comparison
            # below would raise TypeError; confirm callers pass tz=None.
            since_date = datetime.now(target_tz)
        else:
            if isinstance(since_date, (int, float)):
                since_date = datetime.fromtimestamp(since_date)
    except ValueError:
        return _("Error")
    td = time_difference(from_date - since_date, include_seconds)
    if from_date < since_date:
        return _("{time} ago").format(time=td)
    else:
        return _("in {time}").format(time=td)
def time_difference(distance_in_time, include_seconds):
    """Translate a timedelta into a coarse human-readable phrase
    ("about a minute", "3 days", "about 1 year", ...).

    distance_in_time: a timedelta (sign is ignored).
    include_seconds: give finer-grained phrases below one minute."""
    #distance_in_time = from_date - since_date
    distance_in_seconds = int(round(abs(distance_in_time.days * 86400 + distance_in_time.seconds)))
    distance_in_minutes = int(round(distance_in_seconds / 60))
    if distance_in_seconds < 60:
        if include_seconds:
            for remainder in [5, 10, 20]:
                if distance_in_seconds < remainder:
                    return _("less than {seconds} seconds").format(seconds=remainder)
            if distance_in_seconds < 40:
                return _("half a minute")
            else:
                return _("about a minute")
        else:
            return _("less than a minute")
    elif distance_in_seconds < 90:
        return _("about a minute")
    elif distance_in_minutes < 45:
        fmt = ngettext("{minutes} minute", "{minutes} minutes", distance_in_minutes)
        return fmt.format(minutes=distance_in_minutes)
    elif distance_in_minutes < 90:
        return _("about 1 hour")
    elif distance_in_minutes < 1440:
        distance_in_hours = round(distance_in_minutes / 60.0)
        fmt = ngettext("{hours} hour", "{hours} hours", distance_in_hours)
        return fmt.format(hours=distance_in_hours)
    elif distance_in_minutes < 2160:
        return _("about 1 day")
    elif distance_in_minutes < 43220:
        distance_in_days = round(distance_in_minutes / 1440.0)
        fmt = ngettext("{days} day", "{days} days", distance_in_days)
        return fmt.format(days=distance_in_days)
    elif distance_in_minutes < 64830:
        return _("about 1 month")
    elif distance_in_minutes < 525600:
        distance_in_months = round(distance_in_minutes / 43200.0)
        fmt = ngettext("{months} month", "{months} months", distance_in_months)
        return fmt.format(months=distance_in_months)
    elif distance_in_minutes < 788400:
        return _("about 1 year")
    else:
        distance_in_years = round(distance_in_minutes / 525600.0)
        fmt = ngettext("{years} year", "{years} years", distance_in_years)
        return fmt.format(years=distance_in_years)
# Python bug (http://bugs.python.org/issue1927) causes raw_input
# to be redirected improperly between stdin/stderr on Unix systems
#TODO: py3
def raw_input(prompt=None):
    """input() replacement that writes the prompt to stdout explicitly
    (works around the redirection bug referenced above)."""
    if prompt:
        sys.stdout.write(prompt)
    return builtin_raw_input()

import builtins
builtin_raw_input = builtins.input
# Monkey-patch the builtin so all input() calls in the process use the
# stdout-prompting replacement above.
builtins.input = raw_input
def parse_json(message):
    """Split one newline-terminated JSON document off the front of
    *message* (bytes-like). Returns (obj_or_None, remaining_bytes);
    a malformed line is consumed and reported as None."""
    # TODO: check \r\n pattern
    newline_at = message.find(b'\n')
    if newline_at == -1:
        return None, message
    line = message[0:newline_at]
    rest = message[newline_at + 1:]
    try:
        obj = json.loads(line.decode('utf8'))
    except Exception:
        # just consume the line and ignore error.
        obj = None
    return obj, rest
# Lowercase class name kept: existing code raises/catches `timeout`.
class timeout(Exception):
    ''' Server timed out on broadcast tx (normally due to a bad connection).
    Exception string is the translated error string.'''
    pass

TimeoutException = timeout  # Future compat. with Electrum codebase/cherrypicking
class ServerError(Exception):
    ''' Note exception string is the translated, gui-friendly error message.
    self.server_msg may be a dict or a string containing the raw response from
    the server. Do NOT display self.server_msg in GUI code due to potential for
    phishing attacks from the untrusted server.
    See: https://github.com/spesmilo/electrum/issues/4968 '''
    def __init__(self, msg, server_msg = None):
        super().__init__(msg)
        self.server_msg = server_msg or ''  # prefer empty string if none supplied
class ServerErrorResponse(ServerError):
    ''' Raised by network.py broadcast_transaction2() when the server sent an
    error response. The actual server error response is contained in a dict
    and/or str in self.server_msg. Warning: DO NOT display the server text.
    Displaying server text harbors a phishing risk. Instead, a translated
    GUI-friendly 'deduced' response is in the exception string.
    See: https://github.com/spesmilo/electrum/issues/4968 '''
    pass
class TxHashMismatch(ServerError):
    ''' Raised by network.py broadcast_transaction2().
    Server sent an OK response but the txid it supplied does not match our
    signed tx id that we requested to broadcast. The txid returned is
    stored in self.server_msg. It's advised not to display
    the txid response as there is also potential for phishing exploits if
    one does. Instead, the exception string contains a suitable translated
    GUI-friendly error message. '''
    pass
import socket
import ssl
import errno
class JSONSocketPipe(PrintError):
    """ Non-blocking wrapper for a socket passing one-per-line json messages:
    <json><newline><json><newline><json><newline>...
    Correctly handles SSL sockets and gives useful info for select loops.
    """

    class Closed(RuntimeError):
        ''' Raised if socket is closed '''

    def __init__(self, socket, *, max_message_bytes=0):
        ''' A max_message_bytes of <= 0 means unlimited, otherwise a positive
        value indicates this many bytes to limit the message size by. This is
        used by get(), which will raise MessageSizeExceeded if the message size
        received is larger than max_message_bytes. '''
        self.socket = socket
        socket.settimeout(0)  # non-blocking mode
        self.recv_time = time.time()  # time data was last received
        self.max_message_bytes = max_message_bytes
        self.recv_buf = bytearray()  # unparsed input
        self.send_buf = bytearray()  # unsent output

    def idle_time(self):
        '''Seconds elapsed since data was last received on this pipe.'''
        return time.time() - self.recv_time

    def get_selectloop_info(self):
        ''' Returns tuple:
        read_pending - new data is available that may be unknown to select(),
        so perform a get() regardless of select().
        write_pending - some send data is still buffered, so make sure to call
        send_flush if writing becomes available.
        '''
        try:
            # pending() only defined on SSL sockets.
            has_pending = self.socket.pending() > 0
        except AttributeError:
            has_pending = False
        return has_pending, bool(self.send_buf)

    def get(self):
        ''' Attempt to read out a message, possibly saving additional messages in
        a receive buffer.
        If no message is currently available, this raises util.timeout and you
        should retry once data becomes available to read. If connection is bad for
        some known reason, raises .Closed; other errors will raise other exceptions.
        '''
        while True:
            response, self.recv_buf = parse_json(self.recv_buf)
            if response is not None:
                return response
            try:
                data = self.socket.recv(1024)
            except (socket.timeout, BlockingIOError, ssl.SSLWantReadError):
                raise timeout
            except OSError as exc:
                if exc.errno in (11, 35, 60, 10035):
                    # some OSes might give these ways of indicating a would-block error.
                    raise timeout
                if exc.errno == 9:
                    # EBADF. Someone called close() locally so FD is bad.
                    raise self.Closed('closed by local')
                raise self.Closed('closing due to {}: {}'.format(type(exc).__name__, str(exc)))
            except ssl.SSLError as e:
                # Note: rarely an SSLWantWriteError can happen if we renegotiate
                # SSL and buffers are full. This is pretty annoying to handle
                # right and we don't expect to renegotiate, so just drop
                # connection.
                # BUGFIX: this handler previously formatted its message with
                # the name `exc`, which is unbound in this branch (only the
                # OSError clause binds it), so reaching it raised NameError
                # instead of Closed. (On Python 3 ssl.SSLError subclasses
                # OSError, so this clause is normally shadowed anyway.)
                raise self.Closed('closing due to {}: {}'.format(type(e).__name__, str(e)))
            if not data:
                raise self.Closed('closed by remote')
            self.recv_buf.extend(data)
            self.recv_time = time.time()
            if self.max_message_bytes > 0 and len(self.recv_buf) > self.max_message_bytes:
                raise self.Closed(f"Message limit is: {self.max_message_bytes}; receive buffer exceeded this limit!")

    def send(self, request):
        '''Queue one json message and attempt to flush the send buffer.'''
        out = json.dumps(request) + '\n'
        out = out.encode('utf8')
        self.send_buf.extend(out)
        return self.send_flush()

    def send_all(self, requests):
        '''Queue several json messages and attempt to flush the send buffer.'''
        out = b''.join(map(lambda x: (json.dumps(x) + '\n').encode('utf8'), requests))
        self.send_buf.extend(out)
        return self.send_flush()

    def send_flush(self):
        ''' Flush any unsent data from a prior call to send / send_all.
        Raises timeout if more data remains to be sent.
        Raise .Closed in the event of a socket error that requires abandoning
        this socket.
        '''
        send_buf = self.send_buf
        while send_buf:
            try:
                sent = self.socket.send(send_buf)
            except (socket.timeout, BlockingIOError, ssl.SSLWantWriteError):
                raise timeout
            except OSError as exc:
                if exc.errno in (11, 35, 60, 10035):
                    # some OSes might give these ways of indicating a would-block error.
                    raise timeout
                if exc.errno == 9:
                    # EBADF. Someone called close() locally so FD is bad.
                    raise self.Closed('closed by local')
                raise self.Closed('closing due to {}: {}'.format(type(exc).__name__, str(exc)))
            except ssl.SSLError as e:
                # Note: rarely an SSLWantReadError can happen if we renegotiate
                # SSL and buffers are full. This is pretty annoying to handle
                # right and we don't expect to renegotiate, so just drop
                # connection.
                # BUGFIX: as in get(), this branch previously referenced the
                # unbound name `exc`; use the bound `e` instead.
                raise self.Closed('closing due to {}: {}'.format(type(e).__name__, str(e)))
            if sent == 0:
                # shouldn't happen, but just in case, we don't want to infinite
                # loop.
                raise timeout
            del send_buf[:sent]
def setup_thread_excepthook():
    """
    Workaround for `sys.excepthook` thread bug from:
    http://bugs.python.org/issue1230540
    Call once from the main thread before creating any threads.
    """
    init_original = threading.Thread.__init__

    def init(self, *args, **kwargs):
        init_original(self, *args, **kwargs)
        run_original = self.run

        # Wrap each thread's run() so uncaught exceptions are routed
        # through sys.excepthook instead of threading's own handler.
        def run_with_except_hook(*args2, **kwargs2):
            try:
                run_original(*args2, **kwargs2)
            except Exception:
                sys.excepthook(*sys.exc_info())

        self.run = run_with_except_hook

    # Monkey-patch Thread.__init__ for all threads created afterwards.
    threading.Thread.__init__ = init
def versiontuple(v):
    ''' Please do not use this function as it breaks with EC version styles
    of things like '3.3.4CS'. Instead, use lib/version.parse_package_version'''
    return tuple(int(part) for part in v.split("."))
class Handlers:
    ''' A place to put app-global handlers. Currently the
    "do_in_main_thread_handler" lives here '''

    @staticmethod
    def default_do_in_main_thread_handler(func, *args, **kwargs):
        ''' The default "do_in_main_thread_handler" simply immediately calls
        func, but it does print a warning if the current thread is not
        the main thread. '''
        this_thread = threading.current_thread()
        if this_thread is not threading.main_thread():
            print_stderr(f"Warning: do_in_main_thread called with the default handler"
                         f" from outside the main thread (thr: {this_thread.name});"
                         " such usage may lead to undefined behavior. Traceback:\n",
                         ''.join(traceback.format_stack()))
        func(*args, **kwargs)

    # GUI subsystems that wish to use `do_in_main_thread` (defined below) must
    # register a handler by setting this class-level attribute. See
    # ElectrumGui._setup_do_in_main_thread_handler in gui/qt/__init__py for an
    # example of how this is done for Qt.
    do_in_main_thread = default_do_in_main_thread_handler
def do_in_main_thread(func, *args, **kwargs):
    ''' Calls func(*args, **kwargs) in the main thread, or immediately if the
    calling context *is* the main thread. Note that for this to work the GUI
    system in question must install a handler for this mechanism (if it has an
    event loop that is!) and set the global Handlers.do_in_main_thread =
    someFunc() to actually post the invocation to the main thread. The default
    handler immediately invokes func, but it does print a warning if the current
    thread is not the main thread '''
    if threading.current_thread() is threading.main_thread():
        # Already on the main thread: invoke directly.
        func(*args, **kwargs)
    else:
        Handlers.do_in_main_thread(func, *args, **kwargs)
def in_main_thread(func):
    """
    Function decorator that runs the decorated function in the main thread.
    The wrapped function's return value is discarded (the call may be
    posted asynchronously to the main thread).
    """
    from functools import wraps

    # Fix: the original wrapper did not use functools.wraps, so decorated
    # functions lost their __name__/__doc__/__qualname__.
    @wraps(func)
    def wrapper(*args, **kwargs):
        do_in_main_thread(func, *args, **kwargs)
    return wrapper
class Weak:
'''
Weak reference factory. Create either a weak proxy to a bound method
or a weakref.proxy, depending on whether this factory class's __new__ is
invoked with a bound method or a regular function/object as its first
argument.
If used with an object/function reference this factory just creates a
weakref.proxy and returns that.
myweak = Weak(myobj)
type(myweak) == weakref.proxy # <-- True
The interesting usage is when this factory is used with a bound method
instance. In which case it returns a MethodProxy which behaves like
a proxy to a bound method in that you can call the MethodProxy object
directly:
mybound = Weak(someObj.aMethod)
mybound(arg1, arg2) # <-- invokes someObj.aMethod(arg1, arg2)
This is unlike regular weakref.WeakMethod which is not a proxy and requires
unsightly `foo()(args)`, or perhaps `foo() and foo()(args)` idioms.
Also note that no exception is raised with MethodProxy instances when
calling them on dead references.
Instead, if the weakly bound method is no longer alive (because its object
died), the situation is ignored as if no method were called (with an
optional print facility provided to print debug information in such a
situation).
The optional `print_func` class attribute can be set in MethodProxy
globally or for each instance specifically in order to specify a debug
print function (which will receive exactly two arguments: the
MethodProxy instance and an info string), so you can track when your weak
bound method is being called after its object died (defaults to
`print_error`).
Note you may specify a second postional argument to this factory,
`callback`, which is identical to the `callback` argument in the weakref
documentation and will be called on target object finalization
(destruction).
This usage/idiom is intented to be used with Qt's signal/slots mechanism
to allow for Qt bound signals to not prevent target objects from being
garbage collected due to reference cycles -- hence the permissive,
exception-free design.'''
def __new__(cls, obj_or_bound_method, *args, **kwargs):
if inspect.ismethod(obj_or_bound_method):
# is a method -- use our custom proxy class
return cls.MethodProxy(obj_or_bound_method, *args, **kwargs)
else:
# Not a method, just return a weakref.proxy
return weakref.proxy(obj_or_bound_method, *args, **kwargs)
ref = weakref.ref # alias for convenience so you don't have to import weakref
Set = weakref.WeakSet # alias for convenience
ValueDictionary = weakref.WeakValueDictionary # alias for convenience
KeyDictionary = weakref.WeakKeyDictionary # alias for convenience
Method = weakref.WeakMethod # alias
finalize = weakref.finalize # alias
_weak_refs_for_print_error = defaultdict(list)
@staticmethod
def finalization_print_error(obj, msg=None):
''' Supply a message to be printed via print_error when obj is
finalized (Python GC'd). This is useful for debugging memory leaks. '''
assert not isinstance(obj, type), "finaliztion_print_error can only be used on instance objects!"
if msg is None:
if isinstance(obj, PrintError):
name = obj.diagnostic_name()
else:
name = obj.__class__.__qualname__
msg = "[{}] finalized".format(name)
def finalizer(x):
wrs = Weak._weak_refs_for_print_error
msgs = wrs.get(x, [])
for m in msgs:
print_error(m)
wrs.pop(x, None)
wr = Weak.ref(obj, finalizer)
Weak._weak_refs_for_print_error[wr].append(msg)
    class MethodProxy(weakref.WeakMethod):
        ''' Direct-use of this class is discouraged (aside from assigning to
        its print_func attribute). Instead use of the wrapper class 'Weak'
        defined in the enclosing scope is encouraged. '''
        # Debug hook invoked when the proxy is called after its referent died.
        print_func = lambda x, this, info: print_error(this, info) # <--- set this attribute if needed, either on the class or instance level, to control debug printing behavior. None is ok here.
        def __init__(self, meth, *args, **kwargs):
            super().__init__(meth, *args, **kwargs)
            # teehee.. save some information about what to call this thing for debug print purposes
            self.qname, self.sname = meth.__qualname__, str(meth.__self__)
        def __call__(self, *args, **kwargs):
            ''' Either directly calls the method for you or prints debug info
            if the target object died '''
            meth = super().__call__() # if dead, None is returned
            if meth: # could also do callable() as the test but hopefully this is sightly faster
                return meth(*args,**kwargs)
            elif callable(self.print_func):
                self.print_func(self, "MethodProxy for '{}' called on a dead reference. Referent was: {})".format(self.qname,
                                                                                                                  self.sname))
# Export this method to the top level for convenience. People reading code
# may wonder 'Why Weak.finalization_print_error'?. The fact that this relies on
# weak refs is an implementation detail, really.
finalization_print_error = Weak.finalization_print_error
| |
"""DXR's concept of version control systems
The main entry points are `tree_to_repos`, which produces a mapping of roots
to VCS objects for each version control root discovered under the provided
tree, and `path_to_vcs`, which returns a VCS object for the version control
system that tracks the given path. Currently supported VCSs are Mercurial,
Git, and Perforce.
Currently supported upstream views:
- Git (GitHub)
- Mercurial (hgweb)
TODO:
- Add gitweb support for git.
- Add cvs, svn, bzr support.
- Produce in-DXR blame information using VCSs.
- Check if the mercurial paths are specific to Mozilla's customization or not.
"""
from datetime import datetime
import marshal
import os
from os.path import exists, join, realpath, relpath, split
from pkg_resources import resource_filename
import subprocess
import urlparse
from warnings import warn
import hglib
from ordereddict import OrderedDict
from dxr.utils import without_ending
class Vcs(object):
    """A class representing an abstract notion of a version-control system.
    In general, all path arguments to query methods should be normalized to be
    relative to the root directory of the VCS.
    Subclasses must provide a ``command`` class attribute (the VCS executable
    name) for :meth:`invoke_vcs` to work.
    """
    def __init__(self, root):
        # Absolute path of the repository's top-level directory.
        self.root = root
    def get_root_dir(self):
        """Return the directory that is at the root of the VCS."""
        return self.root
    def get_vcs_name(self):
        """Return a recognizable name for the VCS."""
        return type(self).__name__
    @classmethod
    def invoke_vcs(cls, args, cwd, **kwargs):
        """Return the result of invoking the VCS command on the repository from
        given working directory, with extra kwargs passed along to the Popen constructor.
        Raises subprocess.CalledProcessError on a non-zero exit status.
        """
        return subprocess.check_output([cls.command] + args, cwd=cwd, **kwargs)
    def is_tracked(self, path):
        """Does the repository track this file?"""
        raise NotImplementedError
    def has_upstream(self):
        """Return true if this VCS has a usable upstream."""
        # NOTE(review): returns the (truthy) NotImplemented sentinel rather
        # than raising; every subclass here overrides this method.
        return NotImplemented
    # Note: the generate_* methods shouldn't be expected to return useful URLs
    # unless this VCS has_upstream().
    def generate_log(self, path):
        """Construct URL to upstream view of log of file at path."""
        raise NotImplementedError
    def generate_diff(self, path):
        """Construct URL to upstream view of diff of file at path."""
        raise NotImplementedError
    def generate_blame(self, path):
        """Construct URL to upstream view of blame on file at path."""
        raise NotImplementedError
    def generate_raw(self, path):
        """Construct URL to upstream view to raw file at path."""
        raise NotImplementedError
    def last_modified_date(self, path):
        """Return a datetime object that represents the last UTC a commit was
        made to the given path.
        """
        raise NotImplementedError
    @classmethod
    def get_contents(cls, working_dir, rel_path, revision, stderr=None):
        """Return contents of a file at a certain revision.
        :arg working_dir: The working directory from which to run the VCS
            command. Beware that the dirs which existed at the rev in question
            may not exist in the checked-out rev. Also, you cannot blithely use
            the root of the source folder, as there may be, for instance,
            nested git repos in the tree.
        :arg rel_path: The relative path to the file, from ``working_dir``
        :arg revision: The revision at which to pull the file, in a
            VCS-dependent format
        """
        raise NotImplementedError
    def display_rev(self, path):
        """Return a human-readable revision identifier for the repository."""
        raise NotImplementedError
class Mercurial(Vcs):
    command = 'hg'
    def __init__(self, root):
        super(Mercurial, self).__init__(root)
        # Load our bundled hg extension so we can fetch per-file previous
        # revisions in a single command.
        hgext = resource_filename('dxr', 'hgext/previous_revisions.py')
        with hglib.open(root,
                configs=['extensions.previous_revisions=%s' % hgext]) as client:
            tip = client.tip()
            self.revision = tip.node
            self.previous_revisions = self._find_previous_revisions(client)
        self.upstream = self._construct_upstream_url()
    def has_upstream(self):
        # '' means _construct_upstream_url found no default path.
        return self.upstream != ""
    def _construct_upstream_url(self):
        """Derive an upstream (hgweb-style) URL from 'hg paths default',
        rewriting ssh:// to http:// and dropping credentials, params, query
        and fragment. Return '' when the repo has no default path."""
        with open(os.devnull, 'w') as devnull:
            try:
                upstream = urlparse.urlparse(self.invoke_vcs(['paths', 'default'],
                    self.root, stderr=devnull).strip())
            except subprocess.CalledProcessError:
                # No default path, so no upstream
                return ""
        recomb = list(upstream)
        if upstream.scheme == 'ssh':
            recomb[0] = 'http'
        recomb[1] = upstream.hostname # Eliminate any username stuff
        # check if port is defined and add that to the url
        if upstream.port:
            recomb[1] += ":{}".format(upstream.port)
        recomb[2] = '/' + recomb[2].lstrip('/') # strip all leading '/', add one back
        if not upstream.path.endswith('/'):
            recomb[2] += '/' # Make sure we have a '/' on the end
        recomb[3] = recomb[4] = recomb[5] = '' # Just those three
        return urlparse.urlunparse(recomb)
    def _find_previous_revisions(self, client):
        """Find the last revision and date in which each file changed, for diff
        links and timestamps.
        Return a mapping {path: (last commit node in which the file changed,
        UTC datetime of that commit)}.
        """
        last_change = {}
        for line in client.rawcommand(['previous-revisions']).splitlines():
            # Each line is '<node>@<unix timestamp>@<path>'; split at most
            # twice so a path containing '@' stays intact.
            commit, date, path = line.split('@', 2)
            last_change[path] = (commit, datetime.utcfromtimestamp(float(date)))
        return last_change
    @classmethod
    def claim_vcs_source(cls, path, dirs, tree):
        """Claim ``path`` if it holds a healthy hg checkout; prune '.hg' from
        ``dirs`` so os.walk does not descend into it."""
        if '.hg' in dirs:
            # Make sure mercurial is happy before claiming the source.
            try:
                Mercurial.invoke_vcs(['status'], path)
            except subprocess.CalledProcessError:
                return None
            dirs.remove('.hg')
            return cls(path)
        return None
    def display_rev(self, path):
        # Abbreviated (12-hex-digit) tip changeset hash.
        return self.revision[:12]
    def is_tracked(self, path):
        return path in self.previous_revisions
    def last_modified_date(self, path):
        # Implicitly returns None for untracked paths.
        if path in self.previous_revisions:
            return self.previous_revisions[path][1]
    def generate_raw(self, path):
        return "{}raw-file/{}/{}".format(self.upstream, self.revision, path)
    def generate_diff(self, path):
        # We generate link to diff with the last revision in which the file changed.
        return "{}diff/{}/{}".format(self.upstream, self.previous_revisions[path][0], path)
    def generate_blame(self, path):
        # The doubled braces survive .format(), leaving a literal '{{line}}'
        # placeholder in the produced URL.
        return "{}annotate/{}/{}#l{{{{line}}}}".format(self.upstream, self.revision, path)
    def generate_log(self, path):
        return "{}filelog/{}/{}".format(self.upstream, self.revision, path)
    @classmethod
    def get_contents(cls, working_dir, rel_path, revision, stderr=None):
        return cls.invoke_vcs(['cat', '-r', revision, rel_path], working_dir, stderr=stderr)
class Git(Vcs):
    command = 'git'
    def __init__(self, root):
        super(Git, self).__init__(root)
        # Everything 'git ls-files' reports, as repo-relative paths.
        self.tracked_files = set(line for line in
                                 self.invoke_vcs(['ls-files'], self.root).splitlines())
        self.revision = self.invoke_vcs(['rev-parse', 'HEAD'], self.root).strip()
        self.upstream = self._construct_upstream_url()
        self.last_changed = self._find_last_changed()
    def _find_last_changed(self):
        """Return map {path: date of last authored change}
        Parses a single 'git log' pass in which each commit contributes a
        %at timestamp line followed by the file paths it touched.
        """
        consume_date = True
        current_date = None
        last_changed = {}
        for line in self.invoke_vcs(
                ['log', '--format=format:%at', '--name-only'], self.root).splitlines():
            # Commits are separated by empty lines.
            if not line:
                # Then the next line is a date.
                consume_date = True
            else:
                if consume_date:
                    current_date = datetime.utcfromtimestamp(float(line))
                    consume_date = False
                else:
                    # Then the line should have a file path, record it if we have
                    # not seen it and it's tracked.
                    if line in self.tracked_files and line not in last_changed:
                        last_changed[line] = current_date
        return last_changed
    def has_upstream(self):
        return self.upstream != ""
    def _construct_upstream_url(self):
        """Return an https://github.com/... URL derived from the 'origin'
        remote, or '' when no supported remote exists."""
        source_urls = self.invoke_vcs(['remote', '-v'], self.root).split('\n')
        for src_url in source_urls:
            if not src_url:
                continue
            name, repo, _ = src_url.split()
            # TODO: Why do we assume origin is upstream?
            if name == 'origin':
                if repo.startswith("git@github.com:"):
                    # SSH-style remote; translate to the web URL.
                    return "https://github.com/" + repo[len("git@github.com:"):]
                elif repo.startswith(("git://github.com/", "https://github.com/")):
                    repo = without_ending('.git', repo)
                    if repo.startswith("git:"):
                        repo = "https" + repo[len("git"):]
                    return repo
                warn("Your git remote is not supported yet. Please use a "
                     "GitHub remote if you would like version control "
                     "navigation links to show.")
                break
        return ""
    def last_modified_date(self, path):
        return self.last_changed.get(path)
    @classmethod
    def claim_vcs_source(cls, path, dirs, tree):
        """Claim ``path`` when it contains a .git dir and git can open it;
        prune '.git' from ``dirs`` so os.walk skips it. Implicitly returns
        None on failure."""
        if '.git' in dirs:
            try:
                vcs = cls(path)
            except subprocess.CalledProcessError:
                pass
            else:
                dirs.remove('.git')
                return vcs
    def display_rev(self, path):
        # Abbreviated (10-hex-digit) HEAD commit hash.
        return self.revision[:10]
    def is_tracked(self, path):
        return path in self.tracked_files
    def generate_raw(self, path):
        return "{}/raw/{}/{}".format(self.upstream, self.revision, path)
    def generate_diff(self, path):
        # I really want to make this anchor on the file in question, but github
        # doesn't seem to do that nicely
        return "{}/commit/{}".format(self.upstream, self.revision)
    def generate_blame(self, path):
        # Doubled braces survive .format(), leaving a literal '{{line}}'.
        return "{}/blame/{}/{}#L{{{{line}}}}".format(self.upstream, self.revision, path)
    def generate_log(self, path):
        return "{}/commits/{}/{}".format(self.upstream, self.revision, path)
    @classmethod
    def get_contents(cls, working_dir, rel_path, revision, stderr=None):
        # The ':./' form makes rel_path relative to working_dir rather than
        # to the repository root.
        return cls.invoke_vcs(['show', revision + ':./' + rel_path], working_dir, stderr=stderr)
class Perforce(Vcs):
    """Perforce support, generating p4web upstream links."""
    command = 'p4'
    def __init__(self, root, upstream):
        super(Perforce, self).__init__(root)
        # 'p4 have' lists every synced file; key them by root-relative path.
        have = self._p4run(['have'])
        self.have = dict((x['path'][len(root) + 1:], x) for x in have)
        self.upstream = upstream
        # Latest changelist among the synced files.
        self.revision = self._p4run(['changes', '-m1', '#have'])[0]['change']
    def has_upstream(self):
        return self.upstream != ""
    @classmethod
    def claim_vcs_source(cls, path, dirs, tree):
        """Claim ``path`` when a P4CONFIG file is present in it."""
        if 'P4CONFIG' not in os.environ:
            return None
        if os.path.exists(os.path.join(path, os.environ['P4CONFIG'])):
            return cls(path, tree.p4web_url)
        return None
    def _p4run(self, args):
        """Run a p4 command in marshaled (-G) mode and return the list of
        decoded result dictionaries."""
        ret = []
        # BUGFIX: copy the environment instead of aliasing os.environ.
        # The previous code (env = os.environ) mutated PWD for the whole
        # process; get_contents below already did this correctly.
        env = os.environ.copy()
        env["PWD"] = self.root
        proc = subprocess.Popen([self.command, '-G'] + args,
                                stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE,
                                cwd=self.root,
                                env=env)
        while True:
            try:
                # p4 -G emits a stream of marshalled dicts until EOF.
                x = marshal.load(proc.stdout)
            except EOFError:
                break
            ret.append(x)
        return ret
    def is_tracked(self, path):
        return path in self.have
    def generate_raw(self, path):
        info = self.have[path]
        return "{}{}?ac=98&rev1={}".format(self.upstream, info['depotFile'], info['haveRev'])
    def generate_diff(self, path):
        # Diff the previous revision against the one we have synced.
        info = self.have[path]
        haveRev = info['haveRev']
        prevRev = str(int(haveRev) - 1)
        return "{}{}?ac=19&rev1={}&rev2={}".format(self.upstream, info['depotFile'], prevRev, haveRev)
    def generate_blame(self, path):
        info = self.have[path]
        return "{}{}?ac=193".format(self.upstream, info['depotFile'])
    def generate_log(self, path):
        info = self.have[path]
        return "{}{}?ac=22#{}".format(self.upstream, info['depotFile'], info['haveRev'])
    def display_rev(self, path):
        info = self.have[path]
        return '#' + info['haveRev']
    @classmethod
    def get_contents(cls, working_dir, rel_path, revision, stderr=None):
        env = os.environ.copy()
        env['PWD'] = working_dir
        return subprocess.check_output([cls.command,
                                        'print',
                                        '-q',
                                        rel_path + '@' + revision],
                                       cwd=working_dir, env=env, stderr=stderr)
# All VCS classes we know how to detect, tried in this order by the walkers.
every_vcs = [Mercurial, Git, Perforce]
def tree_to_repos(tree):
    """Given a TreeConfig, return a mapping {root: Vcs object} where root is a
    directory under tree.source_folder. Traversal of the returned mapping
    follows the order of deepest directory first.
    :arg tree: TreeConfig object representing a source code tree
    """
    sources = {}
    # Find all of the VCSs in the source directory:
    # We may see multiple VCS if we use git submodules, for example.
    for cwd, dirs, files in os.walk(tree.source_folder):
        for vcs in every_vcs:
            attempt = vcs.claim_vcs_source(cwd, dirs, tree)
            if attempt is not None:
                sources[attempt.root] = attempt
    # It's possible that the root of the tree is not a VCS by itself, so walk up
    # the hierarchy until we find a parent folder that is a VCS. If we can't
    # find any, then no VCSs exist for the top level of this repository.
    directory = tree.source_folder
    while directory != '/' and directory not in sources:
        directory = os.path.dirname(directory)
        for vcs in every_vcs:
            attempt = vcs.claim_vcs_source(directory, os.listdir(directory), tree)
            if attempt is not None:
                sources[directory] = attempt
    # Longest (deepest) roots first, so the most specific repo wins a lookup.
    lookup_order = sorted(sources.keys(), key=len, reverse=True)
    # We want to make sure that we look up source repositories by deepest
    # directory first.
    ordered_sources = OrderedDict()
    for key in lookup_order:
        ordered_sources[key] = sources[key]
    return ordered_sources
def _split_existent(abs_folder):
"""Split a path to a dir in two, with the first half consisting of the
longest segment that exists on the FS; the second, the remainder."""
existent = abs_folder
nonexistent = ''
while existent:
if exists(existent):
break
existent, non = split(existent)
nonexistent = join(non, nonexistent)
return existent, nonexistent
def _is_within(inner, outer):
"""Return whether path ``inner`` is contained by or identical with folder
``outer``."""
# The added slashes are meant to prevent wrong answers if outer='z/a' and
# inner='z/abc'.
return (realpath(inner) + '/').startswith(realpath(outer) + '/')
def file_contents_at_rev(source_folder, rel_file, revision):
    """Attempt to return the contents of a file at a specific revision.
    If such a file is not found, return None.
    :arg source_folder: The absolute path to the root of the source folder for
        the tree we're talking about
    :arg rel_file: The source-folder-relative path to a file
    :arg revision: The VCS revision identifier, in a format defined by the VCS
    """
    # Rather than keeping a memory-intensive VcsCache around in the web process
    # (which we haven't measured; it might be okay, but I'm afraid), just keep
    # stepping rootward in the FS hierarchy until we find an actually existing
    # dir. Regardless of the method, the point is to work even on files whose
    # containing dirs have been moved or renamed.
    rel_folder, file = split(rel_file)
    abs_folder = join(source_folder, rel_folder)
    existent, nonexistent = _split_existent(abs_folder)
    # Security check: don't serve files outside the source folder:
    if not _is_within(existent, source_folder):
        return None
    with open(os.devnull, 'w') as devnull:
        # Try each VCS in turn; the first one that can produce the file wins.
        # Falls through (returning None) when none of them can.
        for cls in every_vcs:
            try:
                return cls.get_contents(existent, join(nonexistent, file), revision, stderr=devnull)
            except subprocess.CalledProcessError:
                continue
class VcsCache(object):
    """This class offers a way to obtain Vcs objects for any file within a
    given tree."""
    def __init__(self, tree):
        """Construct a VcsCache for the given tree.
        :arg tree: TreeConfig object representing a source code tree
        """
        self.tree = tree
        # Mapping {root: Vcs}, iterated deepest root first (tree_to_repos
        # returns an OrderedDict sorted that way).
        self.repos = tree_to_repos(tree)
        # Memo of path -> Vcs lookups already answered.
        self._path_cache = {}
    def vcs_for_path(self, path):
        """Given a tree and a path in the tree, find a source repository we
        know about that claims to track that file.
        :arg string path: a path to a file (not a folder)
        Returns None (via the final .get) when no known repo tracks the file.
        """
        if path in self._path_cache:
            return self._path_cache[path]
        abs_path = join(self.tree.source_folder, path)
        for directory, vcs in self.repos.iteritems():
            # This seems to be the easiest way to find "is abs_path in the
            # subtree rooted at directory?"
            if relpath(abs_path, directory).startswith('..'):
                continue
            if vcs.is_tracked(relpath(abs_path, vcs.get_root_dir())):
                self._path_cache[path] = vcs
                break
        return self._path_cache.get(path)
| |
# coding: utf-8
import json
import mimetypes
import os
import re
import urllib
import urlparse
import xml.sax.saxutils
from htmlentitydefs import name2codepoint
def urlsplit(*args):
    """
    .. function:: urlsplit(text1, [text2,...]) -> multiset
    Breaks a given URL into multiple fields. The returned table schema is:
    :scheme: What type the URL is (e.g. http, ftp ...)
    :netloc: Network location of URL (e.g. www.text.com)
    :path: Path part of URL (e.g. /data/2010/). It always has a slash at the end
    :filename: Filename part of URL
    :type: Mime type of URL, or if not a mime type exists, the extension part of filename.
    :subtype: Mime subtype of URL.
    :params: All parameters following ';' in URL.
    :query: All parameters following '?' in URL.
    :fragment: All parameters following '#' in URL.
    Examples:
    >>> table1('''
    ... http://www.test.com/apath/bpath/fname.pdf
    ... http://www.test.com/search.csv;p=5?q=test#hl=en
    ... ''')
    >>> sql("select urlsplit(a) from table1")
    scheme | netloc | path | filename | type | subtype | params | query | fragment
    -------------------------------------------------------------------------------------------------------
    http | www.test.com | /apath/bpath/ | fname.pdf | application | pdf | | |
    http | www.test.com | / | search.csv | csv | | p=5 | q=test | hl=en
    """
    # First row is the column-name header for the multiset.
    yield ('scheme', 'netloc', 'path', 'filename', 'type', 'subtype', 'params', 'query', 'fragment')
    url = ''.join(args)
    u = urlparse.urlparse(url)  # parse once; previously parsed twice
    # os.path.split always returns a (head, tail) pair, so the old
    # len() check (with its unreachable else-branch) is unnecessary.
    path, filename = os.path.split(u[2])
    if path and path[-1] != '/':
        path += '/'
    m = mimetypes.guess_type(url)
    if m[0] is not None:
        # Known mime type: report its two halves separately.
        m1, m2 = m[0].split('/')
    else:
        # Fall back to the bare filename extension as the "type".
        m1, m2 = (os.path.splitext(filename)[1], '')
    if m1.startswith('.'):
        m1 = m1[1:]
    yield [u[0], u[1], path, filename, m1, m2, u[3], u[4], u[5]]
urlsplit.registered = True
urlsplit.multiset = True
def urllocation(*args):
    """
    .. function:: urllocation(str) -> str
    Returns the location part of provided URL.
    Examples:
    >>> table1('''
    ... http://www.test.com/apath/bpath/fname.pdf
    ... http://www.test.com/search.csv;p=5?q=test#hl=en
    ... ''')
    >>> sql("select urllocation(a) from table1")
    urllocation(a)
    -----------------------------------------
    http://www.test.com/apath/bpath/fname.pdf
    http://www.test.com/search.csv
    """
    # Keep only scheme://netloc+path; drop params, query and fragment.
    parsed = urlparse.urlparse(''.join(args))
    scheme, netloc, path = parsed[0], parsed[1], parsed[2]
    return scheme + u'://' + netloc + path
urllocation.registered = True
def urlquery2jdict(*args):
    """
    .. function:: urlquery2jdict(URL or URL_query_part) -> JDICT
    Converts the query part of a URL into a JSON associative array.
    Examples:
    >>> table1('''
    ... 'url_ver=ver1&url_tim=2011-01-01T00%3A02%3A40Z'
    ... 'url_tim=2011-01-01T00%3A02%3A40Z&url_ver=ver1'
    ... http://www.test.com/search.csv;p=5?lang=test&ver=en
    ... ''')
    >>> sql("select urlquery2jdict(a) from table1")
    urlquery2jdict(a)
    ---------------------------------------------------
    {"url_tim":"2011-01-01T00:02:40Z","url_ver":"ver1"}
    {"url_tim":"2011-01-01T00:02:40Z","url_ver":"ver1"}
    {"lang":"test","ver":"en"}
    """
    query = args[0]
    # A full URL (or absolute path) needs its query component extracted
    # first; bare query strings are parsed as-is.
    if query.startswith('http://') or query[0:1] == '/':
        query = urlparse.urlparse(query)[4]
    parsed = urlparse.parse_qs(query, True)
    # parse_qs wraps every value in a list; unwrap single-element lists.
    for key in parsed:
        values = parsed[key]
        if len(values) == 1:
            parsed[key] = values[0]
    return json.dumps(parsed, separators=(',', ':'), ensure_ascii=False)
urlquery2jdict.registered = True
# Matches decimal (&#123;), hexadecimal (&#x7B;) and named (&amp;) HTML
# character references.
EntityPattern = re.compile('&(?:#(\d+)|(?:#x([\da-fA-F]+))|([a-zA-Z]+));')
def htmlunescape(s):
    """Return *s* with HTML character references replaced by the characters
    they denote; unrecognized named entities are left untouched."""
    def unescape(match):
        # Group 1: decimal reference, e.g. &#65;
        code = match.group(1)
        if code:
            return unichr(int(code, 10))
        else:
            # Group 2: hexadecimal reference, e.g. &#x41;
            code = match.group(2)
            if code:
                return unichr(int(code, 16))
            else:
                # Group 3: named reference, e.g. &amp;
                code = match.group(3)
                if code in name2codepoint:
                    return unichr(name2codepoint[code])
        # Unknown entity: leave the matched text unchanged.
        return match.group(0)
    return EntityPattern.sub(unescape, s)
def htmldecode(*args):
    """
    .. function:: htmldecode(str)
    Returns the html decoded *str*.
    Examples:
    >>> sql("select htmldecode('("die+wunderbaren+jahre")') as query")
    query
    -------------------------
    ("die+wunderbaren+jahre")
    >>> sql("select htmldecode(null) as query")
    query
    -----
    None
    """
    if len(args) > 1:
        raise functions.OperatorError("htmldecode", "operator takes only one argument")
    # SQL null passes through; test identity (is None), not equality.
    if args[0] is None:
        return None
    return htmlunescape(args[0])
htmldecode.registered = True
def htmlencode(*args):
    """
    .. function:: htmlencode(str)
    Returns the html encoded *str*: the characters '&', '<', '>' and '"'
    are replaced by their HTML entity equivalents.
    Examples (these exercise the inverse operation, htmldecode):
    >>> sql("select htmldecode('("die+wunderbaren+jahre")') as query")
    query
    -------------------------
    ("die+wunderbaren+jahre")
    >>> sql("select htmldecode(null) as query")
    query
    -----
    None
    """
    if len(args) > 1:
        # The operator name was previously misreported as 'htmldecode'.
        raise functions.OperatorError("htmlencode", "operator takes only one argument")
    if args[0] is None:
        return None
    # saxutils.escape handles & < >; the extra map also escapes double quotes.
    return xml.sax.saxutils.escape(u''.join(args[0]), {'"': '&quot;'})
htmlencode.registered = True
# Matches any HTML tag; group 1 captures the tag's contents (name + attrs).
tags = re.compile(r'<([^>]*?)>', re.UNICODE)
# Tags that htmlstriptags converts to a newline.
tagNL = re.compile(
    r'(?:\s|^)(?:br|/p|/div|/head|/table|/tr|ul|/ul|/title|/tfoot|/thead|/span|/ol|/h1|/h2|/h3|/h4|/h5|/h6|/caption)(?:\s|$)',
    re.UNICODE)
# Tags that htmlstriptags converts to a single space.
tagSPACE = re.compile(
    r'(?:\s|^)(?:/\w+|wbr|p|div|head|table|tr|title|thead|tfoot|source|span|q|pre|ol|link|i|h1|h2|h3|h4|h5|h6|em|code|caption|a|figure|figcaption)(?:\s|$)',
    re.UNICODE)
# Sub/superscript tags are rendered as an underscore.
tagUnderscore = re.compile(r'(?:\s|^)(?:sup|sub)(?:\s|$)', re.UNICODE)
def htmlstriptags(*args):
    """
    .. function:: htmlstriptags(str, default_tag_conversion)
    Strips the html tags of input. It also converts "<br>" tags to new lines. If a default_tag_conversion is provided
    then tags that would have been erased are converted to *default_tag_conversion*.
    Examples:
    >>> sql("select htmlstriptags('<tag1>asdf<>as< br>df<p class = lala>spaced</sp>paragraph</p>anotherline<tag2> w<sup>3</sup>') as query")
    query
    -------------------------------------------
    asdfas
    df spaced paragraph
    anotherline w_3
    >>> sql("select htmlstriptags('<tag1>asdf<>as< br>df<p class = lala>spaced</sp>paragraph</p>anotherline<tag2> w<sup>3</sup>', '***') as query")
    query
    ----------------------------------------------------
    ***asdf***as
    df spaced paragraph
    anotherline*** w_3
    >>> sql("select htmlstriptags(null) as query")
    query
    -----
    <BLANKLINE>
    """
    default_tag_conversion = u''
    if len(args) > 1:
        default_tag_conversion = unicode(args[1])
    def tagdecode(tag):
        # Translate each tag to newline / space / underscore / the default
        # replacement, according to the regex tables defined above.
        t = tag.group(1).lower()
        if tagNL.search(t):
            return u'\n'
        if tagSPACE.search(t):
            return u' '
        if tagUnderscore.search(t):
            return u'_'
        else:
            return default_tag_conversion
    # Null input is treated as the empty string (see the last doctest).
    if args[0] is not None:
        text = unicode(args[0])
    else:
        text = ''
    return tags.sub(tagdecode, text)
htmlstriptags.registered = True
def urldecode(*args):
    """
    .. function:: urldecode(str)
    Returns the url decoded *str*.
    Examples:
    >>> sql("select urldecode('where%2Ccollid%3Dcolid+and+u%3D%27val%27') as query")
    query
    ------------------------------
    where,collid=colid and u='val'
    >>> sql("select urldecode(null) as query")
    query
    -----
    None
    """
    if len(args) > 1:
        raise functions.OperatorError("urldecode", "operator takes only one argument")
    # SQL null passes through; test identity (is None), not '!= None'.
    if args[0] is None:
        return None
    return unicode(urllib.unquote_plus(args[0]))
urldecode.registered = True
def urlencode(*args):
    """
    .. function:: urlencode(str)
    Returns the escaped URL.
    Examples:
    >>> sql("select urlencode('where, collid=colid') as query")
    query
    -----------------------
    where%2C+collid%3Dcolid
    """
    if len(args) > 1:
        raise functions.OperatorError("urlencode", "operator takes only one argument")
    # SQL null passes through; test identity (is None), not '!= None'.
    if args[0] is None:
        return None
    return urllib.quote_plus(unicode(args[0]))
urlencode.registered = True
# A '.', '/' or '-' followed by a non-separator, non-digit, non-space char:
# the spot where a <wbr> break opportunity is inserted.
addwbr = re.compile(r'([./-])([^./\-\d\s])', re.DOTALL | re.UNICODE)
def htmladdbreaks(*args):
    """
    .. function:: htmladdbreaks(str)
    Returns *str* with <wbr> (word-break opportunity) tags inserted after
    '.', '/' and '-' separators.
    Examples:
    >>> sql("select htmladdbreaks('very-long/string') as brokenhtml")
    brokenhtml
    --------------------------
    very-<wbr>long/<wbr>string
    """
    if args[0] is None:
        return None
    out = u''.join([unicode(x) for x in args])
    return addwbr.sub(r'\1<wbr>\2', out)
htmladdbreaks.registered = True
def htmllink(*args):
    """
    .. function:: htmllink(href, linktext)
    Returns the an html link pointing to *href* and having the link text *linktext*.
    Examples:
    >>> sql("select htmllink('http://somewhere.org') as url") #doctest:+ELLIPSIS +NORMALIZE_WHITESPACE
    url
    -----------------------------------------------------------------
    <a href="http://somewhere.org">http://<wbr>somewhere.<wbr>org</a>
    >>> sql("select htmllink('somewhere.org') as url")
    url
    -----------------------------------------------------
    <a href="http://somewhere.org">somewhere.<wbr>org</a>
    >>> sql("select htmllink('somewhere.org', 'go somewhere') as url")
    url
    -----------------------------------------------
    <a href="http://somewhere.org">go somewhere</a>
    """
    def addhttp(u):
        # Prefix scheme-less targets with http://
        if u.find('://') == -1:
            return u'http://' + unicode(u)
        return unicode(u)
    if len(args) > 2:
        # Fixed the operator name ('url') and the garbled message text.
        raise functions.OperatorError("htmllink", "operator takes a maximum of two arguments")
    if len(args) == 2:
        # Explicit link text; a null text falls through to the default below.
        if args[1] is not None:
            return '<a href="' + addhttp(args[0]) + '">' + unicode(args[1]) + '</a>'
    if args[0] is None:
        return None
    # Default text: the href itself, entity-escaped with break opportunities.
    return '<a href="' + addhttp(args[0]) + '">' + htmladdbreaks(htmlencode(unicode(args[0]))) + '</a>'
htmllink.registered = True
# Standard madis-style test harness: when imported flat (not as a package
# member), register the operators with the framework; when run as a script,
# execute the embedded doctests.
if not ('.' in __name__):
    """
    This is needed to be able to test the function, put it at the end of every
    new function you create
    """
    import sys
    from functions import *
    testfunction()
    if __name__ == "__main__":
        # Python-2-only: reload(sys) re-exposes sys.setdefaultencoding.
        reload(sys)
        sys.setdefaultencoding('utf-8')
        import doctest
        doctest.testmod()
| |
#!/usr/bin/env python
#
# Copyright (C) 2014 Narf Industries <info@narfindustries.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from generator.actions import Actions, Variable
from random import choice, randint
from os.path import dirname, abspath, join
import string
import sys
sys.path.append(join(dirname(dirname(dirname(abspath(__file__)))), "support"))
from mathsupport import MathSupport
class MultiPass(Actions):
    """Action-based test driver for the 'MultiPass' list-math service."""
    # Master debug switch; the per-area flags below OR against it so a single
    # area can be enabled without turning everything on.
    DEBUG = False
    DEBUG_ERR = DEBUG or False
    DEBUG_FUNC = DEBUG or False
    DEBUG_MEAN = DEBUG or False
    DEBUG_SUM = DEBUG or False
    DEBUG_PRODUCT = DEBUG or False
    DEBUG_MIN = DEBUG or False
    DEBUG_MAX = DEBUG or False
    DEBUG_MEDIAN = DEBUG or False
    DEBUG_SORT = DEBUG or False
    DEBUG_RSORT = DEBUG or False
    DEBUG_ODDS = DEBUG or False
    DEBUG_EVENS = DEBUG or False
    DEBUG_MODE = DEBUG or False
    DEBUG_RANGE = DEBUG or False
    # NOTE(review): sys.maxint is Python-2-only, and on 64-bit builds this
    # expression equals 2**64 - 1 rather than the 32-bit maximum -- confirm.
    MAX_UINT32 = 2 * sys.maxint + 1
    MAX_LIST_LEN = 1000
    def start(self):
        """Initialize per-run state: pick a random list type and size, build
        the Python-side math model, and declare the two 8-byte nonce
        variables used by the protocol."""
        self.LIST_TYPE = choice(['INT32', 'DBL64'])
        # May exceed MAX_LIST_LEN (1000); send_create exercises the -70
        # oversized-list error path in that case.
        self.LIST_SIZE = randint(5, 1005)
        self.state['m'] = MathSupport(list_type=self.LIST_TYPE, list_size=self.LIST_SIZE)
        self.state['created'] = False
        self.state['cnonce'] = Variable('cnonce')
        self.state['cnonce'].set_slice(0,8)
        self.state['enonce'] = Variable('enonce')
        self.state['enonce'].set_slice(0,8)
    def process_nonce_create(self):
        """Read the 8-byte nonce sent during CREATE and echo it back."""
        if self.DEBUG_FUNC:
            print('-- process_nonce_create --')
        self.read(length=8, assign=self.state['cnonce'])
        self.write(self.state['cnonce'])
        return 0
    def process_nonce_eval(self):
        """Read the 8-byte EVAL nonce and echo it back.
        0.1% of the time the CREATE nonce is echoed instead to exercise the
        server's -44 bad-nonce error; return -1 on that path, else 0."""
        if self.DEBUG_FUNC:
            print('-- process_nonce_eval --')
        self.read(length=8, assign=self.state['enonce'])
        if self.chance(0.001):
            # send an invalid nonce
            self.write(self.state['cnonce'])
            expected = self.state['m'].pack_single_int32(-44)
            self.read(length=len(expected), expect=expected)
            if self.DEBUG_ERR:
                print('exiting due to incorrect nonce.')
            return -1
        else:
            self.write(self.state['enonce'])
            return 0
    def _send_command(self, command):
        """Send ``command`` to the service.
        0.1% of the time an invalid 'BAD' command is sent instead and the
        server's -150 error reply is expected; return -1 on that path,
        else 0."""
        if self.DEBUG_FUNC:
            print('-- _send_command --')
        if self.chance(0.001):
            # send invalid command (BAD)
            cmd = self.state['m'].pack_command('BAD')
            self.write(cmd)
            expected = self.state['m'].pack_single_int32(-150)
            self.read(length=len(expected), expect=expected)
            if self.DEBUG_ERR:
                print('exiting due to BAD command.')
            return -1
        else:
            cmd = self.state['m'].pack_command(command)
            self.write(cmd)
            return 0
    def send_eval(self):
        """Issue the EVAL command.
        If CREATE has not succeeded yet the server must answer -10; return
        -1 in that case (and on any command-send failure)."""
        if self.DEBUG_FUNC:
            print('-- send_eval --')
        # Eval only works after load_list has been run
        if self.state['created'] == False:
            # EVAL before load_list CREATE
            if self._send_command('EVAL') < 0:
                return -1
            expected = self.state['m'].pack_single_int32(-10)
            self.read(length=len(expected), expect=expected)
            if self.DEBUG_ERR:
                print('exiting due to EVAL before CREATE.')
            return -1
        else:
            # pack and send EVAL command
            if self._send_command('EVAL') < 0:
                return -1
    def _send_op_code(self, op_code):
        """Send ``op_code`` for an EVAL.
        0.1% of the time an invalid 'BAD' op code is sent instead and the
        server's -80 error reply is expected; return -1 on that path,
        else 0."""
        if self.DEBUG_FUNC:
            print('-- _send_op_code --')
        if self.chance(0.001):
            # invalid op_code to EVAL
            op_code = self.state['m'].pack_op_code('BAD')
            self.write(op_code)
            expected = self.state['m'].pack_single_int32(-80)
            self.read(length=len(expected), expect=expected)
            if self.DEBUG_ERR:
                print('exiting due to invalid op_code BAD.')
            return -1
        else:
            # pack and send op_code
            op_code = self.state['m'].pack_op_code(op_code)
            self.write(op_code)
            return 0
    def send_create(self):
        """Drive the CREATE command: type, length, then the list itself.
        Exercises the duplicate-CREATE (-9), bad-list-type (-30) and
        oversized-list (-70) error paths; returns -1 on any of them, else
        marks state['created'] and returns 0."""
        if self.DEBUG_FUNC:
            print('-- send_create --')
        if self.state['created'] == True:
            # send CREATE command again
            if self._send_command('CREATE') < 0:
                return -1
            expected = self.state['m'].pack_single_int32(-9)
            self.read(length=len(expected), expect=expected)
            if self.DEBUG_ERR:
                print('exiting due to re-sending CREATE.')
            return -1
        else:
            # pack and send CREATE command
            if self._send_command('CREATE') < 0:
                return -1
        if self.chance(0.001):
            # invalid list_type
            # pack and send LIST_TYPE "BAD"
            t = self.state['m'].pack_list_type('BAD')
            self.write(t)
            expected = self.state['m'].pack_single_int32(-30)
            self.read(length=len(expected), expect=expected)
            if self.DEBUG_ERR:
                print('exiting due to invalid list_type BAD.')
            return -1
        else:
            # pack and send LIST_TYPE
            t = self.state['m'].pack_list_type()
            self.write(t)
        # pack and send LIST LENGTH
        l = self.state['m'].pack_length_and_list(internal_list=True, split=True)
        self.write(l[0])
        if self.LIST_SIZE > self.MAX_LIST_LEN:
            # start() can pick a size up to 1005, past the 1000 limit.
            expected = self.state['m'].pack_single_int32(-70)
            self.read(length=len(expected), expect=expected)
            if self.DEBUG_ERR:
                print('exiting due to too large of list_size.')
            return -1
        # pack and send LIST
        self.write(l[1])
        self.state['created'] = True
        return 0
    def _read_and_match_calculated_dbl64(self, val):
        '''
        This function will read the least significant byte of a double precision
        float with a wildcard, and then read the other 7 bytes with an exact match.
        Exact comparison with computed double precision floating point numbers
        is non-deterministic when the numbers are computed in 2 different programming languages.
        There are too many cases where the least significant digit or two
        of a calculated float differ when calculated in C vs Python.
        :arg val: expected double value computed by the Python-side model.
        '''
        # Drop byte 0 of the packed value; only bytes 1..7 must match exactly.
        exp_result = self.state['m'].pack_single(val)[1:]
        # read 1 byte with wildcard
        least_sig_byte = Variable('LSB')
        least_sig_byte.set_slice(0,1)
        self.read(length=1, assign=least_sig_byte)
        # read 7 bytes for exact match
        self.read(length=len(exp_result), expect=exp_result)
def _read_and_match_calculated_int32(self, val):
    """Read back a computed int32 and require an exact byte match.

    Integer results are deterministic across languages, so no wildcard
    tolerance is needed here.
    """
    packed = self.state['m'].pack_single(val)
    self.read(length=len(packed), expect=packed)
def _read_and_match_calculated_value(self, val):
    """Match a computed result with the matcher suited to LIST_TYPE.

    INT32 lists get an exact match; anything else is treated as a 64-bit
    double and matched with a wildcard least-significant byte.
    """
    matcher = (self._read_and_match_calculated_int32
               if self.LIST_TYPE == 'INT32'
               else self._read_and_match_calculated_dbl64)
    matcher(val)
def mean_op(self):
    """Send the MEAN op code and verify the computed reply.

    Returns 0 on success, -1 if sending the op code failed — consistent
    with send_create(), so callers can uniformly test `< 0` (previously
    this returned None on success, which breaks such comparisons).
    """
    if self.DEBUG_MEAN:
        print('-- mean_op --')
    if self._send_op_code('MEAN') < 0:
        return -1
    # read expected result (computed value: tolerant float match for DBL64)
    val = self.state['m'].get_mean()
    if self.DEBUG_MEAN:
        print('mean_op: val = {0}'.format(val))
    self._read_and_match_calculated_value(val)
    return 0
def sum_op(self):
    """Send the SUM op code and verify the computed reply.

    Returns 0 on success, -1 on send failure (consistent with
    send_create(), so callers can test `< 0`).
    """
    if self.DEBUG_SUM:
        print('-- sum_op --')
    if self._send_op_code('SUM') < 0:
        return -1
    # read expected result
    val = self.state['m'].get_sum()
    if self.DEBUG_SUM:
        print('sum_op: val = {0}'.format(val))
    self._read_and_match_calculated_value(val)
    return 0
def product_op(self):
    """Send the PRODUCT op code and verify the computed reply.

    Returns 0 on success, -1 on send failure (consistent with
    send_create(), so callers can test `< 0`).
    """
    if self.DEBUG_PRODUCT:
        print('-- product_op --')
    if self._send_op_code('PRODUCT') < 0:
        return -1
    # read expected result
    val = self.state['m'].get_product()
    if self.DEBUG_PRODUCT:
        print('product_op: val = {0}'.format(val))
    self._read_and_match_calculated_value(val)
    return 0
def min_op(self):
    """Send the MIN op code and verify the reply.

    MIN returns an existing list element (not a newly computed float), so
    an exact byte match is safe — no wildcard LSB needed.
    Returns 0 on success, -1 on send failure (consistent with
    send_create(), so callers can test `< 0`).
    """
    if self.DEBUG_MIN:
        print('-- min_op --')
    if self._send_op_code('MIN') < 0:
        return -1
    # read expected result
    val = self.state['m'].get_min()
    if self.DEBUG_MIN:
        print('min_op: val = {0}'.format(val))
    exp_result = self.state['m'].pack_single(val)
    self.read(length=len(exp_result), expect=exp_result)
    return 0
def max_op(self):
    """Send the MAX op code and verify the reply.

    MAX returns an existing list element (not a newly computed float), so
    an exact byte match is safe — no wildcard LSB needed.
    Returns 0 on success, -1 on send failure (consistent with
    send_create(), so callers can test `< 0`).
    """
    if self.DEBUG_MAX:
        print('-- max_op --')
    if self._send_op_code('MAX') < 0:
        return -1
    # read expected result
    val = self.state['m'].get_max()
    if self.DEBUG_MAX:
        print('max_op: val = {0}'.format(val))
    exp_result = self.state['m'].pack_single(val)
    self.read(length=len(exp_result), expect=exp_result)
    return 0
def median_op(self):
    """Send the MEDIAN op code and verify the computed reply.

    The median may be the average of two middle elements, hence the
    tolerant computed-value match.
    Returns 0 on success, -1 on send failure (consistent with
    send_create(), so callers can test `< 0`).
    """
    if self.DEBUG_MEDIAN:
        print('-- median_op --')
    if self._send_op_code('MEDIAN') < 0:
        return -1
    # read expected result
    val = self.state['m'].get_median()
    if self.DEBUG_MEDIAN:
        print('median_op: val = {0}'.format(val))
    self._read_and_match_calculated_value(val)
    return 0
def sort_op(self):
    """Send the SORT op code and verify the ascending list echoed back.

    Sorting only reorders existing elements, so an exact byte match of
    the whole list is safe.
    Returns 0 on success, -1 on send failure (consistent with
    send_create(), so callers can test `< 0`).
    """
    if self.DEBUG_SORT:
        print('-- sort_op --')
    if self._send_op_code('SORT') < 0:
        return -1
    # read expected result
    val_list = self.state['m'].get_sort()
    if self.DEBUG_SORT:
        print('sort_op: sorted list = {0}'.format(val_list))
    exp_result = self.state['m'].pack_list(val_list)
    self.read(length=len(exp_result), expect=exp_result)
    return 0
def rsort_op(self):
    """Send the RSORT op code and verify the descending list echoed back.

    Returns 0 on success, -1 on send failure (consistent with
    send_create(), so callers can test `< 0`).
    """
    if self.DEBUG_RSORT:
        print('-- rsort_op --')
    if self._send_op_code('RSORT') < 0:
        return -1
    # read expected result
    val_list = self.state['m'].get_rsort()
    if self.DEBUG_RSORT:
        print('rsort_op: rsorted list = {0}'.format(val_list))
    exp_result = self.state['m'].pack_list(val_list)
    self.read(length=len(exp_result), expect=exp_result)
    return 0
def odds_op(self):
    """Send the ODDS op code and verify the length-prefixed reply list.

    The reply size varies, so the expected bytes include the count prefix
    (pack_length_and_list).
    Returns 0 on success, -1 on send failure (consistent with
    send_create(), so callers can test `< 0`).
    """
    if self.DEBUG_ODDS:
        print('-- odds_op --')
    if self._send_op_code('ODDS') < 0:
        return -1
    # read expected result
    val_list = self.state['m'].get_odds()
    if self.DEBUG_ODDS:
        print('odds_op: odds count {0}, odds list = {1}'.format(len(val_list), val_list))
    exp_result = self.state['m'].pack_length_and_list(val_list=val_list)
    self.read(length=len(exp_result), expect=exp_result)
    return 0
def evens_op(self):
    """Send the EVENS op code and verify the length-prefixed reply list.

    Returns 0 on success, -1 on send failure (consistent with
    send_create(), so callers can test `< 0`).
    """
    if self.DEBUG_EVENS:
        print('-- evens_op --')
    if self._send_op_code('EVENS') < 0:
        return -1
    # read expected result
    val_list = self.state['m'].get_evens()
    if self.DEBUG_EVENS:
        print('evens_op: evens count {0}, evens list = {1}'.format(len(val_list), val_list))
    exp_result = self.state['m'].pack_length_and_list(val_list=val_list)
    self.read(length=len(exp_result), expect=exp_result)
    return 0
def mode_op(self):
    """Send the MODE op code and verify the length-prefixed reply list.

    The mode can be multi-valued, hence a list reply with a count prefix.
    Returns 0 on success, -1 on send failure (consistent with
    send_create(), so callers can test `< 0`).
    """
    if self.DEBUG_MODE:
        print('-- mode_op --')
    if self._send_op_code('MODE') < 0:
        return -1
    # read expected result
    val_list = self.state['m'].get_mode()
    if self.DEBUG_MODE:
        print('mode_op: val_list = {0}'.format(val_list))
    exp_result = self.state['m'].pack_length_and_list(val_list=val_list)
    self.read(length=len(exp_result), expect=exp_result)
    return 0
def range_op(self):
    """Send the RANGE op code and verify the computed reply.

    The range is a computed value, so the tolerant match is used for
    DBL64 lists.
    Returns 0 on success, -1 on send failure (consistent with
    send_create(), so callers can test `< 0`).
    """
    if self.DEBUG_RANGE:
        print('-- range_op --')
    if self._send_op_code('RANGE') < 0:
        return -1
    # read expected result
    val = self.state['m'].get_range()
    if self.DEBUG_RANGE:
        print('range_op: val = {0}'.format(val))
    self._read_and_match_calculated_value(val)
    return 0
| |
"""Support for NuHeat thermostats."""
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.components.climate import ClimateDevice
from homeassistant.components.climate.const import (
HVAC_MODE_AUTO, HVAC_MODE_HEAT, HVAC_MODE_OFF, SUPPORT_PRESET_MODE,
SUPPORT_TARGET_TEMPERATURE)
from homeassistant.const import (
ATTR_ENTITY_ID, ATTR_TEMPERATURE, TEMP_CELSIUS, TEMP_FAHRENHEIT)
import homeassistant.helpers.config_validation as cv
from homeassistant.util import Throttle
from . import DOMAIN as NUHEAT_DOMAIN
_LOGGER = logging.getLogger(__name__)

# Minimum interval between real polls of the NuHeat API; enforced by the
# @Throttle decorator on NuHeatThermostat._throttled_update.
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=5)

# Hold modes (preset names exposed to Home Assistant)
MODE_AUTO = HVAC_MODE_AUTO  # Run device schedule
MODE_HOLD_TEMPERATURE = "temperature"
MODE_TEMPORARY_HOLD = "temporary_temperature"

OPERATION_LIST = [HVAC_MODE_HEAT, HVAC_MODE_OFF]

# Raw schedule_mode codes used by the NuHeat thermostat; mapped to the
# preset names above in NuHeatThermostat.preset_mode / set_preset_mode.
SCHEDULE_HOLD = 3
SCHEDULE_RUN = 1
SCHEDULE_TEMPORARY_HOLD = 2

# Custom service that returns thermostats to their programmed schedule.
SERVICE_RESUME_PROGRAM = "resume_program"
RESUME_PROGRAM_SCHEMA = vol.Schema({
    vol.Optional(ATTR_ENTITY_ID): cv.entity_ids
})

SUPPORT_FLAGS = (SUPPORT_TARGET_TEMPERATURE | SUPPORT_PRESET_MODE)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the NuHeat thermostat(s).

    Creates one NuHeatThermostat entity per serial number stored by the
    NuHeat component and registers the `resume_program` service, which
    operates on some or all of the entities created here.
    """
    # Only set up when discovered by the NuHeat component (which placed
    # the api handle and serial numbers in hass.data).
    if discovery_info is None:
        return
    temperature_unit = hass.config.units.temperature_unit
    api, serial_numbers = hass.data[NUHEAT_DOMAIN]
    thermostats = [
        NuHeatThermostat(api, serial_number, temperature_unit)
        for serial_number in serial_numbers
    ]
    add_entities(thermostats, True)

    def resume_program_set_service(service):
        """Resume the program on the target thermostats."""
        entity_id = service.data.get(ATTR_ENTITY_ID)
        if entity_id:
            # Restrict to the entities named in the service call.
            target_thermostats = [device for device in thermostats
                                  if device.entity_id in entity_id]
        else:
            # No entity_id given: act on every NuHeat thermostat.
            target_thermostats = thermostats
        for thermostat in target_thermostats:
            thermostat.resume_program()
            # Force an immediate state refresh after resuming.
            thermostat.schedule_update_ha_state(True)

    hass.services.register(
        NUHEAT_DOMAIN, SERVICE_RESUME_PROGRAM, resume_program_set_service,
        schema=RESUME_PROGRAM_SCHEMA)
class NuHeatThermostat(ClimateDevice):
    """Representation of a NuHeat Thermostat."""

    def __init__(self, api, serial_number, temperature_unit):
        """Initialize the thermostat."""
        self._thermostat = api.get_thermostat(serial_number)
        self._temperature_unit = temperature_unit
        # When True, the next update() bypasses the poll throttle so
        # changes written by this entity are read back immediately.
        self._force_update = False

    @property
    def name(self):
        """Return the name of the thermostat."""
        return self._thermostat.room

    @property
    def supported_features(self):
        """Return the list of supported features."""
        return SUPPORT_FLAGS

    @property
    def temperature_unit(self):
        """Return the unit of measurement."""
        if self._temperature_unit == "C":
            return TEMP_CELSIUS
        return TEMP_FAHRENHEIT

    @property
    def current_temperature(self):
        """Return the current temperature."""
        if self._temperature_unit == "C":
            return self._thermostat.celsius
        return self._thermostat.fahrenheit

    @property
    def hvac_mode(self):
        """Return current operation. ie. heat, idle."""
        if self._thermostat.heating:
            return HVAC_MODE_HEAT
        return HVAC_MODE_OFF

    @property
    def min_temp(self):
        """Return the minimum supported temperature for the thermostat."""
        if self._temperature_unit == "C":
            return self._thermostat.min_celsius
        return self._thermostat.min_fahrenheit

    @property
    def max_temp(self):
        """Return the maximum supported temperature for the thermostat."""
        if self._temperature_unit == "C":
            return self._thermostat.max_celsius
        return self._thermostat.max_fahrenheit

    @property
    def target_temperature(self):
        """Return the currently programmed temperature."""
        if self._temperature_unit == "C":
            return self._thermostat.target_celsius
        return self._thermostat.target_fahrenheit

    @property
    def preset_mode(self):
        """Return current preset mode."""
        schedule_mode = self._thermostat.schedule_mode
        if schedule_mode == SCHEDULE_RUN:
            return MODE_AUTO
        if schedule_mode == SCHEDULE_HOLD:
            return MODE_HOLD_TEMPERATURE
        if schedule_mode == SCHEDULE_TEMPORARY_HOLD:
            return MODE_TEMPORARY_HOLD
        # Unknown device code: report the safe default.
        return MODE_AUTO

    @property
    def preset_modes(self):
        """Return available preset modes."""
        return [
            MODE_HOLD_TEMPERATURE,
            MODE_TEMPORARY_HOLD
        ]

    @property
    def hvac_modes(self):
        """Return list of possible operation modes."""
        return OPERATION_LIST

    def resume_program(self):
        """Resume the thermostat's programmed schedule."""
        self._thermostat.resume_schedule()
        self._force_update = True

    def set_preset_mode(self, preset_mode):
        """Update the hold mode of the thermostat."""
        if preset_mode is None:
            schedule_mode = SCHEDULE_RUN
        elif preset_mode == MODE_HOLD_TEMPERATURE:
            schedule_mode = SCHEDULE_HOLD
        elif preset_mode == MODE_TEMPORARY_HOLD:
            schedule_mode = SCHEDULE_TEMPORARY_HOLD
        else:
            # Bug fix: an unrecognized preset previously left schedule_mode
            # unassigned and raised UnboundLocalError below. Fall back to
            # running the programmed schedule instead.
            schedule_mode = SCHEDULE_RUN
        self._thermostat.schedule_mode = schedule_mode
        self._force_update = True

    def set_temperature(self, **kwargs):
        """Set a new target temperature."""
        temperature = kwargs.get(ATTR_TEMPERATURE)
        if self._temperature_unit == "C":
            self._thermostat.target_celsius = temperature
        else:
            self._thermostat.target_fahrenheit = temperature
        _LOGGER.debug(
            "Setting NuHeat thermostat temperature to %s %s",
            temperature, self.temperature_unit)
        self._force_update = True

    def update(self):
        """Get the latest state from the thermostat."""
        if self._force_update:
            # Bypass the throttle once, right after a local change.
            self._throttled_update(no_throttle=True)
            self._force_update = False
        else:
            self._throttled_update()

    @Throttle(MIN_TIME_BETWEEN_UPDATES)
    def _throttled_update(self, **kwargs):
        """Get the latest state from the thermostat with a throttle."""
        self._thermostat.get_data()
| |
"""
Use lldb Python API to test dynamic values in C++
"""
from __future__ import print_function
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class DynamicValueTestCase(TestBase):
    """Exercise dynamic (runtime) type resolution of C++ pointers and
    references through the lldb Python API, using pass-to-base.cpp."""

    mydir = TestBase.compute_mydir(__file__)

    def setUp(self):
        # Call super's setUp().
        TestBase.setUp(self)
        # Find the line numbers to break at in pass-to-base.cpp.
        self.do_something_line = line_number(
            'pass-to-base.cpp', '// Break here in doSomething.')
        self.main_first_call_line = line_number(
            'pass-to-base.cpp',
            '// Break here and get real addresses of myB and otherB.')
        self.main_second_call_line = line_number(
            'pass-to-base.cpp', '// Break here and get real address of reallyA.')

    @add_test_categories(['pyapi'])
    @expectedFailureAll(oslist=["windows"], bugnumber="llvm.org/pr24663")
    def test_get_dynamic_vals(self):
        """Test fetching C++ dynamic values from pointers & references."""
        self.build(dictionary=self.getBuildFlags())
        exe = self.getBuildArtifact("a.out")
        # Create a target from the debugger.
        target = self.dbg.CreateTarget(exe)
        self.assertTrue(target, VALID_TARGET)
        # Set up our breakpoints:
        do_something_bpt = target.BreakpointCreateByLocation(
            'pass-to-base.cpp', self.do_something_line)
        self.assertTrue(do_something_bpt,
                        VALID_BREAKPOINT)
        first_call_bpt = target.BreakpointCreateByLocation(
            'pass-to-base.cpp', self.main_first_call_line)
        self.assertTrue(first_call_bpt,
                        VALID_BREAKPOINT)
        second_call_bpt = target.BreakpointCreateByLocation(
            'pass-to-base.cpp', self.main_second_call_line)
        self.assertTrue(second_call_bpt,
                        VALID_BREAKPOINT)
        # Now launch the process, and do not stop at the entry point.
        process = target.LaunchSimple(
            None, None, self.get_process_working_directory())
        self.assertTrue(process.GetState() == lldb.eStateStopped,
                        PROCESS_STOPPED)
        threads = lldbutil.get_threads_stopped_at_breakpoint(
            process, first_call_bpt)
        self.assertTrue(len(threads) == 1)
        thread = threads[0]
        frame = thread.GetFrameAtIndex(0)
        # Now find the dynamic addresses of myB and otherB so we can compare them
        # with the dynamic values we get in doSomething:
        use_dynamic = lldb.eDynamicCanRunTarget
        no_dynamic = lldb.eNoDynamicValues
        myB = frame.FindVariable('myB', no_dynamic)
        self.assertTrue(myB)
        myB_loc = int(myB.GetLocation(), 16)
        otherB = frame.FindVariable('otherB', no_dynamic)
        self.assertTrue(otherB)
        otherB_loc = int(otherB.GetLocation(), 16)
        # Okay now run to doSomething:
        threads = lldbutil.continue_to_breakpoint(process, do_something_bpt)
        self.assertTrue(len(threads) == 1)
        thread = threads[0]
        frame = thread.GetFrameAtIndex(0)
        # Get "this" using FindVariable:
        this_static = frame.FindVariable('this', no_dynamic)
        this_dynamic = frame.FindVariable('this', use_dynamic)
        self.examine_value_object_of_this_ptr(
            this_static, this_dynamic, myB_loc)
        # Now make sure that the "GetDynamicValue" works:
        # This doesn't work currently because we can't get dynamic values from
        # ConstResult objects.
        fetched_dynamic_value = this_static.GetDynamicValue(use_dynamic)
        self.examine_value_object_of_this_ptr(
            this_static, fetched_dynamic_value, myB_loc)
        # And conversely that the GetStaticValue() interface also works:
        fetched_static_value = this_dynamic.GetStaticValue()
        self.examine_value_object_of_this_ptr(
            fetched_static_value, this_dynamic, myB_loc)
        # Get "this" using FindValue, make sure that works too:
        this_static = frame.FindValue(
            'this', lldb.eValueTypeVariableArgument, no_dynamic)
        this_dynamic = frame.FindValue(
            'this', lldb.eValueTypeVariableArgument, use_dynamic)
        self.examine_value_object_of_this_ptr(
            this_static, this_dynamic, myB_loc)
        # Get "this" using the EvaluateExpression:
        this_static = frame.EvaluateExpression('this', False)
        this_dynamic = frame.EvaluateExpression('this', True)
        self.examine_value_object_of_this_ptr(
            this_static, this_dynamic, myB_loc)
        # The "frame var" code uses another path to get into children, so let's
        # make sure that works as well:
        self.expect(
            'frame var -d run-target --ptr-depth=2 --show-types anotherA.m_client_A',
            'frame var finds its way into a child member',
            patterns=['\(B \*\)'])
        # Now make sure we also get it right for a reference as well:
        anotherA_static = frame.FindVariable('anotherA', False)
        self.assertTrue(anotherA_static)
        anotherA_static_addr = int(anotherA_static.GetValue(), 16)
        anotherA_dynamic = frame.FindVariable('anotherA', True)
        self.assertTrue(anotherA_dynamic)
        anotherA_dynamic_addr = int(anotherA_dynamic.GetValue(), 16)
        anotherA_dynamic_typename = anotherA_dynamic.GetTypeName()
        self.assertTrue(anotherA_dynamic_typename.find('B') != -1)
        # The full dynamic object (B) starts below the static A view of it.
        self.assertTrue(anotherA_dynamic_addr < anotherA_static_addr)
        anotherA_m_b_value_dynamic = anotherA_dynamic.GetChildMemberWithName(
            'm_b_value', True)
        self.assertTrue(anotherA_m_b_value_dynamic)
        anotherA_m_b_val = int(anotherA_m_b_value_dynamic.GetValue(), 10)
        self.assertTrue(anotherA_m_b_val == 300)
        # m_b_value only exists on B, so the static (A) view must not see it.
        anotherA_m_b_value_static = anotherA_static.GetChildMemberWithName(
            'm_b_value', True)
        self.assertFalse(anotherA_m_b_value_static)
        # Okay, now continue again, and when we hit the second breakpoint in
        # main
        threads = lldbutil.continue_to_breakpoint(process, second_call_bpt)
        self.assertTrue(len(threads) == 1)
        thread = threads[0]
        frame = thread.GetFrameAtIndex(0)
        reallyA_value = frame.FindVariable('reallyA', False)
        self.assertTrue(reallyA_value)
        reallyA_loc = int(reallyA_value.GetLocation(), 16)
        # Finally continue to doSomething again, and make sure we get the right value for anotherA,
        # which this time around is just an "A".
        threads = lldbutil.continue_to_breakpoint(process, do_something_bpt)
        self.assertTrue(len(threads) == 1)
        thread = threads[0]
        frame = thread.GetFrameAtIndex(0)
        anotherA_value = frame.FindVariable('anotherA', True)
        self.assertTrue(anotherA_value)
        anotherA_loc = int(anotherA_value.GetValue(), 16)
        self.assertTrue(anotherA_loc == reallyA_loc)
        self.assertTrue(anotherA_value.GetTypeName().find('B') == -1)

    def examine_value_object_of_this_ptr(
            self, this_static, this_dynamic, dynamic_location):
        """Verify that the static and dynamic views of 'this' are consistent:
        the dynamic view has the B type, sits at dynamic_location, and exposes
        m_b_value, while the static (A) view does not."""
        # Get "this" as its static value
        self.assertTrue(this_static)
        this_static_loc = int(this_static.GetValue(), 16)
        # Get "this" as its dynamic value
        self.assertTrue(this_dynamic)
        this_dynamic_typename = this_dynamic.GetTypeName()
        self.assertTrue(this_dynamic_typename.find('B') != -1)
        this_dynamic_loc = int(this_dynamic.GetValue(), 16)
        # Make sure we got the right address for "this"
        self.assertTrue(this_dynamic_loc == dynamic_location)
        # And that the static address is greater than the dynamic one
        self.assertTrue(this_static_loc > this_dynamic_loc)
        # Now read m_b_value which is only in the dynamic value:
        use_dynamic = lldb.eDynamicCanRunTarget
        no_dynamic = lldb.eNoDynamicValues
        this_dynamic_m_b_value = this_dynamic.GetChildMemberWithName(
            'm_b_value', use_dynamic)
        self.assertTrue(this_dynamic_m_b_value)
        m_b_value = int(this_dynamic_m_b_value.GetValue(), 0)
        self.assertTrue(m_b_value == 10)
        # Make sure it is not in the static version
        this_static_m_b_value = this_static.GetChildMemberWithName(
            'm_b_value', no_dynamic)
        self.assertFalse(this_static_m_b_value)
        # Okay, now let's make sure that we can get the dynamic type of a child
        # element:
        contained_auto_ptr = this_dynamic.GetChildMemberWithName(
            'm_client_A', use_dynamic)
        self.assertTrue(contained_auto_ptr)
        # The smart-pointer member name differs between libstdc++ (_M_ptr)
        # and libc++ (__ptr_); try both.
        contained_b = contained_auto_ptr.GetChildMemberWithName(
            '_M_ptr', use_dynamic)
        if not contained_b:
            contained_b = contained_auto_ptr.GetChildMemberWithName(
                '__ptr_', use_dynamic)
            self.assertTrue(contained_b)
        contained_b_static = contained_auto_ptr.GetChildMemberWithName(
            '_M_ptr', no_dynamic)
        if not contained_b_static:
            contained_b_static = contained_auto_ptr.GetChildMemberWithName(
                '__ptr_', no_dynamic)
            self.assertTrue(contained_b_static)
        contained_b_addr = int(contained_b.GetValue(), 16)
        contained_b_static_addr = int(contained_b_static.GetValue(), 16)
        self.assertTrue(contained_b_addr < contained_b_static_addr)
| |
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for nova.network.rpcapi
"""
import collections
import mock
from oslo_config import cfg
from nova import context
from nova.network import rpcapi as network_rpcapi
from nova.objects import base as objects_base
from nova import test
from nova.tests.unit import fake_instance
from nova.tests.unit import fake_network
CONF = cfg.CONF
class NetworkRpcAPITestCase(test.NoDBTestCase):
    """Verify that each nova.network.rpcapi.NetworkAPI method issues the
    expected oslo.messaging call/cast with the expected version, target
    and keyword arguments."""

    def setUp(self):
        super(NetworkRpcAPITestCase, self).setUp()
        # Most targeted methods only prepare a server-specific context
        # when multi_host is on, so enable it for the common case.
        self.flags(multi_host=True)

    # Used to specify the default value expected if no real value is passed
    DefaultArg = collections.namedtuple('DefaultArg', ['value'])

    def _test_network_api(self, method, rpc_method, **kwargs):
        """Invoke ``method`` on a real NetworkAPI with mocked client and
        assert the resulting ``call``/``cast`` matches expectations.

        Special kwargs (consumed, not forwarded):
          version  -- expected version passed to client.prepare()
          fanout   -- expected fanout flag passed to client.prepare()
        A DefaultArg value means "do not pass this kwarg, but expect the
        RPC layer to fill in this default".
        """
        ctxt = context.RequestContext('fake_user', 'fake_project')
        rpcapi = network_rpcapi.NetworkAPI()
        self.assertIsNotNone(rpcapi.client)
        self.assertEqual(CONF.network_topic, rpcapi.client.target.topic)
        # Only 'call' returns a value; 'cast' is fire-and-forget.
        expected_retval = 'foo' if rpc_method == 'call' else None
        expected_version = kwargs.pop('version', None)
        expected_fanout = kwargs.pop('fanout', None)
        expected_kwargs = kwargs.copy()
        # Replace DefaultArg placeholders: expect the default value on the
        # wire, but do not pass the kwarg into the API call itself.
        for k, v in expected_kwargs.items():
            if isinstance(v, self.DefaultArg):
                expected_kwargs[k] = v.value
                kwargs.pop(k)
        prepare_kwargs = {}
        if expected_version:
            prepare_kwargs['version'] = expected_version
        if expected_fanout:
            prepare_kwargs['fanout'] = True
        if 'source_compute' in expected_kwargs:
            # Fix up for migrate_instance_* calls.
            expected_kwargs['source'] = expected_kwargs.pop('source_compute')
            expected_kwargs['dest'] = expected_kwargs.pop('dest_compute')
        # Methods routed to a specific network host rather than the topic.
        targeted_methods = [
            'lease_fixed_ip', 'release_fixed_ip', 'rpc_setup_network_on_host',
            '_rpc_allocate_fixed_ip', 'deallocate_fixed_ip', 'update_dns',
            '_associate_floating_ip', '_disassociate_floating_ip',
            'lease_fixed_ip', 'release_fixed_ip', 'migrate_instance_start',
            'migrate_instance_finish',
            'allocate_for_instance', 'deallocate_for_instance',
        ]
        targeted_by_instance = ['deallocate_for_instance']
        if method in targeted_methods and ('host' in expected_kwargs or
                                           'instance' in expected_kwargs):
            if method in targeted_by_instance:
                host = expected_kwargs['instance']['host']
            else:
                host = expected_kwargs['host']
            # These two keep 'host' as an RPC argument; others only use it
            # for routing.
            if method not in ['allocate_for_instance',
                              'deallocate_fixed_ip']:
                expected_kwargs.pop('host')
            if CONF.multi_host:
                prepare_kwargs['server'] = host
        with test.nested(
            mock.patch.object(rpcapi.client, rpc_method),
            mock.patch.object(rpcapi.client, 'prepare'),
            mock.patch.object(rpcapi.client, 'can_send_version'),
        ) as (
            rpc_mock, prepare_mock, csv_mock
        ):
            # Methods that probe can_send_version() to pick an RPC version.
            version_check = [
                'deallocate_for_instance', 'deallocate_fixed_ip',
                'allocate_for_instance', 'release_fixed_ip',
                'set_network_host', 'setup_networks_on_host'
            ]
            if method in version_check:
                csv_mock.return_value = True
            if prepare_kwargs:
                prepare_mock.return_value = rpcapi.client
            if rpc_method == 'call':
                rpc_mock.return_value = 'foo'
            else:
                rpc_mock.return_value = None
            retval = getattr(rpcapi, method)(ctxt, **kwargs)
            self.assertEqual(expected_retval, retval)
            if method in version_check:
                csv_mock.assert_called_once_with(mock.ANY)
            if prepare_kwargs:
                prepare_mock.assert_called_once_with(**prepare_kwargs)
            rpc_mock.assert_called_once_with(ctxt, method, **expected_kwargs)

    def test_create_networks(self):
        self._test_network_api('create_networks', rpc_method='call',
                arg1='arg', arg2='arg')

    def test_delete_network(self):
        self._test_network_api('delete_network', rpc_method='call',
                uuid='fake_uuid', fixed_range='range')

    def test_allocate_for_instance(self):
        self._test_network_api('allocate_for_instance', rpc_method='call',
                instance_id='fake_id', project_id='fake_id', host='fake_host',
                rxtx_factor='fake_factor', vpn=False, requested_networks={},
                macs=[], version='1.13')

    def test_deallocate_for_instance(self):
        instance = fake_instance.fake_instance_obj(context.get_admin_context())
        self._test_network_api('deallocate_for_instance', rpc_method='call',
                requested_networks=self.DefaultArg(None), instance=instance,
                version='1.11')

    def test_deallocate_for_instance_with_expected_networks(self):
        instance = fake_instance.fake_instance_obj(context.get_admin_context())
        self._test_network_api('deallocate_for_instance', rpc_method='call',
                instance=instance, requested_networks={}, version='1.11')

    def test_add_fixed_ip_to_instance(self):
        self._test_network_api('add_fixed_ip_to_instance', rpc_method='call',
                instance_id='fake_id', rxtx_factor='fake_factor',
                host='fake_host', network_id='fake_id', version='1.9')

    def test_remove_fixed_ip_from_instance(self):
        self._test_network_api('remove_fixed_ip_from_instance',
                rpc_method='call', instance_id='fake_id',
                rxtx_factor='fake_factor', host='fake_host',
                address='fake_address', version='1.9')

    def test_add_network_to_project(self):
        self._test_network_api('add_network_to_project', rpc_method='call',
                project_id='fake_id', network_uuid='fake_uuid')

    def test_get_instance_nw_info(self):
        self._test_network_api('get_instance_nw_info', rpc_method='call',
                instance_id='fake_id', rxtx_factor='fake_factor',
                host='fake_host', project_id='fake_id', version='1.9')

    def test_validate_networks(self):
        self._test_network_api('validate_networks', rpc_method='call',
                networks={})

    def test_get_dns_domains(self):
        self._test_network_api('get_dns_domains', rpc_method='call')

    def test_add_dns_entry(self):
        self._test_network_api('add_dns_entry', rpc_method='call',
                address='addr', name='name', dns_type='foo', domain='domain')

    def test_modify_dns_entry(self):
        self._test_network_api('modify_dns_entry', rpc_method='call',
                address='addr', name='name', domain='domain')

    def test_delete_dns_entry(self):
        self._test_network_api('delete_dns_entry', rpc_method='call',
                name='name', domain='domain')

    def test_delete_dns_domain(self):
        self._test_network_api('delete_dns_domain', rpc_method='call',
                domain='fake_domain')

    def test_get_dns_entries_by_address(self):
        self._test_network_api('get_dns_entries_by_address', rpc_method='call',
                address='fake_address', domain='fake_domain')

    def test_get_dns_entries_by_name(self):
        self._test_network_api('get_dns_entries_by_name', rpc_method='call',
                name='fake_name', domain='fake_domain')

    def test_create_private_dns_domain(self):
        self._test_network_api('create_private_dns_domain', rpc_method='call',
                domain='fake_domain', av_zone='fake_zone')

    def test_create_public_dns_domain(self):
        self._test_network_api('create_public_dns_domain', rpc_method='call',
                domain='fake_domain', project='fake_project')

    def test_setup_networks_on_host(self):
        ctxt = context.RequestContext('fake_user', 'fake_project')
        instance = fake_instance.fake_instance_obj(ctxt)
        self._test_network_api('setup_networks_on_host', rpc_method='call',
                instance_id=instance.id, host='fake_host', teardown=False,
                instance=instance, version='1.16')

    def test_setup_networks_on_host_v1_0(self):
        # When 1.16 is not available, the instance object must be dropped
        # and the call pinned to version 1.0.
        ctxt = context.RequestContext('fake_user', 'fake_project')
        instance = fake_instance.fake_instance_obj(ctxt)
        host = 'fake_host'
        teardown = True
        rpcapi = network_rpcapi.NetworkAPI()
        call_mock = mock.Mock()
        cctxt_mock = mock.Mock(call=call_mock)
        with test.nested(
            mock.patch.object(rpcapi.client, 'can_send_version',
                              return_value=False),
            mock.patch.object(rpcapi.client, 'prepare',
                              return_value=cctxt_mock)
        ) as (
            can_send_mock, prepare_mock
        ):
            rpcapi.setup_networks_on_host(ctxt, instance.id, host, teardown,
                                          instance)
        # assert our mocks were called as expected
        can_send_mock.assert_called_once_with('1.16')
        prepare_mock.assert_called_once_with(version='1.0')
        call_mock.assert_called_once_with(ctxt, 'setup_networks_on_host',
                                          host=host, teardown=teardown,
                                          instance_id=instance.id)

    def test_lease_fixed_ip(self):
        self._test_network_api('lease_fixed_ip', rpc_method='cast',
                host='fake_host', address='fake_addr')

    def test_release_fixed_ip(self):
        self._test_network_api('release_fixed_ip', rpc_method='cast',
                host='fake_host', address='fake_addr', mac='fake_mac',
                version='1.14')

    def test_release_fixed_ip_no_mac_support(self):
        # Tests that the mac kwarg is not passed when we can't send version
        # 1.14 to the network manager.
        ctxt = context.RequestContext('fake_user', 'fake_project')
        address = '192.168.65.158'
        host = 'fake-host'
        mac = '00:0c:29:2c:b2:64'
        rpcapi = network_rpcapi.NetworkAPI()
        cast_mock = mock.Mock()
        cctxt_mock = mock.Mock(cast=cast_mock)
        with test.nested(
            mock.patch.object(rpcapi.client, 'can_send_version',
                              return_value=False),
            mock.patch.object(rpcapi.client, 'prepare',
                              return_value=cctxt_mock)
        ) as (
            can_send_mock, prepare_mock
        ):
            rpcapi.release_fixed_ip(ctxt, address, host, mac)
        # assert our mocks were called as expected
        can_send_mock.assert_called_once_with('1.14')
        prepare_mock.assert_called_once_with(server=host, version='1.0')
        cast_mock.assert_called_once_with(ctxt, 'release_fixed_ip',
                                          address=address)

    def test_set_network_host(self):
        network = fake_network.fake_network_obj(context.get_admin_context())
        self._test_network_api('set_network_host', rpc_method='call',
                network_ref=network, version='1.15')

    def test_set_network_host_network_object_to_primitive(self):
        # Tests that the network object is converted to a primitive if it
        # can't send version 1.15.
        ctxt = context.RequestContext('fake_user', 'fake_project')
        network = fake_network.fake_network_obj(ctxt)
        network_dict = objects_base.obj_to_primitive(network)
        rpcapi = network_rpcapi.NetworkAPI()
        call_mock = mock.Mock()
        cctxt_mock = mock.Mock(call=call_mock)
        with test.nested(
            mock.patch.object(rpcapi.client, 'can_send_version',
                              return_value=False),
            mock.patch.object(rpcapi.client, 'prepare',
                              return_value=cctxt_mock)
        ) as (
            can_send_mock, prepare_mock
        ):
            rpcapi.set_network_host(ctxt, network)
        # assert our mocks were called as expected
        can_send_mock.assert_called_once_with('1.15')
        prepare_mock.assert_called_once_with(version='1.0')
        call_mock.assert_called_once_with(ctxt, 'set_network_host',
                                          network_ref=network_dict)

    def test_rpc_setup_network_on_host(self):
        self._test_network_api('rpc_setup_network_on_host', rpc_method='call',
                network_id='fake_id', teardown=False, host='fake_host')

    def test_rpc_allocate_fixed_ip(self):
        self._test_network_api('_rpc_allocate_fixed_ip', rpc_method='call',
                instance_id='fake_id', network_id='fake_id', address='addr',
                vpn=True, host='fake_host')

    def test_deallocate_fixed_ip(self):
        instance = fake_instance.fake_db_instance()
        self._test_network_api('deallocate_fixed_ip', rpc_method='call',
                address='fake_addr', host='fake_host', instance=instance,
                version='1.12')

    def test_update_dns(self):
        self._test_network_api('update_dns', rpc_method='cast', fanout=True,
                network_ids='fake_id', version='1.3')

    def test__associate_floating_ip(self):
        self._test_network_api('_associate_floating_ip', rpc_method='call',
                floating_address='fake_addr', fixed_address='fixed_address',
                interface='fake_interface', host='fake_host',
                instance_uuid='fake_uuid', version='1.6')

    def test__disassociate_floating_ip(self):
        self._test_network_api('_disassociate_floating_ip', rpc_method='call',
                address='fake_addr', interface='fake_interface',
                host='fake_host', instance_uuid='fake_uuid', version='1.6')

    def test_migrate_instance_start(self):
        self._test_network_api('migrate_instance_start', rpc_method='call',
                instance_uuid='fake_instance_uuid',
                rxtx_factor='fake_factor',
                project_id='fake_project',
                source_compute='fake_src_compute',
                dest_compute='fake_dest_compute',
                floating_addresses='fake_floating_addresses',
                host=self.DefaultArg(None),
                version='1.2')

    def test_migrate_instance_start_multi_host(self):
        self._test_network_api('migrate_instance_start', rpc_method='call',
                instance_uuid='fake_instance_uuid',
                rxtx_factor='fake_factor',
                project_id='fake_project',
                source_compute='fake_src_compute',
                dest_compute='fake_dest_compute',
                floating_addresses='fake_floating_addresses',
                host='fake_host',
                version='1.2')

    def test_migrate_instance_finish(self):
        self._test_network_api('migrate_instance_finish', rpc_method='call',
                instance_uuid='fake_instance_uuid',
                rxtx_factor='fake_factor',
                project_id='fake_project',
                source_compute='fake_src_compute',
                dest_compute='fake_dest_compute',
                floating_addresses='fake_floating_addresses',
                host=self.DefaultArg(None),
                version='1.2')

    def test_migrate_instance_finish_multi_host(self):
        self._test_network_api('migrate_instance_finish', rpc_method='call',
                instance_uuid='fake_instance_uuid',
                rxtx_factor='fake_factor',
                project_id='fake_project',
                source_compute='fake_src_compute',
                dest_compute='fake_dest_compute',
                floating_addresses='fake_floating_addresses',
                host='fake_host',
                version='1.2')
| |
"""Append module search paths for third-party packages to sys.path.
****************************************************************
* This module is automatically imported during initialization. *
****************************************************************
In earlier versions of Python (up to 1.5a3), scripts or modules that
needed to use site-specific modules would place ``import site''
somewhere near the top of their code. Because of the automatic
import, this is no longer necessary (but code that does it still
works).
This will append site-specific paths to the module search path. On
Unix, it starts with sys.prefix and sys.exec_prefix (if different) and
appends lib/python<version>/site-packages as well as lib/site-python.
It also supports the Debian convention of
lib/python<version>/dist-packages. On other platforms (mainly Mac and
Windows), it uses just sys.prefix (and sys.exec_prefix, if different,
but this is unlikely). The resulting directories, if they exist, are
appended to sys.path, and also inspected for path configuration files.
FOR DEBIAN, this sys.path is augmented with directories in /usr/local.
Local addons go into /usr/local/lib/python<version>/site-packages
(resp. /usr/local/lib/site-python), Debian addons install into
/usr/{lib,share}/python<version>/dist-packages.
A path configuration file is a file whose name has the form
<package>.pth; its contents are additional directories (one per line)
to be added to sys.path. Non-existing directories (or
non-directories) are never added to sys.path; no directory is added to
sys.path more than once. Blank lines and lines beginning with
'#' are skipped. Lines starting with 'import' are executed.
For example, suppose sys.prefix and sys.exec_prefix are set to
/usr/local and there is a directory /usr/local/lib/python2.X/site-packages
with three subdirectories, foo, bar and spam, and two path
configuration files, foo.pth and bar.pth. Assume foo.pth contains the
following:
# foo package configuration
foo
bar
bletch
and bar.pth contains:
# bar package configuration
bar
Then the following directories are added to sys.path, in this order:
/usr/local/lib/python2.X/site-packages/bar
/usr/local/lib/python2.X/site-packages/foo
Note that bletch is omitted because it doesn't exist; bar precedes foo
because bar.pth comes alphabetically before foo.pth; and spam is
omitted because it is not mentioned in either path configuration file.
After these path manipulations, an attempt is made to import a module
named sitecustomize, which can perform arbitrary additional
site-specific customizations. If this import fails with an
ImportError exception, it is silently ignored.
"""
import sys
import os
try:
import __builtin__ as builtins
except ImportError:
import builtins
try:
set
except NameError:
from sets import Set as set
# Prefixes for site-packages; add additional prefixes like /usr/local here
PREFIXES = [sys.prefix, sys.exec_prefix]
# Enable per user site-packages directory
# set it to False to disable the feature or True to force the feature
ENABLE_USER_SITE = None

# for distutils.commands.install
# Populated later by addusersitepackages().
USER_SITE = None
USER_BASE = None

# Interpreter detection: PyPy exposes sys.pypy_version_info, and Jython's
# sys.platform starts with 'java'.
_is_pypy = hasattr(sys, 'pypy_version_info')
_is_jython = sys.platform[:4] == 'java'
if _is_jython:
    # Module type is needed on Jython to tell real modules apart in abs__file__().
    ModuleType = type(os)
def makepath(*paths):
    """Join *paths* and return ``(absolute, normcase(absolute))``.

    Jython's synthetic classpath entries ('__classpath__' and
    '__pyclasspath__...') are not filesystem paths and are returned as-is.
    """
    joined = os.path.join(*paths)
    if _is_jython and (joined == '__classpath__'
                       or joined.startswith('__pyclasspath__')):
        return joined, joined
    absolute = os.path.abspath(joined)
    return absolute, os.path.normcase(absolute)
def abs__file__():
    """Rewrite every loaded module's __file__ attribute to an absolute path."""
    for module in sys.modules.values():
        # Only plain modules need fixing on Jython, and a PEP 302
        # loader-supplied __file__ must not be touched.
        skip = ((_is_jython and not isinstance(module, ModuleType))
                or hasattr(module, '__loader__'))
        if skip:
            continue
        path = getattr(module, '__file__', None)
        if path is not None:
            module.__file__ = os.path.abspath(path)
def removeduppaths():
    """Absolutize sys.path entries and drop duplicates.

    On case-insensitive file systems entries differing only in case are
    also considered duplicates.  Returns the set of normalized entries.
    """
    seen = set()
    deduped = []
    for entry in sys.path:
        entry, normalized = makepath(entry)
        if normalized not in seen:
            seen.add(normalized)
            deduped.append(entry)
    sys.path[:] = deduped
    return seen
# XXX This should not be part of site.py, since it is needed even when
# using the -S option for Python. See http://www.python.org/sf/586680
def addbuilddir():
    """Append ./build/lib.<platform> in case we're running in the build dir
    (especially for Guido :-)."""
    from distutils.util import get_platform
    suffix = "build/lib.%s-%.3s" % (get_platform(), sys.version)
    if hasattr(sys, 'gettotalrefcount'):
        # --with-pydebug interpreters build into a separate directory.
        suffix += '-pydebug'
    sys.path.append(os.path.join(os.path.dirname(sys.path[-1]), suffix))
def _init_pathinfo():
    """Return a set containing all existing directory entries from sys.path."""
    existing = set()
    for entry in sys.path:
        try:
            if os.path.isdir(entry):
                entry, normalized = makepath(entry)
                existing.add(normalized)
        except TypeError:
            # Non-string sys.path entries (e.g. importer objects) are skipped.
            continue
    return existing
def addpackage(sitedir, name, known_paths):
    """Process one .pth file in *sitedir*.

    Each non-comment line is either executed (lines starting with
    'import') or treated as a directory to append to sys.path if it
    exists and is not already known.

    Returns the updated ``known_paths`` set, or None when the caller
    passed None (matching the historical contract).
    """
    if known_paths is None:
        # Bug fix: the original discarded the result of _init_pathinfo(),
        # leaving known_paths as None and raising TypeError on the first
        # membership test below.  Capture the freshly built set instead.
        known_paths = _init_pathinfo()
        reset = 1
    else:
        reset = 0
    fullname = os.path.join(sitedir, name)
    try:
        f = open(fullname, "rU")
    except IOError:
        # Unreadable .pth files are silently ignored, like CPython does.
        return
    try:
        for line in f:
            if line.startswith("#"):
                continue
            if line.startswith("import"):
                # NOTE(review): executing arbitrary lines from .pth files is
                # the documented (if surprising) site.py behavior.
                exec(line)
                continue
            line = line.rstrip()
            dir, dircase = makepath(sitedir, line)
            if not dircase in known_paths and os.path.exists(dir):
                sys.path.append(dir)
                known_paths.add(dircase)
    finally:
        f.close()
    if reset:
        known_paths = None
    return known_paths
def addsitedir(sitedir, known_paths=None):
    """Add 'sitedir' to sys.path if missing and process its *.pth files."""
    if known_paths is None:
        known_paths = _init_pathinfo()
        reset = 1
    else:
        reset = 0
    sitedir, sitedircase = makepath(sitedir)
    if sitedircase not in known_paths:
        # The directory itself is added before any .pth contents.
        sys.path.append(sitedir)
    try:
        names = os.listdir(sitedir)
    except os.error:
        return
    # Alphabetical order determines the order paths are appended.
    for name in sorted(names):
        if name.endswith(os.extsep + "pth"):
            addpackage(sitedir, name, known_paths)
    if reset:
        known_paths = None
    return known_paths
def addsitepackages(known_paths, sys_prefix=sys.prefix, exec_prefix=sys.exec_prefix):
    """Add site-packages (and possibly site-python) to sys.path.

    For each candidate prefix (Debian-style '<prefix>/local' first, then the
    prefix itself) the platform-specific list of site directories is built
    and every existing one is registered via addsitedir().  Returns None.
    """
    prefixes = [os.path.join(sys_prefix, "local"), sys_prefix]
    if exec_prefix != sys_prefix:
        prefixes.append(os.path.join(exec_prefix, "local"))
    for prefix in prefixes:
        if prefix:
            if sys.platform in ('os2emx', 'riscos') or _is_jython:
                sitedirs = [os.path.join(prefix, "Lib", "site-packages")]
            elif _is_pypy:
                sitedirs = [os.path.join(prefix, 'site-packages')]
            elif sys.platform == 'darwin' and prefix == sys_prefix:
                if prefix.startswith("/System/Library/Frameworks/"): # Apple's Python
                    sitedirs = [os.path.join("/Library/Python", sys.version[:3], "site-packages"),
                                os.path.join(prefix, "Extras", "lib", "python")]
                else: # any other Python distros on OSX work this way
                    sitedirs = [os.path.join(prefix, "lib",
                                             "python" + sys.version[:3], "site-packages")]
            elif os.sep == '/':
                # Generic POSIX layout.
                sitedirs = [os.path.join(prefix,
                                         "lib",
                                         "python" + sys.version[:3],
                                         "site-packages"),
                            os.path.join(prefix, "lib", "site-python"),
                            os.path.join(prefix, "python" + sys.version[:3], "lib-dynload")]
                lib64_dir = os.path.join(prefix, "lib64", "python" + sys.version[:3], "site-packages")
                # lib64 gets priority on 64-bit builds, otherwise is appended.
                if (os.path.exists(lib64_dir) and
                        os.path.realpath(lib64_dir) not in [os.path.realpath(p) for p in sitedirs]):
                    if sys.maxsize > 2**32:
                        sitedirs.insert(0, lib64_dir)
                    else:
                        sitedirs.append(lib64_dir)
                try:
                    # sys.getobjects only available in --with-pydebug build
                    sys.getobjects
                    sitedirs.insert(0, os.path.join(sitedirs[0], 'debug'))
                except AttributeError:
                    pass
                # Debian-specific dist-packages directories:
                if sys.version[0] == '2':
                    sitedirs.append(os.path.join(prefix, "lib",
                                                 "python" + sys.version[:3],
                                                 "dist-packages"))
                else:
                    sitedirs.append(os.path.join(prefix, "lib",
                                                 "python" + sys.version[0],
                                                 "dist-packages"))
                sitedirs.append(os.path.join(prefix, "local/lib",
                                             "python" + sys.version[:3],
                                             "dist-packages"))
                sitedirs.append(os.path.join(prefix, "lib", "dist-python"))
            else:
                sitedirs = [prefix, os.path.join(prefix, "lib", "site-packages")]
            if sys.platform == 'darwin':
                # for framework builds *only* we add the standard Apple
                # locations. Currently only per-user, but /Library and
                # /Network/Library could be added too
                if 'Python.framework' in prefix:
                    home = os.environ.get('HOME')
                    if home:
                        sitedirs.append(
                            os.path.join(home,
                                         'Library',
                                         'Python',
                                         sys.version[:3],
                                         'site-packages'))
            for sitedir in sitedirs:
                if os.path.isdir(sitedir):
                    addsitedir(sitedir, known_paths)
    return None
def check_enableusersite():
    """Check if the per-user site directory is safe for inclusion.

    Returns:
        None  -- disabled for security reasons (effective uid/gid differs
                 from the real uid/gid, e.g. setuid scripts)
        False -- disabled by the user (command line flag / environment var)
        True  -- safe and enabled
    """
    if hasattr(sys, 'flags') and getattr(sys.flags, 'no_user_site', False):
        return False
    uid_checkable = hasattr(os, "getuid") and hasattr(os, "geteuid")
    if uid_checkable and os.geteuid() != os.getuid():
        return None
    gid_checkable = hasattr(os, "getgid") and hasattr(os, "getegid")
    if gid_checkable and os.getegid() != os.getgid():
        return None
    return True
def addusersitepackages(known_paths):
    """Add a per user site-package to sys.path

    Each user has its own python directory with site-packages in the
    home directory.
    USER_BASE is the root directory for all Python versions
    USER_SITE is the user specific site-packages directory
    USER_SITE/.. can be used for data.
    """
    global USER_BASE, USER_SITE, ENABLE_USER_SITE
    # PYTHONUSERBASE overrides the platform-default base directory.
    env_base = os.environ.get("PYTHONUSERBASE", None)

    def joinuser(*args):
        # Join and expand '~' so "~/..." becomes an absolute home path.
        return os.path.expanduser(os.path.join(*args))

    #if sys.platform in ('os2emx', 'riscos'):
    #    # Don't know what to put here
    #    USER_BASE = ''
    #    USER_SITE = ''
    if os.name == "nt":
        # Windows: %APPDATA%\Python\PythonXY\site-packages
        base = os.environ.get("APPDATA") or "~"
        if env_base:
            USER_BASE = env_base
        else:
            USER_BASE = joinuser(base, "Python")
        USER_SITE = os.path.join(USER_BASE,
                                 "Python" + sys.version[0] + sys.version[2],
                                 "site-packages")
    else:
        # POSIX: ~/.local/lib/pythonX.Y/site-packages
        if env_base:
            USER_BASE = env_base
        else:
            USER_BASE = joinuser("~", ".local")
        USER_SITE = os.path.join(USER_BASE, "lib",
                                 "python" + sys.version[:3],
                                 "site-packages")
    if ENABLE_USER_SITE and os.path.isdir(USER_SITE):
        addsitedir(USER_SITE, known_paths)
    if ENABLE_USER_SITE:
        # Debian-style per-user dist-packages directories.
        for dist_libdir in ("lib", "local/lib"):
            user_site = os.path.join(USER_BASE, dist_libdir,
                                     "python" + sys.version[:3],
                                     "dist-packages")
            if os.path.isdir(user_site):
                addsitedir(user_site, known_paths)
    return known_paths
def setBEGINLIBPATH():
    """OS/2 EMX: add the lib-dynload DLL directory to BEGINLIBPATH.

    The optional extension modules double as DLLs, so the library search
    path must include their directory for other extensions to find them.
    """
    dllpath = os.path.join(sys.prefix, "Lib", "lib-dynload")
    entries = os.environ['BEGINLIBPATH'].split(';')
    if entries[-1]:
        entries.append(dllpath)
    else:
        # A trailing ';' left an empty final slot -- reuse it.
        entries[-1] = dllpath
    os.environ['BEGINLIBPATH'] = ';'.join(entries)
def setquit():
    """Install the interactive 'quit' and 'exit' built-ins.

    Their repr() hints at the platform's EOF keystroke; calling them
    raises SystemExit (after closing stdin so shells like IDLE notice).
    """
    eof_keys = {':': 'Cmd-Q', '\\': 'Ctrl-Z plus Return'}
    eof = eof_keys.get(os.sep, 'Ctrl-D (i.e. EOF)')

    class Quitter(object):
        def __init__(self, name):
            self.name = name

        def __repr__(self):
            return 'Use %s() or %s to exit' % (self.name, eof)

        def __call__(self, code=None):
            # Shells like IDLE catch the SystemExit, but listen when their
            # stdin wrapper is closed.
            try:
                sys.stdin.close()
            except:
                pass
            raise SystemExit(code)

    for name in ('quit', 'exit'):
        setattr(builtins, name, Quitter(name))
class _Printer(object):
    """interactive prompt objects for printing the license text, a list of
    contributors and the copyright notice."""

    # Page size used by the interactive pager in __call__().
    MAXLINES = 23

    def __init__(self, name, data, files=(), dirs=()):
        # name: built-in name ('copyright', 'credits', 'license');
        # data: fallback text; files/dirs: candidate on-disk locations
        # tried (dirs x files) before falling back to data.
        self.__name = name
        self.__data = data
        self.__files = files
        self.__dirs = dirs
        self.__lines = None

    def __setup(self):
        # Lazily load the text: the first readable dir/file combination
        # wins, otherwise the built-in fallback data is used.
        if self.__lines:
            return
        data = None
        for dir in self.__dirs:
            for filename in self.__files:
                filename = os.path.join(dir, filename)
                try:
                    fp = open(filename, "rU")
                    data = fp.read()
                    fp.close()
                    break
                except IOError:
                    pass
            if data:
                break
        if not data:
            data = self.__data
        self.__lines = data.split('\n')
        self.__linecnt = len(self.__lines)

    def __repr__(self):
        # Short texts are shown inline; long ones advertise the callable.
        self.__setup()
        if len(self.__lines) <= self.MAXLINES:
            return "\n".join(self.__lines)
        else:
            return "Type %s() to see the full %s text" % ((self.__name,)*2)

    def __call__(self):
        # Page through the text MAXLINES at a time; Return continues,
        # 'q' quits.  raw_input exists on Python 2, input on Python 3.
        self.__setup()
        prompt = 'Hit Return for more, or q (and Return) to quit: '
        lineno = 0
        while 1:
            try:
                for i in range(lineno, lineno + self.MAXLINES):
                    print(self.__lines[i])
            except IndexError:
                break
            else:
                lineno += self.MAXLINES
                key = None
                while key is None:
                    try:
                        key = raw_input(prompt)
                    except NameError:
                        key = input(prompt)
                    if key not in ('', 'q'):
                        key = None
                if key == 'q':
                    break
def setcopyright():
    """Install 'copyright', 'credits' and 'license' as interactive built-ins."""
    builtins.copyright = _Printer("copyright", sys.copyright)
    if _is_jython:
        credits_text = ("Jython is maintained by the Jython developers "
                        "(www.jython.org).")
    elif _is_pypy:
        credits_text = ("PyPy is maintained by the PyPy developers: "
                        "http://codespeak.net/pypy")
    else:
        credits_text = """\
Thanks to CWI, CNRI, BeOpen.com, Zope Corporation and a cast of thousands
for supporting Python development. See www.python.org for more information."""
    builtins.credits = _Printer("credits", credits_text)
    # 'license' prefers the on-disk LICENSE file next to the stdlib,
    # falling back to a pointer at python.org.
    here = os.path.dirname(os.__file__)
    license_dirs = [os.path.join(here, os.pardir), here, os.curdir]
    builtins.license = _Printer(
        "license", "See http://www.python.org/%.3s/license.html" % sys.version,
        ["LICENSE.txt", "LICENSE"],
        license_dirs)
class _Helper(object):
"""Define the built-in 'help'.
This is a wrapper around pydoc.help (with a twist).
"""
def __repr__(self):
return "Type help() for interactive help, " \
"or help(object) for help about object."
def __call__(self, *args, **kwds):
import pydoc
return pydoc.help(*args, **kwds)
def sethelper():
    """Install the interactive 'help' built-in."""
    setattr(builtins, 'help', _Helper())
def aliasmbcs():
    """On Windows, alias the current 'cpXXX' locale encoding to 'mbcs'
    when Python itself provides no codec for it."""
    if sys.platform != 'win32':
        # Nothing to do elsewhere.
        return
    import locale, codecs
    enc = locale.getdefaultlocale()[1]
    if enc.startswith('cp'):  # "cp***" ?
        try:
            codecs.lookup(enc)
        except LookupError:
            import encodings
            encodings._cache[enc] = encodings._unknown
            encodings.aliases.aliases[enc] = 'mbcs'
def setencoding():
    """Set the string encoding used by the Unicode implementation.

    The default is 'ascii'; the disabled branches below show how to
    experiment with locale-aware or disabled implicit coercion.
    """
    encoding = "ascii"  # Default value set by _PyUnicode_Init()
    if False:
        # Enable to support locale aware default string encodings.
        import locale
        language, locale_enc = locale.getdefaultlocale()
        if locale_enc:
            encoding = locale_enc
    if False:
        # Enable to switch off string to Unicode coercion and implicit
        # Unicode to string conversion.
        encoding = "undefined"
    if encoding != "ascii":
        # Needs a Unicode build; non-Unicode builds raise AttributeError here.
        sys.setdefaultencoding(encoding)
def execsitecustomize():
    """Import the optional 'sitecustomize' module for its side effects."""
    try:
        __import__('sitecustomize')
    except ImportError:
        # No site customizations installed -- that's fine.
        pass
def virtual_install_main_packages():
    """Add the base interpreter's standard-library directories to sys.path.

    Reads the base prefix from 'orig-prefix.txt' (written by virtualenv next
    to this module), stores it as sys.real_prefix, then appends the
    platform-appropriate stdlib paths from that prefix.
    """
    f = open(os.path.join(os.path.dirname(__file__), 'orig-prefix.txt'))
    sys.real_prefix = f.read().strip()
    f.close()
    # NOTE(review): 'pos' is computed but never used in this copy of the
    # function -- confirm against upstream virtualenv.
    pos = 2
    hardcoded_relative_dirs = []
    if sys.path[0] == '':
        pos += 1
    if _is_jython:
        paths = [os.path.join(sys.real_prefix, 'Lib')]
    elif _is_pypy:
        if sys.pypy_version_info >= (1, 5):
            # PyPy >= 1.5 names lib-python with a plain X.Y version.
            cpyver = '%d.%d' % sys.version_info[:2]
        else:
            cpyver = '%d.%d.%d' % sys.version_info[:3]
        paths = [os.path.join(sys.real_prefix, 'lib_pypy'),
                 os.path.join(sys.real_prefix, 'lib-python', 'modified-%s' % cpyver),
                 os.path.join(sys.real_prefix, 'lib-python', cpyver)]
        hardcoded_relative_dirs = paths[:] # for the special 'darwin' case below
        #
        # This is hardcoded in the Python executable, but relative to sys.prefix:
        for path in paths[:]:
            plat_path = os.path.join(path, 'plat-%s' % sys.platform)
            if os.path.exists(plat_path):
                paths.append(plat_path)
    elif sys.platform == 'win32':
        paths = [os.path.join(sys.real_prefix, 'Lib'), os.path.join(sys.real_prefix, 'DLLs')]
    else:
        paths = [os.path.join(sys.real_prefix, 'lib', 'python'+sys.version[:3])]
        hardcoded_relative_dirs = paths[:] # for the special 'darwin' case below
        lib64_path = os.path.join(sys.real_prefix, 'lib64', 'python'+sys.version[:3])
        if os.path.exists(lib64_path):
            if sys.maxsize > 2**32:
                # 64-bit build: lib64 takes priority.
                paths.insert(0, lib64_path)
            else:
                paths.append(lib64_path)
        # This is hardcoded in the Python executable, but relative to sys.prefix:
        plat_path = os.path.join(sys.real_prefix, 'lib', 'python'+sys.version[:3],
                                 'plat-%s' % sys.platform)
        if os.path.exists(plat_path):
            paths.append(plat_path)
    # This is hardcoded in the Python executable, but
    # relative to sys.prefix, so we have to fix up:
    for path in list(paths):
        tk_dir = os.path.join(path, 'lib-tk')
        if os.path.exists(tk_dir):
            paths.append(tk_dir)
    # These are hardcoded in the Apple's Python executable,
    # but relative to sys.prefix, so we have to fix them up:
    if sys.platform == 'darwin':
        hardcoded_paths = [os.path.join(relative_dir, module)
                           for relative_dir in hardcoded_relative_dirs
                           for module in ('plat-darwin', 'plat-mac', 'plat-mac/lib-scriptpackages')]
        for path in hardcoded_paths:
            if os.path.exists(path):
                paths.append(path)
    sys.path.extend(paths)
def force_global_eggs_after_local_site_packages():
    """Place easy_installed global eggs after all virtualenv packages.

    Keeps the "least surprise" property: packages inside the virtualenv
    always mask global packages, never the other way around.
    """
    egginsert = getattr(sys, '__egginsert', 0)
    for index, entry in enumerate(sys.path):
        if index > egginsert and entry.startswith(sys.prefix):
            egginsert = index
    sys.__egginsert = egginsert + 1
def virtual_addsitepackages(known_paths):
    """Add the base (non-virtualenv) prefix's site-packages, after demoting
    global eggs below the virtualenv's own packages."""
    force_global_eggs_after_local_site_packages()
    return addsitepackages(known_paths, sys_prefix=sys.real_prefix)
def fixclasspath():
    """Move Jython's special classpath entries ('__classpath__' and
    '__pyclasspath__...') to the end of sys.path so they follow the base
    virtualenv lib directories."""
    ordinary = []
    special = []
    for entry in sys.path:
        if entry == '__classpath__' or entry.startswith('__pyclasspath__'):
            special.append(entry)
        else:
            ordinary.append(entry)
    sys.path = ordinary
    sys.path.extend(special)
def execusercustomize():
    """Import the optional 'usercustomize' module for its side effects."""
    try:
        __import__('usercustomize')
    except ImportError:
        # No per-user customizations installed -- that's fine.
        pass
def main():
    """Perform all virtualenv-aware site initialization (run at import time)."""
    global ENABLE_USER_SITE
    # Expose the base interpreter's stdlib before anything else.
    virtual_install_main_packages()
    abs__file__()
    paths_in_sys = removeduppaths()
    if (os.name == "posix" and sys.path and
            os.path.basename(sys.path[-1]) == "Modules"):
        addbuilddir()
    if _is_jython:
        fixclasspath()
    # virtualenv drops this marker file when created with --no-site-packages.
    GLOBAL_SITE_PACKAGES = not os.path.exists(os.path.join(os.path.dirname(__file__), 'no-global-site-packages.txt'))
    if not GLOBAL_SITE_PACKAGES:
        ENABLE_USER_SITE = False
    if ENABLE_USER_SITE is None:
        ENABLE_USER_SITE = check_enableusersite()
    paths_in_sys = addsitepackages(paths_in_sys)
    paths_in_sys = addusersitepackages(paths_in_sys)
    if GLOBAL_SITE_PACKAGES:
        paths_in_sys = virtual_addsitepackages(paths_in_sys)
    if sys.platform == 'os2emx':
        setBEGINLIBPATH()
    # Interactive conveniences: quit/exit, copyright/credits/license, help.
    setquit()
    setcopyright()
    sethelper()
    aliasmbcs()
    setencoding()
    execsitecustomize()
    if ENABLE_USER_SITE:
        execusercustomize()
    # Remove sys.setdefaultencoding() so that users cannot change the
    # encoding after initialization. The test for presence is needed when
    # this module is run as a script, because this code is executed twice.
    if hasattr(sys, "setdefaultencoding"):
        del sys.setdefaultencoding
main()  # run site initialization immediately at import time
def _script():
    """Command-line entry point reporting site-specific path information.

    Without arguments, print sys.path, USER_BASE/USER_SITE (and whether
    they exist) and ENABLE_USER_SITE.  With --user-base and/or
    --user-site, print the requested value(s) and exit with a status
    describing the user-site state (0 enabled, 1 disabled by user,
    2 disabled for security, >2 unknown error).
    """
    help = """\
    %s [--user-base] [--user-site]

    Without arguments print some useful information
    With arguments print the value of USER_BASE and/or USER_SITE separated
    by '%s'.

    Exit codes with --user-base or --user-site:
      0 - user site directory is enabled
      1 - user site directory is disabled by user
      2 - uses site directory is disabled by super user
          or for security reasons
     >2 - unknown error
    """
    args = sys.argv[1:]
    if not args:
        print("sys.path = [")
        for dir in sys.path:
            print("    %r," % (dir,))
        print("]")

        def exists(path):
            if os.path.isdir(path):
                return "exists"
            else:
                return "doesn't exist"

        print("USER_BASE: %r (%s)" % (USER_BASE, exists(USER_BASE)))
        # Bug fix: the existence check previously reported USER_BASE's
        # state on the USER_SITE line.
        print("USER_SITE: %r (%s)" % (USER_SITE, exists(USER_SITE)))
        print("ENABLE_USER_SITE: %r" % ENABLE_USER_SITE)
        sys.exit(0)

    buffer = []
    if '--user-base' in args:
        buffer.append(USER_BASE)
    if '--user-site' in args:
        buffer.append(USER_SITE)

    if buffer:
        print(os.pathsep.join(buffer))
        if ENABLE_USER_SITE:
            sys.exit(0)
        elif ENABLE_USER_SITE is False:
            sys.exit(1)
        elif ENABLE_USER_SITE is None:
            sys.exit(2)
        else:
            sys.exit(3)
    else:
        import textwrap
        print(textwrap.dedent(help % (sys.argv[0], os.pathsep)))
        sys.exit(10)
# Allow running this module directly to inspect the site configuration.
if __name__ == '__main__':
    _script()
| |
# This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from datetime import timedelta
from flask import request
from wtforms.ext.sqlalchemy.fields import QuerySelectField
from wtforms.fields import BooleanField, HiddenField, SelectField, StringField, TextAreaField
from wtforms.validators import DataRequired, ValidationError
from indico.core.db import db
from indico.core.db.sqlalchemy.descriptions import RenderMode
from indico.modules.events.abstracts.settings import BOASortField
from indico.modules.events.contributions.fields import (ContributionPersonLinkListField,
SubContributionPersonLinkListField)
from indico.modules.events.contributions.models.references import ContributionReference, SubContributionReference
from indico.modules.events.contributions.models.types import ContributionType
from indico.modules.events.fields import ReferencesField
from indico.modules.events.util import check_permissions
from indico.util.date_time import get_day_end
from indico.util.i18n import _
from indico.web.flask.util import url_for
from indico.web.forms.base import IndicoForm, generated_data
from indico.web.forms.fields import (HiddenFieldList, IndicoDateTimeField, IndicoEnumSelectField, IndicoLocationField,
IndicoProtectionField, IndicoTagListField)
from indico.web.forms.fields.datetime import IndicoDurationField
from indico.web.forms.fields.principals import PermissionsField
from indico.web.forms.validators import DateTimeRange, MaxDuration
from indico.web.forms.widgets import SwitchWidget
class ContributionForm(IndicoForm):
    """Create/edit form for a contribution.

    Constructor kwargs (consumed before field processing):
        event -- the event the contribution belongs to (required)
        contrib -- existing contribution being edited, or None
        to_schedule -- whether the contribution is being scheduled now
        session_block -- block the contribution is scheduled in, if any
    """

    title = StringField(_('Title'), [DataRequired()])
    description = TextAreaField(_('Description'))
    start_dt = IndicoDateTimeField(_('Start date'),
                                   [DataRequired(),
                                    DateTimeRange(earliest=lambda form, field: form._get_earliest_start_dt(),
                                                  latest=lambda form, field: form._get_latest_start_dt())],
                                   allow_clear=False,
                                   description=_('Start date of the contribution'))
    duration = IndicoDurationField(_('Duration'), [DataRequired(), MaxDuration(timedelta(hours=24))],
                                   default=timedelta(minutes=20))
    type = QuerySelectField(_('Type'), get_label='name', allow_blank=True, blank_text=_('No type selected'))
    person_link_data = ContributionPersonLinkListField(_('People'))
    location_data = IndicoLocationField(_('Location'))
    keywords = IndicoTagListField(_('Keywords'))
    references = ReferencesField(_('External IDs'), reference_class=ContributionReference,
                                 description=_('Manage external resources for this contribution'))
    board_number = StringField(_('Board Number'))
    code = StringField(_('Program code'))

    @generated_data
    def render_mode(self):
        # Descriptions are always rendered as Markdown.
        return RenderMode.markdown

    def __init__(self, *args, **kwargs):
        self.event = kwargs.pop('event')
        self.contrib = kwargs.pop('contrib', None)
        self.session_block = kwargs.get('session_block')
        self.timezone = self.event.timezone
        to_schedule = kwargs.pop('to_schedule', False)
        super().__init__(*args, **kwargs)
        self.type.query = self.event.contribution_types
        if self.event.type != 'conference':
            # Outside conferences the linked people are shown as speakers.
            self.person_link_data.label.text = _('Speakers')
        if not self.type.query.count():
            # No contribution types defined for this event -- hide the selector.
            del self.type
        if not to_schedule and (self.contrib is None or not self.contrib.is_scheduled):
            # Unscheduled contributions have no start date to edit.
            del self.start_dt

    def _get_earliest_start_dt(self):
        # Contributions must start within their session block, or the event.
        return self.session_block.start_dt if self.session_block else self.event.start_dt

    def _get_latest_start_dt(self):
        return self.session_block.end_dt if self.session_block else self.event.end_dt

    def validate_duration(self, field):
        """Reject durations pushing the contribution past its block/event end."""
        start_dt = self.start_dt.data if self.start_dt else None
        if start_dt:
            end_dt = start_dt + field.data
            if self.session_block and end_dt > self.session_block.end_dt:
                raise ValidationError(_('With the current duration the contribution exceeds the block end date'))
            if end_dt > self.event.end_dt:
                raise ValidationError(_('With the current duration the contribution exceeds the event end date'))

    @property
    def custom_field_names(self):
        """Names of dynamically added custom fields (prefixed 'custom_')."""
        return tuple(field_name for field_name in self._fields if field_name.startswith('custom_'))
class ContributionProtectionForm(IndicoForm):
    """Form managing a contribution's permissions and protection mode."""

    permissions = PermissionsField(_('Permissions'), object_type='contribution')
    protection_mode = IndicoProtectionField(_('Protection mode'), protected_object=lambda form: form.protected_object,
                                            acl_message_url=lambda form: url_for('contributions.acl_message',
                                                                                 form.protected_object))

    def __init__(self, *args, **kwargs):
        # 'contrib' kwarg is required; the event is derived from it.
        self.protected_object = contribution = kwargs.pop('contrib')
        self.event = contribution.event
        super().__init__(*args, **kwargs)

    def validate_permissions(self, field):
        # check_permissions returns an error message (or a falsy value)
        # instead of raising, so convert it into a validation error here.
        except_msg = check_permissions(self.event, field)
        if except_msg:
            raise ValidationError(except_msg)
class SubContributionForm(IndicoForm):
    """Create/edit form for a subcontribution."""

    title = StringField(_('Title'), [DataRequired()])
    description = TextAreaField(_('Description'))
    duration = IndicoDurationField(_('Duration'), [DataRequired(), MaxDuration(timedelta(hours=24))],
                                   default=timedelta(minutes=20))
    speakers = SubContributionPersonLinkListField(_('Speakers'), allow_submitters=False, allow_authors=False,
                                                  description=_('The speakers of the subcontribution'))
    references = ReferencesField(_('External IDs'), reference_class=SubContributionReference,
                                 description=_('Manage external resources for this sub-contribution'))
    code = StringField(_('Program code'))

    @generated_data
    def render_mode(self):
        # Descriptions are always rendered as Markdown.
        return RenderMode.markdown

    def __init__(self, *args, **kwargs):
        # 'event' is required; 'subcontrib' is the object being edited (or None).
        self.event = kwargs.pop('event')
        self.subcontrib = kwargs.pop('subcontrib', None)
        super().__init__(*args, **kwargs)
class ContributionStartDateForm(IndicoForm):
    """Form for changing a scheduled contribution's start date."""

    start_dt = IndicoDateTimeField(_('Start date'), [DataRequired(),
                                                     DateTimeRange(earliest=lambda form, field: form.event.start_dt,
                                                                   latest=lambda form, field: form.event.end_dt)],
                                   allow_clear=False)

    def __init__(self, *args, **kwargs):
        self.contrib = kwargs.pop('contrib')
        self.event = self.contrib.event
        self.timezone = self.event.timezone
        super().__init__(*args, **kwargs)

    def validate_start_dt(self, field):
        """Ensure the contribution still fits within its day / the event."""
        event = self.contrib.event
        # NOTE(review): the reference day is derived from the contribution's
        # *current* start_dt rather than the newly submitted field.data --
        # confirm this is intentional.
        day = self.contrib.start_dt.astimezone(event.tzinfo).date()
        if day == event.end_dt_local.date():
            latest_dt = event.end_dt
            error_msg = _('With this time, the contribution would exceed the event end time.')
        else:
            latest_dt = get_day_end(day, tzinfo=event.tzinfo)
            error_msg = _('With this time, the contribution would exceed the current day.')
        if field.data + self.contrib.duration > latest_dt:
            raise ValidationError(error_msg)
class ContributionDurationForm(IndicoForm):
    """Form for changing a contribution's duration."""

    duration = IndicoDurationField(_('Duration'), [DataRequired(), MaxDuration(timedelta(hours=24))],
                                   default=timedelta(minutes=20))

    def __init__(self, *args, **kwargs):
        self.contrib = kwargs.pop('contrib')
        super().__init__(*args, **kwargs)

    def validate_duration(self, field):
        """Reject durations pushing a scheduled contribution past its day/event."""
        if field.errors:
            # Field-level validators already failed; nothing more to check.
            return
        if self.contrib.is_scheduled:
            event = self.contrib.event
            day = self.contrib.start_dt.astimezone(event.tzinfo).date()
            if day == event.end_dt_local.date():
                latest_dt = event.end_dt
                error_msg = _('With this duration, the contribution would exceed the event end time.')
            else:
                latest_dt = get_day_end(day, tzinfo=event.tzinfo)
                error_msg = _('With this duration, the contribution would exceed the current day.')
            if self.contrib.start_dt + field.data > latest_dt:
                raise ValidationError(error_msg)
class ContributionDefaultDurationForm(IndicoForm):
    """Form for setting the default duration of new contributions."""

    duration = IndicoDurationField(_('Duration'), [DataRequired(), MaxDuration(timedelta(hours=24))],
                                   default=timedelta(minutes=20))
class ContributionTypeForm(IndicoForm):
    """Form to create or edit a ContributionType."""

    name = StringField(_('Name'), [DataRequired()])
    is_private = BooleanField(_('Private'), widget=SwitchWidget(),
                              description=_('If selected, this contribution type cannot be chosen by users '
                                            'submitting an abstract.'))
    description = TextAreaField(_('Description'))

    def __init__(self, *args, **kwargs):
        self.event = kwargs.pop('event')
        # 'obj' is set when editing an existing type; used to exclude it
        # from the uniqueness check below.
        self.contrib_type = kwargs.get('obj')
        super().__init__(*args, **kwargs)

    def validate_name(self, field):
        """Enforce case-insensitive uniqueness of type names per event."""
        query = self.event.contribution_types.filter(db.func.lower(ContributionType.name) == field.data.lower())
        if self.contrib_type:
            query = query.filter(ContributionType.id != self.contrib_type.id)
        if query.count():
            raise ValidationError(_('A contribution type with this name already exists'))
class ContributionExportTeXForm(IndicoForm):
    """Form for TeX-based export selection"""

    format = SelectField(_('Format'), default='PDF')
    sort_by = IndicoEnumSelectField(_('Sort by'), enum=BOASortField, default=BOASortField.abstract_title,
                                    sorted=True)
    contribution_ids = HiddenFieldList()
    submitted = HiddenField()

    def __init__(self, *args, **kwargs):
        self.contribs = kwargs.get('contribs')
        super().__init__(*args, **kwargs)
        if not self.contribution_ids.data:
            # Default to exporting all provided contributions.
            self.contribution_ids.data = [c.id for c in self.contribs]

    def is_submitted(self):
        # Only treat the form as submitted when our hidden marker is present
        # (avoids clashing with other forms posted on the same page).
        return super().is_submitted() and 'submitted' in request.form
| |
# Sphinx configuration for the setuptools documentation build.
extensions = ['sphinx.ext.autodoc', 'jaraco.packaging.sphinx', 'rst.linker']

master_doc = "index"

# rst.linker: rewrite issue/PR/PEP references in the changelog into links.
link_files = {
    '../CHANGES.rst': dict(
        using=dict(
            BB='https://bitbucket.org',
            GH='https://github.com',
        ),
        replace=[
            dict(
                pattern=r'(?<!\w)(Issue )?#(?P<issue>\d+)',
                url='{package_url}/issues/{issue}',
            ),
            dict(
                pattern=r'BB Pull Request ?#(?P<bb_pull_request>\d+)',
                url='{BB}/pypa/setuptools/pull-request/{bb_pull_request}',
            ),
            dict(
                pattern=r'Distribute #(?P<distribute>\d+)',
                url='{BB}/tarek/distribute/issue/{distribute}',
            ),
            dict(
                pattern=r'Buildout #(?P<buildout>\d+)',
                url='{GH}/buildout/buildout/issues/{buildout}',
            ),
            dict(
                pattern=r'Old Setuptools #(?P<old_setuptools>\d+)',
                url='http://bugs.python.org/setuptools/issue{old_setuptools}',
            ),
            dict(
                pattern=r'Jython #(?P<jython>\d+)',
                url='http://bugs.jython.org/issue{jython}',
            ),
            dict(
                pattern=r'(Python #|bpo-)(?P<python>\d+)',
                url='http://bugs.python.org/issue{python}',
            ),
            dict(
                pattern=r'Interop #(?P<interop>\d+)',
                url='{GH}/pypa/interoperability-peps/issues/{interop}',
            ),
            dict(
                pattern=r'Pip #(?P<pip>\d+)',
                url='{GH}/pypa/pip/issues/{pip}',
            ),
            dict(
                pattern=r'Packaging #(?P<packaging>\d+)',
                url='{GH}/pypa/packaging/issues/{packaging}',
            ),
            dict(
                pattern=r'[Pp]ackaging (?P<packaging_ver>\d+(\.\d+)+)',
                url='{GH}/pypa/packaging/blob/{packaging_ver}/CHANGELOG.rst',
            ),
            dict(
                pattern=r'(?<![`/\w])PEP[- ](?P<pep_number>\d+)',
                url='https://www.python.org/dev/peps/pep-{pep_number:0>4}/',
            ),
            dict(
                pattern=r'setuptools_svn #(?P<setuptools_svn>\d+)',
                url='{GH}/jaraco/setuptools_svn/issues/{setuptools_svn}',
            ),
            dict(
                pattern=r'pypa/(?P<issue_repo>[\-\.\w]+)#(?P<issue_number>\d+)',
                url='{GH}/pypa/{issue_repo}/issues/{issue_number}',
            ),
            dict(
                pattern=r'pypa/(?P<commit_repo>[\-\.\w]+)@(?P<commit_number>[\da-f]+)',
                url='{GH}/pypa/{commit_repo}/commit/{commit_number}',
            ),
            dict(
                # Version headings get a release timestamp from SCM metadata.
                pattern=r'^(?m)((?P<scm_version>v?\d+(\.\d+){1,2}))\n[-=]+\n',
                with_scm='{text}\n{rev[timestamp]:%d %b %Y}\n',
            ),
        ],
    ),
}

# Be strict about any broken references:
nitpicky = True

# Include Python intersphinx mapping to prevent failures
# jaraco/skeleton#51
extensions += ['sphinx.ext.intersphinx']
intersphinx_mapping = {
    'python': ('https://docs.python.org/3', None),
}

intersphinx_mapping.update({
    'pypa-build': ('https://pypa-build.readthedocs.io/en/latest/', None)
})

# Add support for linking usernames
github_url = 'https://github.com'
github_sponsors_url = f'{github_url}/sponsors'
extlinks = {
    'user': (f'{github_sponsors_url}/%s', '@'),  # noqa: WPS323
    'pypi': ('https://pypi.org/project/%s', '%s'),
}
extensions += ['sphinx.ext.extlinks']

# Ref: https://github.com/python-attrs/attrs/pull/571/files\
#      #diff-85987f48f1258d9ee486e3191495582dR82
default_role = 'any'

# HTML theme
html_theme = 'furo'
html_logo = "images/logo.svg"

html_theme_options = {
    "sidebar_hide_name": True,
    "light_css_variables": {
        "color-brand-primary": "#336790",  # "blue"
        "color-brand-content": "#336790",
    },
    "dark_css_variables": {
        "color-brand-primary": "#E5B62F",  # "yellow"
        "color-brand-content": "#E5B62F",
    },
}

# Add support for inline tabs
extensions += ['sphinx_inline_tabs']

# Support for distutils

# Ref: https://stackoverflow.com/a/30624034/595220
nitpick_ignore = [
    ('c:func', 'SHGetSpecialFolderPath'),  # ref to MS docs
    ('envvar', 'DISTUTILS_DEBUG'),  # undocumented
    ('envvar', 'HOME'),  # undocumented
    ('envvar', 'PLAT'),  # undocumented
    ('py:attr', 'CCompiler.language_map'),  # undocumented
    ('py:attr', 'CCompiler.language_order'),  # undocumented
    ('py:class', 'distutils.dist.Distribution'),  # undocumented
    ('py:class', 'distutils.extension.Extension'),  # undocumented
    ('py:class', 'BorlandCCompiler'),  # undocumented
    ('py:class', 'CCompiler'),  # undocumented
    ('py:class', 'CygwinCCompiler'),  # undocumented
    ('py:class', 'distutils.dist.DistributionMetadata'),  # undocumented
    ('py:class', 'FileList'),  # undocumented
    ('py:class', 'IShellLink'),  # ref to MS docs
    ('py:class', 'MSVCCompiler'),  # undocumented
    ('py:class', 'OptionDummy'),  # undocumented
    ('py:class', 'UnixCCompiler'),  # undocumented
    ('py:exc', 'CompileError'),  # undocumented
    ('py:exc', 'DistutilsExecError'),  # undocumented
    ('py:exc', 'DistutilsFileError'),  # undocumented
    ('py:exc', 'LibError'),  # undocumented
    ('py:exc', 'LinkError'),  # undocumented
    ('py:exc', 'PreprocessError'),  # undocumented
    ('py:func', 'distutils.CCompiler.new_compiler'),  # undocumented
    # undocumented:
    ('py:func', 'distutils.dist.DistributionMetadata.read_pkg_file'),
    ('py:func', 'distutils.file_util._copy_file_contents'),  # undocumented
    ('py:func', 'distutils.log.debug'),  # undocumented
    ('py:func', 'distutils.spawn.find_executable'),  # undocumented
    ('py:func', 'distutils.spawn.spawn'),  # undocumented
    # TODO: check https://docutils.rtfd.io in the future
    ('py:mod', 'docutils'),  # there's no Sphinx site documenting this
]

# Allow linking objects on other Sphinx sites seamlessly:
# NOTE(review): the 'python' entry duplicates the mapping defined above.
intersphinx_mapping.update(
    python=('https://docs.python.org/3', None),
    python2=('https://docs.python.org/2', None),
)

# Add support for the unreleased "next-version" change notes
extensions += ['sphinxcontrib.towncrier']
# Extension needs a path from here to the towncrier config.
towncrier_draft_working_directory = '..'
# Avoid an empty section for unpublished changes.
towncrier_draft_include_empty = False

extensions += ['jaraco.tidelift']

# Add icons (aka "favicons") to documentation
# NOTE(review): Sphinx extension names are importable module names; confirm
# whether this should be 'sphinx_favicon' rather than 'sphinx-favicon'.
extensions += ['sphinx-favicon']
html_static_path = ['images']  # should contain the folder with icons

# List of dicts with <link> HTML attributes
# static-file points to files in the html_static_path (href is computed)
favicons = [
    {  # "Catch-all" goes first, otherwise some browsers will overwrite
        "rel": "icon",
        "type": "image/svg+xml",
        "static-file": "logo-symbol-only.svg",
        "sizes": "any"
    },
    {  # Version with thicker strokes for better visibility at smaller sizes
        "rel": "icon",
        "type": "image/svg+xml",
        "static-file": "favicon.svg",
        "sizes": "16x16 24x24 32x32 48x48"
    },
    # rel="apple-touch-icon" does not support SVG yet
]

intersphinx_mapping['pip'] = 'https://pip.pypa.io/en/latest', None
intersphinx_mapping['PyPUG'] = ('https://packaging.python.org/en/latest/', None)
intersphinx_mapping['importlib-resources'] = (
    'https://importlib-resources.readthedocs.io/en/latest', None
)
| |
# Module setup for beaker's session machinery (Python 2 era).
# Picks the strongest available hashing/crypto backends at import time and
# falls back to the standard library where optional packages are missing.
import cPickle
import Cookie
import hmac
import os
import random
import time
from datetime import datetime, timedelta
# md5 moved into hashlib in Python 2.5; fall back to the old module on 2.4.
try:
    from hashlib import md5
except ImportError:
    from md5 import md5
try:
    # Use PyCrypto (if available)
    from Crypto.Hash import HMAC, SHA as SHA1
except ImportError:
    # PyCrypto not available. Use the Python standard library.
    import hmac as HMAC
    import sys
    # When using the stdlib, we have to make sure the hmac version and sha
    # version are compatible
    if sys.version_info[0:2] <= (2,4):
        # hmac in python2.4 or less require the sha module
        import sha as SHA1
    else:
        # NOTE: We have to use the callable with hashlib (hashlib.sha1),
        # otherwise hmac only accepts the sha module object itself
        from hashlib import sha1 as SHA1
# Check for pycryptopp encryption for AES
# crypto_ok gates CookieSession's encrypt_key support further below.
# NOTE(review): the bare except also hides non-ImportError failures inside
# beaker.crypto — consider narrowing to ImportError.
try:
    from beaker.crypto import generateCryptoKeys, aesEncrypt
    crypto_ok = True
except:
    crypto_ok = False
from beaker.cache import clsmap
from beaker.exceptions import BeakerException
from beaker.util import b64decode, b64encode, Set
__all__ = ['SignedCookie', 'Session']
# os.getpid is absent on some platforms (e.g. old GAE); degrade to ''.
getpid = hasattr(os, 'getpid') and os.getpid or (lambda : '')
class SignedCookie(Cookie.BaseCookie):
    """Extends python cookie to give digital signature support.

    Each cookie value is prefixed with a 40-character hex HMAC-SHA1 of the
    payload, keyed with ``secret``; decode fails (returns ``None``) when the
    signature does not match.
    """
    def __init__(self, secret, input=None):
        self.secret = secret
        Cookie.BaseCookie.__init__(self, input)

    def value_decode(self, val):
        """Verify the signature prefix and return (payload, raw_value).

        Returns ``(None, val)`` when the signature check fails, which the
        cookie machinery treats as an invalid/absent value.
        """
        val = val.strip('"')
        sig = HMAC.new(self.secret, val[40:], SHA1).hexdigest()
        expected = val[:40]
        # Constant-time comparison: a short-circuiting `!=` on attacker
        # controlled data leaks how many leading signature bytes matched.
        if len(sig) != len(expected):
            return None, val
        mismatch = 0
        for a, b in zip(sig, expected):
            mismatch |= ord(a) ^ ord(b)
        if mismatch:
            return None, val
        return val[40:], val

    def value_encode(self, val):
        """Prefix ``val`` with its HMAC-SHA1 signature for transport."""
        sig = HMAC.new(self.secret, val, SHA1).hexdigest()
        return str(val), ("%s%s" % (sig, val))
class Session(dict):
    """Session object that uses container package for storage.

    Behaves as a dict of session data backed by a beaker namespace
    (file/memory/memcached/...), optionally tracked by a (signed) cookie.
    """
    def __init__(self, request, id=None, invalidate_corrupt=False,
                 use_cookies=True, type=None, data_dir=None,
                 key='beaker.session.id', timeout=None, cookie_expires=True,
                 cookie_domain=None, secret=None, secure=False,
                 namespace_class=None, **namespace_args):
        # Pick a storage backend: explicit type wins, else file when a
        # data_dir is given, else in-memory.
        if not type:
            if data_dir:
                self.type = 'file'
            else:
                self.type = 'memory'
        else:
            self.type = type
        self.namespace_class = namespace_class or clsmap[self.type]
        self.namespace_args = namespace_args
        self.request = request
        self.data_dir = data_dir
        self.key = key
        self.timeout = timeout
        self.use_cookies = use_cookies
        self.cookie_expires = cookie_expires
        # Default cookie domain/path
        self._domain = cookie_domain
        self._path = '/'
        self.was_invalidated = False
        self.secret = secret
        self.secure = secure
        self.id = id
        self.accessed_dict = {}
        if self.use_cookies:
            cookieheader = request.get('cookie', '')
            if secret:
                # A secret means the id cookie is tamper-protected.
                try:
                    self.cookie = SignedCookie(secret, input=cookieheader)
                except Cookie.CookieError:
                    self.cookie = SignedCookie(secret, input=None)
            else:
                self.cookie = Cookie.SimpleCookie(input=cookieheader)
            if not self.id and self.key in self.cookie:
                self.id = self.cookie[self.key].value
        self.is_new = self.id is None
        if self.is_new:
            self._create_id()
            self['_accessed_time'] = self['_creation_time'] = time.time()
        else:
            # Narrowed from a bare except so KeyboardInterrupt/SystemExit
            # still propagate instead of silently invalidating the session.
            try:
                self.load()
            except Exception:
                if invalidate_corrupt:
                    self.invalidate()
                else:
                    raise

    def _create_id(self):
        """Generate a fresh session id and (optionally) emit the cookie."""
        self.id = md5(
            md5("%f%s%f%s" % (time.time(), id({}), random.random(),
                              getpid())).hexdigest(),
        ).hexdigest()
        self.is_new = True
        self.last_accessed = None
        if self.use_cookies:
            self.cookie[self.key] = self.id
            if self._domain:
                self.cookie[self.key]['domain'] = self._domain
            if self.secure:
                self.cookie[self.key]['secure'] = True
            self.cookie[self.key]['path'] = self._path
            if self.cookie_expires is not True:
                if self.cookie_expires is False:
                    # "Never" expire: pin to the 32-bit time_t maximum.
                    expires = datetime.fromtimestamp(0x7FFFFFFF)
                elif isinstance(self.cookie_expires, timedelta):
                    expires = datetime.today() + self.cookie_expires
                elif isinstance(self.cookie_expires, datetime):
                    expires = self.cookie_expires
                else:
                    raise ValueError("Invalid argument for cookie_expires: %s"
                                     % repr(self.cookie_expires))
                self.cookie[self.key]['expires'] = \
                    expires.strftime("%a, %d-%b-%Y %H:%M:%S GMT")
            self.request['cookie_out'] = self.cookie[self.key].output(header='')
            self.request['set_cookie'] = False

    def created(self):
        return self['_creation_time']
    created = property(created)

    def _set_domain(self, domain):
        # NOTE(review): the setter stores the value in the session dict while
        # the getter reads the instance attribute; preserved as-is.
        self['_domain'] = domain
        self.cookie[self.key]['domain'] = domain
        self.request['cookie_out'] = self.cookie[self.key].output(header='')
        self.request['set_cookie'] = True

    def _get_domain(self):
        # Fixed: property fget is called with the instance only; the
        # original extra `domain` parameter made `.domain` raise TypeError.
        return self._domain
    domain = property(_get_domain, _set_domain)

    def _set_path(self, path):
        self['_path'] = path
        self.cookie[self.key]['path'] = path
        self.request['cookie_out'] = self.cookie[self.key].output(header='')
        self.request['set_cookie'] = True

    def _get_path(self):
        # Fixed: same bogus extra parameter as _get_domain.
        return self._path
    path = property(_get_path, _set_path)

    def _delete_cookie(self):
        """Queue an already-expired cookie so the client drops the id."""
        self.request['set_cookie'] = True
        self.cookie[self.key] = self.id
        if self._domain:
            self.cookie[self.key]['domain'] = self._domain
        if self.secure:
            self.cookie[self.key]['secure'] = True
        self.cookie[self.key]['path'] = '/'
        # Any date safely in the past forces immediate expiry.
        expires = datetime.today().replace(year=2003)
        self.cookie[self.key]['expires'] = \
            expires.strftime("%a, %d-%b-%Y %H:%M:%S GMT")
        self.request['cookie_out'] = self.cookie[self.key].output(header='')
        self.request['set_cookie'] = True

    def delete(self):
        """Deletes the session from the persistent storage, and sends
        an expired cookie out"""
        if self.use_cookies:
            self._delete_cookie()
        self.clear()

    def invalidate(self):
        """Invalidates this session, creates a new session id, returns
        to the is_new state"""
        self.clear()
        self.was_invalidated = True
        self._create_id()
        self.load()

    def load(self):
        "Loads the data from this session from persistent storage"
        self.namespace = self.namespace_class(self.id,
            data_dir=self.data_dir, digest_filenames=False,
            **self.namespace_args)
        now = time.time()
        self.request['set_cookie'] = True
        self.namespace.acquire_read_lock()
        timed_out = False
        try:
            self.clear()
            try:
                session_data = self.namespace['session']
                # Memcached always returns a key, its None when its not
                # present
                if session_data is None:
                    session_data = {
                        '_creation_time': now,
                        '_accessed_time': now
                    }
                    self.is_new = True
            except (KeyError, TypeError):
                session_data = {
                    '_creation_time': now,
                    '_accessed_time': now
                }
                self.is_new = True
            if self.timeout is not None and \
               now - session_data['_accessed_time'] > self.timeout:
                timed_out = True
            else:
                # Properly set the last_accessed time, which is different
                # than the *currently* _accessed_time
                if self.is_new or '_accessed_time' not in session_data:
                    self.last_accessed = None
                else:
                    self.last_accessed = session_data['_accessed_time']
                # Update the current _accessed_time
                session_data['_accessed_time'] = now
                self.update(session_data)
                self.accessed_dict = session_data.copy()
        finally:
            self.namespace.release_read_lock()
        if timed_out:
            self.invalidate()

    def save(self, accessed_only=False):
        """Saves the data for this session to persistent storage

        If accessed_only is True, then only the original data loaded
        at the beginning of the request will be saved, with the updated
        last accessed time.
        """
        # Look to see if its a new session that was only accessed
        # Don't save it under that case
        if accessed_only and self.is_new:
            return None
        if not hasattr(self, 'namespace'):
            self.namespace = self.namespace_class(
                self.id,
                data_dir=self.data_dir,
                digest_filenames=False,
                **self.namespace_args)
        self.namespace.acquire_write_lock()
        try:
            if accessed_only:
                data = dict(self.accessed_dict.items())
            else:
                data = dict(self.items())
            # Save the data
            if not data and 'session' in self.namespace:
                del self.namespace['session']
            else:
                self.namespace['session'] = data
        finally:
            self.namespace.release_write_lock()
        if self.is_new:
            self.request['set_cookie'] = True

    def revert(self):
        """Revert the session to its original state from its first
        access in the request"""
        self.clear()
        self.update(self.accessed_dict)

    # TODO: I think both these methods should be removed. They're from
    # the original mod_python code i was ripping off but they really
    # have no use here.
    def lock(self):
        """Locks this session against other processes/threads. This is
        automatic when load/save is called.

        ***use with caution*** and always with a corresponding 'unlock'
        inside a "finally:" block, as a stray lock typically cannot be
        unlocked without shutting down the whole application.
        """
        self.namespace.acquire_write_lock()

    def unlock(self):
        """Unlocks this session against other processes/threads. This
        is automatic when load/save is called.

        ***use with caution*** and always within a "finally:" block, as
        a stray lock typically cannot be unlocked without shutting down
        the whole application.
        """
        self.namespace.release_write_lock()
class CookieSession(Session):
    """Pure cookie-based session

    Options recognized when using cookie-based sessions are slightly
    more restricted than general sessions.

    ``key``
        The name the cookie should be set to.
    ``timeout``
        How long session data is considered valid. This is used
        regardless of the cookie being present or not to determine
        whether session data is still valid.
    ``encrypt_key``
        The key to use for the session encryption, if not provided the
        session will not be encrypted.
    ``validate_key``
        The key used to sign the encrypted session
    ``cookie_domain``
        Domain to use for the cookie.
    ``secure``
        Whether or not the cookie should only be sent over SSL.
    """
    def __init__(self, request, key='beaker.session.id', timeout=None,
                 cookie_expires=True, cookie_domain=None, encrypt_key=None,
                 validate_key=None, secure=False, **kwargs):
        if not crypto_ok and encrypt_key:
            raise BeakerException("pycryptopp is not installed, can't use "
                                  "encrypted cookie-only Session.")
        self.request = request
        self.key = key
        self.timeout = timeout
        self.cookie_expires = cookie_expires
        self.encrypt_key = encrypt_key
        self.validate_key = validate_key
        self.request['set_cookie'] = False
        self.secure = secure
        self._domain = cookie_domain
        self._path = '/'
        try:
            cookieheader = request['cookie']
        except KeyError:
            cookieheader = ''
        if validate_key is None:
            raise BeakerException("No validate_key specified for Cookie only "
                                  "Session.")
        try:
            self.cookie = SignedCookie(validate_key, input=cookieheader)
        except Cookie.CookieError:
            self.cookie = SignedCookie(validate_key, input=None)
        self['_id'] = self._make_id()
        self.is_new = True
        # If we have a cookie, load it
        if self.key in self.cookie and self.cookie[self.key].value is not None:
            self.is_new = False
            # Best-effort load: a corrupt/tampered cookie just yields an
            # (almost) empty session rather than an error for the user.
            try:
                self.update(self._decrypt_data())
            except Exception:
                pass
            # Guard on '_accessed_time': if the cookie failed to decode the
            # key is absent and the original code raised KeyError here.
            if self.timeout is not None and '_accessed_time' in self and \
               time.time() - self['_accessed_time'] > self.timeout:
                self.clear()
            self.accessed_dict = self.copy()
            self._create_cookie()

    def created(self):
        return self['_creation_time']
    created = property(created)

    def id(self):
        return self['_id']
    id = property(id)

    def _set_domain(self, domain):
        self['_domain'] = domain
        self._domain = domain

    def _get_domain(self):
        # Fixed: property fget takes only the instance; the original extra
        # `domain` parameter made `.domain` raise TypeError on access.
        return self._domain
    domain = property(_get_domain, _set_domain)

    def _set_path(self, path):
        self['_path'] = path
        self._path = path

    def _get_path(self):
        # Fixed: same bogus extra parameter as _get_domain.
        return self._path
    path = property(_get_path, _set_path)

    def _encrypt_data(self):
        """Serialize, encipher, and base64 the session dict"""
        if self.encrypt_key:
            # Random nonce ensures a fresh AES key per cookie emission.
            nonce = b64encode(os.urandom(40))[:8]
            encrypt_key = generateCryptoKeys(self.encrypt_key,
                                             self.validate_key + nonce, 1)
            data = cPickle.dumps(self.copy(), 2)
            return nonce + b64encode(aesEncrypt(data, encrypt_key))
        else:
            data = cPickle.dumps(self.copy(), 2)
            return b64encode(data)

    def _decrypt_data(self):
        """Base64, decipher, then un-serialize the data for the session
        dict"""
        if self.encrypt_key:
            nonce = self.cookie[self.key].value[:8]
            encrypt_key = generateCryptoKeys(self.encrypt_key,
                                             self.validate_key + nonce, 1)
            payload = b64decode(self.cookie[self.key].value[8:])
            data = aesEncrypt(payload, encrypt_key)
            return cPickle.loads(data)
        else:
            data = b64decode(self.cookie[self.key].value)
            return cPickle.loads(data)

    def _make_id(self):
        """Generate a random session id (double md5 of time/pid/random)."""
        return md5(md5(
            "%f%s%f%d" % (time.time(), id({}), random.random(), getpid())
        ).hexdigest()
        ).hexdigest()

    def save(self, accessed_only=False):
        """Saves the data for this session to persistent storage"""
        if accessed_only and self.is_new:
            return
        if accessed_only:
            self.clear()
            self.update(self.accessed_dict)
        self._create_cookie()

    def expire(self):
        """Delete the 'expires' attribute on this Session, if any."""
        self.pop('_expires', None)

    def _create_cookie(self):
        """Serialize the session into the outbound signed cookie."""
        if '_creation_time' not in self:
            self['_creation_time'] = time.time()
        if '_id' not in self:
            self['_id'] = self._make_id()
        self['_accessed_time'] = time.time()
        if self.cookie_expires is not True:
            if self.cookie_expires is False:
                # "Never" expire: pin to the 32-bit time_t maximum.
                expires = datetime.fromtimestamp(0x7FFFFFFF)
            elif isinstance(self.cookie_expires, timedelta):
                expires = datetime.today() + self.cookie_expires
            elif isinstance(self.cookie_expires, datetime):
                expires = self.cookie_expires
            else:
                raise ValueError("Invalid argument for cookie_expires: %s"
                                 % repr(self.cookie_expires))
            self['_expires'] = expires
        elif '_expires' in self:
            expires = self['_expires']
        else:
            expires = None
        val = self._encrypt_data()
        # Browsers cap cookies around 4KB; fail loudly rather than truncate.
        if len(val) > 4064:
            raise BeakerException("Cookie value is too long to store")
        self.cookie[self.key] = val
        if '_domain' in self:
            self.cookie[self.key]['domain'] = self['_domain']
        elif self._domain:
            self.cookie[self.key]['domain'] = self._domain
        if self.secure:
            self.cookie[self.key]['secure'] = True
        self.cookie[self.key]['path'] = self.get('_path', '/')
        if expires:
            self.cookie[self.key]['expires'] = \
                expires.strftime("%a, %d-%b-%Y %H:%M:%S GMT")
        self.request['cookie_out'] = self.cookie[self.key].output(header='')
        self.request['set_cookie'] = True

    def delete(self):
        """Delete the cookie, and clear the session"""
        # Send a delete cookie request
        self._delete_cookie()
        self.clear()

    def invalidate(self):
        """Clear the contents and start a new session"""
        self.delete()
        self['_id'] = self._make_id()
class SessionObject(object):
    """Session proxy/lazy creator

    This object proxies access to the actual session object, so that in
    the case that the session hasn't been used before, it will be
    setup. This avoid creating and loading the session from persistent
    storage unless its actually used during the request.

    All internal state lives in ``__dict__`` directly because
    ``__setattr__`` forwards attribute writes to the real session.
    """
    def __init__(self, environ, **params):
        self.__dict__['_params'] = params
        self.__dict__['_environ'] = environ
        self.__dict__['_sess'] = None
        self.__dict__['_headers'] = []

    def _session(self):
        """Lazy initial creation of session object"""
        if self.__dict__['_sess'] is None:
            params = self.__dict__['_params']
            environ = self.__dict__['_environ']
            self.__dict__['_headers'] = req = {'cookie_out': None}
            req['cookie'] = environ.get('HTTP_COOKIE')
            if params.get('type') == 'cookie':
                self.__dict__['_sess'] = CookieSession(req, **params)
            else:
                self.__dict__['_sess'] = Session(req, use_cookies=True,
                                                 **params)
        return self.__dict__['_sess']

    def __getattr__(self, attr):
        return getattr(self._session(), attr)

    def __setattr__(self, attr, value):
        setattr(self._session(), attr, value)

    def __delattr__(self, name):
        self._session().__delattr__(name)

    def __getitem__(self, key):
        return self._session()[key]

    def __setitem__(self, key, value):
        self._session()[key] = value

    def __delitem__(self, key):
        self._session().__delitem__(key)

    def __repr__(self):
        return self._session().__repr__()

    def __iter__(self):
        """Only works for proxying to a dict"""
        return iter(self._session().keys())

    def __contains__(self, key):
        # `in` instead of the Python-2-only dict.has_key(); identical result.
        return key in self._session()

    def get_by_id(self, id):
        """Load a session by explicit id; None if it doesn't exist yet."""
        params = self.__dict__['_params']
        session = Session({}, use_cookies=False, id=id, **params)
        if session.is_new:
            return None
        return session

    def save(self):
        # Mark dirty; actual persistence happens in persist().
        self.__dict__['_dirty'] = True

    def delete(self):
        self.__dict__['_dirty'] = True
        self._session().delete()

    def persist(self):
        """Persist the session to the storage

        If its set to autosave, then the entire session will be saved
        regardless of if save() has been called. Otherwise, just the
        accessed time will be updated if save() was not called, or
        the session will be saved if save() was called.
        """
        if self.__dict__['_params'].get('auto'):
            self._session().save()
        else:
            if self.__dict__.get('_dirty'):
                self._session().save()
            else:
                self._session().save(accessed_only=True)

    def dirty(self):
        """True when save()/delete() was called during this request."""
        return self.__dict__.get('_dirty', False)

    def accessed(self):
        """True when the underlying session has actually been created."""
        return self.__dict__['_sess'] is not None
| |
#!/usr/bin/env python3
# Copyright (c) 2018-2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the wallet balance RPC methods."""
from decimal import Decimal
import struct
from test_framework.address import ADDRESS_BCRT1_UNSPENDABLE as ADDRESS_WATCHONLY
from test_framework.blocktools import COINBASE_MATURITY
from test_framework.test_framework import SyscoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
def create_transactions(node, address, amt, fees):
    """Create one signed raw transaction per fee, sending amt to address.

    Gathers spendable UTXOs from *node* until they cover amt plus the
    largest fee, then builds and signs a transaction (with change where
    positive) for every fee in *fees*. Returns the list of signed txs.
    """
    target = amt + max(fees)
    inputs = []
    ins_total = 0
    for utxo in (u for u in node.listunspent(0) if u['spendable']):
        inputs.append({"txid": utxo["txid"], "vout": utxo["vout"]})
        ins_total += utxo['amount']
        if ins_total >= target:
            break
    # make sure there was enough utxos
    assert ins_total >= target
    signed_txs = []
    for fee in fees:
        outputs = {address: amt}
        change = ins_total - amt - fee
        # prevent 0 change output
        if change > 0:
            outputs[node.getrawchangeaddress()] = change
        raw = node.createrawtransaction(inputs, outputs, 0, True)
        raw = node.signrawtransactionwithwallet(raw)
        assert_equal(raw['complete'], True)
        signed_txs.append(raw)
    return signed_txs
class WalletTest(SyscoinTestFramework):
    """Exercise the wallet balance RPCs (getbalance, getbalances,
    getunconfirmedbalance, getwalletinfo) across confirmed, unconfirmed,
    conflicted, and reorged transactions on a two-node network."""
    def set_test_params(self):
        """Two nodes on a fresh chain; node 0 throttles mempool descendants."""
        self.num_nodes = 2
        self.setup_clean_chain = True
        self.extra_args = [
            ['-limitdescendantcount=3'], # Limit mempool descendants as a hack to have wallet txs rejected from the mempool
            [],
        ]
    def skip_test_if_missing_module(self):
        """Skip entirely when the build has no wallet support."""
        self.skip_if_no_wallet()
    def run_test(self):
        """Drive the balance scenarios; see inline comments for each stage."""
        if not self.options.descriptors:
            # Tests legacy watchonly behavior which is not present (and does not need to be tested) in descriptor wallets
            self.nodes[0].importaddress(ADDRESS_WATCHONLY)
        # Check that nodes don't own any UTXOs
        assert_equal(len(self.nodes[0].listunspent()), 0)
        assert_equal(len(self.nodes[1].listunspent()), 0)
        self.log.info("Check that only node 0 is watching an address")
        assert 'watchonly' in self.nodes[0].getbalances()
        assert 'watchonly' not in self.nodes[1].getbalances()
        self.log.info("Mining blocks ...")
        self.generate(self.nodes[0], 1)
        self.generate(self.nodes[1], 1)
        self.generatetoaddress(self.nodes[1], COINBASE_MATURITY + 1, ADDRESS_WATCHONLY)
        if not self.options.descriptors:
            # Tests legacy watchonly behavior which is not present (and does not need to be tested) in descriptor wallets
            assert_equal(self.nodes[0].getbalances()['mine']['trusted'], 50)
            assert_equal(self.nodes[0].getwalletinfo()['balance'], 50)
            assert_equal(self.nodes[1].getbalances()['mine']['trusted'], 50)
            assert_equal(self.nodes[0].getbalances()['watchonly']['immature'], 5000)
            assert 'watchonly' not in self.nodes[1].getbalances()
        assert_equal(self.nodes[0].getbalance(), 50)
        assert_equal(self.nodes[1].getbalance(), 50)
        self.log.info("Test getbalance with different arguments")
        assert_equal(self.nodes[0].getbalance("*"), 50)
        assert_equal(self.nodes[0].getbalance("*", 1), 50)
        assert_equal(self.nodes[0].getbalance(minconf=1), 50)
        if not self.options.descriptors:
            assert_equal(self.nodes[0].getbalance(minconf=0, include_watchonly=True), 100)
            assert_equal(self.nodes[0].getbalance("*", 1, True), 100)
        else:
            assert_equal(self.nodes[0].getbalance(minconf=0, include_watchonly=True), 50)
            assert_equal(self.nodes[0].getbalance("*", 1, True), 50)
        assert_equal(self.nodes[1].getbalance(minconf=0, include_watchonly=True), 50)
        # Send 40 SYS from 0 to 1 and 60 SYS from 1 to 0.
        txs = create_transactions(self.nodes[0], self.nodes[1].getnewaddress(), 40, [Decimal('0.01')])
        self.nodes[0].sendrawtransaction(txs[0]['hex'])
        self.nodes[1].sendrawtransaction(txs[0]['hex']) # sending on both nodes is faster than waiting for propagation
        self.sync_all()
        txs = create_transactions(self.nodes[1], self.nodes[0].getnewaddress(), 60, [Decimal('0.01'), Decimal('0.02')])
        self.nodes[1].sendrawtransaction(txs[0]['hex'])
        self.nodes[0].sendrawtransaction(txs[0]['hex']) # sending on both nodes is faster than waiting for propagation
        self.sync_all()
        # First argument of getbalance must be set to "*"
        assert_raises_rpc_error(-32, "dummy first argument must be excluded or set to \"*\"", self.nodes[1].getbalance, "")
        self.log.info("Test balances with unconfirmed inputs")
        # Before `test_balance()`, we have had two nodes with a balance of 50
        # each and then we:
        #
        # 1) Sent 40 from node A to node B with fee 0.01
        # 2) Sent 60 from node B to node A with fee 0.01
        #
        # Then we check the balances:
        #
        # 1) As is
        # 2) With transaction 2 from above with 2x the fee
        #
        # Prior to #16766, in this situation, the node would immediately report
        # a balance of 30 on node B as unconfirmed and trusted.
        #
        # After #16766, we show that balance as unconfirmed.
        #
        # The balance is indeed "trusted" and "confirmed" insofar as removing
        # the mempool transactions would return at least that much money. But
        # the algorithm after #16766 marks it as unconfirmed because the 'taint'
        # tracking of transaction trust for summing balances doesn't consider
        # which inputs belong to a user. In this case, the change output in
        # question could be "destroyed" by replace the 1st transaction above.
        #
        # The post #16766 behavior is correct; we shouldn't be treating those
        # funds as confirmed. If you want to rely on that specific UTXO existing
        # which has given you that balance, you cannot, as a third party
        # spending the other input would destroy that unconfirmed.
        #
        # For example, if the test transactions were:
        #
        # 1) Sent 40 from node A to node B with fee 0.01
        # 2) Sent 10 from node B to node A with fee 0.01
        #
        # Then our node would report a confirmed balance of 40 + 50 - 10 = 80
        # SYS, which is more than would be available if transaction 1 were
        # replaced.
        def test_balances(*, fee_node_1=0):
            # Shared assertion helper, re-run after node 1 bumps its fee.
            # getbalances
            expected_balances_0 = {'mine': {'immature': Decimal('0E-8'),
                                            'trusted': Decimal('9.99'), # change from node 0's send
                                            'untrusted_pending': Decimal('60.0')},
                                   'watchonly': {'immature': Decimal('5000'),
                                                 'trusted': Decimal('50.0'),
                                                 'untrusted_pending': Decimal('0E-8')}}
            expected_balances_1 = {'mine': {'immature': Decimal('0E-8'),
                                            'trusted': Decimal('0E-8'), # node 1's send had an unsafe input
                                            'untrusted_pending': Decimal('30.0') - fee_node_1}} # Doesn't include output of node 0's send since it was spent
            if self.options.descriptors:
                del expected_balances_0["watchonly"]
            assert_equal(self.nodes[0].getbalances(), expected_balances_0)
            assert_equal(self.nodes[1].getbalances(), expected_balances_1)
            # getbalance without any arguments includes unconfirmed transactions, but not untrusted transactions
            assert_equal(self.nodes[0].getbalance(), Decimal('9.99')) # change from node 0's send
            assert_equal(self.nodes[1].getbalance(), Decimal('0')) # node 1's send had an unsafe input
            # Same with minconf=0
            assert_equal(self.nodes[0].getbalance(minconf=0), Decimal('9.99'))
            assert_equal(self.nodes[1].getbalance(minconf=0), Decimal('0'))
            # getbalance with a minconf incorrectly excludes coins that have been spent more recently than the minconf blocks ago
            # TODO: fix getbalance tracking of coin spentness depth
            assert_equal(self.nodes[0].getbalance(minconf=1), Decimal('0'))
            assert_equal(self.nodes[1].getbalance(minconf=1), Decimal('0'))
            # getunconfirmedbalance
            assert_equal(self.nodes[0].getunconfirmedbalance(), Decimal('60')) # output of node 1's spend
            assert_equal(self.nodes[1].getunconfirmedbalance(), Decimal('30') - fee_node_1) # Doesn't include output of node 0's send since it was spent
            # getwalletinfo.unconfirmed_balance
            assert_equal(self.nodes[0].getwalletinfo()["unconfirmed_balance"], Decimal('60'))
            assert_equal(self.nodes[1].getwalletinfo()["unconfirmed_balance"], Decimal('30') - fee_node_1)
        test_balances(fee_node_1=Decimal('0.01'))
        # Node 1 bumps the transaction fee and resends
        self.nodes[1].sendrawtransaction(txs[1]['hex'])
        self.nodes[0].sendrawtransaction(txs[1]['hex']) # sending on both nodes is faster than waiting for propagation
        self.sync_all()
        self.log.info("Test getbalance and getbalances.mine.untrusted_pending with conflicted unconfirmed inputs")
        test_balances(fee_node_1=Decimal('0.02'))
        self.generatetoaddress(self.nodes[1], 1, ADDRESS_WATCHONLY)
        # balances are correct after the transactions are confirmed
        balance_node0 = Decimal('69.99') # node 1's send plus change from node 0's send
        balance_node1 = Decimal('29.98') # change from node 0's send
        assert_equal(self.nodes[0].getbalances()['mine']['trusted'], balance_node0)
        assert_equal(self.nodes[1].getbalances()['mine']['trusted'], balance_node1)
        assert_equal(self.nodes[0].getbalance(), balance_node0)
        assert_equal(self.nodes[1].getbalance(), balance_node1)
        # Send total balance away from node 1
        txs = create_transactions(self.nodes[1], self.nodes[0].getnewaddress(), Decimal('29.97'), [Decimal('0.01')])
        self.nodes[1].sendrawtransaction(txs[0]['hex'])
        self.generatetoaddress(self.nodes[1], 2, ADDRESS_WATCHONLY)
        # getbalance with a minconf incorrectly excludes coins that have been spent more recently than the minconf blocks ago
        # TODO: fix getbalance tracking of coin spentness depth
        # getbalance with minconf=3 should still show the old balance
        assert_equal(self.nodes[1].getbalance(minconf=3), Decimal('0'))
        # getbalance with minconf=2 will show the new balance.
        assert_equal(self.nodes[1].getbalance(minconf=2), Decimal('0'))
        # check mempool transactions count for wallet unconfirmed balance after
        # dynamically loading the wallet.
        before = self.nodes[1].getbalances()['mine']['untrusted_pending']
        dst = self.nodes[1].getnewaddress()
        self.nodes[1].unloadwallet(self.default_wallet_name)
        self.nodes[0].sendtoaddress(dst, 0.1)
        self.sync_all()
        self.nodes[1].loadwallet(self.default_wallet_name)
        after = self.nodes[1].getbalances()['mine']['untrusted_pending']
        assert_equal(before + Decimal('0.1'), after)
        # Create 3 more wallet txs, where the last is not accepted to the
        # mempool because it is the third descendant of the tx above
        for _ in range(3):
            # Set amount high enough such that all coins are spent by each tx
            txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 99)
        self.log.info('Check that wallet txs not in the mempool are untrusted')
        assert txid not in self.nodes[0].getrawmempool()
        assert_equal(self.nodes[0].gettransaction(txid)['trusted'], False)
        assert_equal(self.nodes[0].getbalance(minconf=0), 0)
        self.log.info("Test replacement and reorg of non-mempool tx")
        tx_orig = self.nodes[0].gettransaction(txid)['hex']
        # Increase fee by 1 coin
        tx_replace = tx_orig.replace(
            struct.pack("<q", 99 * 10**8).hex(),
            struct.pack("<q", 98 * 10**8).hex(),
        )
        tx_replace = self.nodes[0].signrawtransactionwithwallet(tx_replace)['hex']
        # Total balance is given by the sum of outputs of the tx
        total_amount = sum([o['value'] for o in self.nodes[0].decoderawtransaction(tx_replace)['vout']])
        self.sync_all()
        self.nodes[1].sendrawtransaction(hexstring=tx_replace, maxfeerate=0)
        # Now confirm tx_replace
        block_reorg = self.generatetoaddress(self.nodes[1], 1, ADDRESS_WATCHONLY)[0]
        assert_equal(self.nodes[0].getbalance(minconf=0), total_amount)
        self.log.info('Put txs back into mempool of node 1 (not node 0)')
        self.nodes[0].invalidateblock(block_reorg)
        self.nodes[1].invalidateblock(block_reorg)
        assert_equal(self.nodes[0].getbalance(minconf=0), 0) # wallet txs not in the mempool are untrusted
        self.generatetoaddress(self.nodes[0], 1, ADDRESS_WATCHONLY, sync_fun=self.no_op)
        assert_equal(self.nodes[0].getbalance(minconf=0), 0) # wallet txs not in the mempool are untrusted
        # Now confirm tx_orig
        self.restart_node(1, ['-persistmempool=0'])
        self.connect_nodes(0, 1)
        self.sync_blocks()
        self.nodes[1].sendrawtransaction(tx_orig)
        self.generatetoaddress(self.nodes[1], 1, ADDRESS_WATCHONLY)
        assert_equal(self.nodes[0].getbalance(minconf=0), total_amount + 1) # The reorg recovered our fee of 1 coin
# Standard functional-test entry point: run directly, not via import.
if __name__ == '__main__':
    WalletTest().main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.